| prompt | completion | api |
|---|---|---|
| string (lengths 19 to 1.03M) | string (lengths 4 to 2.12k) | string (lengths 8 to 90) |
# ---------------------------------------------------------------------------------------------
# MIT License
# Copyright (c) 2020, Solace Corporation, <NAME> (<EMAIL>)
# ---------------------------------------------------------------------------------------------
import array as arr
import json
from .broker_series import BrokerSeries
from .common_base import CommonBase
from .constants import *
from ._constants import *
from .latency_broker_latency_series import LatencyBrokerLatencySeries
from .latency_node_latency_series import LatencyNodeLatencySeries
from .ping_series import PingSeries
from .run_meta import RunMeta
from .run import Run
import numpy as np
import pandas as pd
CHECK_PASSING_MD="**<span style='color:green'>passing</span>**"
CHECK_FAILING_MD="**<span style='color:red'>failing</span>**"
d_latency_percentile = {
# k_latency_00_05th : 0.005,
# k_latency_01_th : 0.01,
# k_latency_00_5th : 0.05,
# k_latency_10th : 0.10,
# k_latency_25th : 0.25,
# k_latency_50th : 0.5,
# k_latency_75th : 0.75,
k_latency_90th : 0.90,
k_latency_95th : 0.95,
k_latency_99th : 0.99,
k_latency_99_5th : 0.995,
k_latency_99_9th : 0.999,
# k_latency_99_95th : 0.9995,
# k_latency_99_99th : 0.9999,
# k_latency_99_995th : 0.99995,
# k_latency_99_999th : 0.99999,
}
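# Illustrative note (not part of the original code): the fractions above are the
# values passed to pandas' quantile(), e.g.
#   pd.Series(latencies).quantile(q=[0.90, 0.95, 0.99, 0.995, 0.999])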
class RunAnalytics():
def __init__(self, run):
self.run = run
def _export_broker_metric_by_consumer_as_dataframe(self, broker_metric, conversion_function=None):
client_connection_details_series = self.run.broker_series.getSeriesOfListOfClientConnectionDetails()
client_list=self.run.run_meta.getConsumerNamesAsDict()
for client_connection_details_sample in client_connection_details_series:
for client_connection_detail in client_connection_details_sample["client_connection_details"]:
client_name = self.run.run_meta.composeDisplayClientName(client_connection_detail['clientName'])
if client_name in client_list:
if conversion_function:
value = conversion_function(client_connection_detail[broker_metric])
else:
value = client_connection_detail[broker_metric]
client_list[client_name].append(value)
return pd.DataFrame(
data=client_list
)
def export_broker_txQueueByteCount_by_consumer_as_dataframe(self):
return self._export_broker_metric_by_consumer_as_dataframe('txQueueByteCount')
def export_broker_smoothedRoundTripTime_by_consumer_as_dataframe(self):
def convert2Micros(value):
return value / 1000
return self._export_broker_metric_by_consumer_as_dataframe('smoothedRoundTripTime', convert2Micros)
def export_broker_timedRetransmitCount_by_consumer_as_dataframe(self):
return self._export_broker_metric_by_consumer_as_dataframe('timedRetransmitCount')
def export_broker_uptime_by_consumer_as_dataframe(self):
return self._export_broker_metric_by_consumer_as_dataframe('uptime')
def export_broker_node_distinct_latencies_as_dataframe(self, col_name:str ="run"):
return pd.DataFrame(data={col_name: self.run.export_broker_node_distinct_latencies()})
def export_latency_node_distinct_latencies_as_dataframe(self, col_name:str ="run"):
return pd.DataFrame(data={col_name: self.run.export_latency_node_distinct_latencies()})
def export_latency_node_series_latencies_metrics_as_dataframe(self):
return pd.DataFrame(data=self.export_latency_node_series_latencies_metrics())
def export_broker_node_series_latencies_metrics_as_dataframe(self):
return pd.DataFrame(data=self.export_broker_node_series_latencies_metrics())
def export_broker_node_series_latencies_metrics(self):
result = dict()
#quantiles
percentiles = list(d_latency_percentile.values())
lat_dict =self.run.export_broker_node_distinct_latencies_per_sample()
for key, value in lat_dict.items():
tmp_df = pd.DataFrame(data={"sample":value})
tmp_quantiles = tmp_df['sample'].quantile(q=percentiles)
#self.add_to_dict(result,k_latency_minimum, tmp_df['sample'].min())
#self.add_to_dict(result,k_latency_maximum, tmp_df['sample'].max())
self.add_to_dict(result,k_latency_average, tmp_df['sample'].mean())
for map_key,map_percentile in d_latency_percentile.items():
self.add_to_dict(result,map_key, tmp_quantiles[map_percentile])
return result
def export_latency_node_series_latencies_metrics(self):
result = dict()
#quantiles
percentiles = list(d_latency_percentile.values())
lat_dict =self.run.export_latency_node_distinct_latencies_per_sample()
for key, value in lat_dict.items():
tmp_df = pd.DataFrame(data={"sample":value})
tmp_quantiles = tmp_df['sample'].quantile(q=percentiles)
#self.add_to_dict(result,k_latency_minimum, tmp_df['sample'].min())
#self.add_to_dict(result,k_latency_maximum, tmp_df['sample'].max())
self.add_to_dict(result,k_latency_average, tmp_df['sample'].mean())
for map_key,map_percentile in d_latency_percentile.items():
self.add_to_dict(result,map_key, tmp_quantiles[map_percentile])
return result
def export_combined_all_distinct_latencies_metrics(self) -> dict:
"""
Calculates metrics (min, max, mean, percentiles) for broker and latency nodes
:return: dict ['metrics']['latency-node']['broker-node']
"""
percentiles = list(d_latency_percentile.values())
ln_latencies = self.run.export_latency_node_distinct_latencies()
bn_latencies = self.run.export_broker_node_distinct_latencies()
tmp_df = pd.DataFrame(data={"latencies":ln_latencies})
tmp_quantiles = tmp_df['latencies'].quantile(q=percentiles)
list_metrics = [k_latency_minimum,k_latency_average]
list_metrics += list(d_latency_percentile.keys())
list_metrics.append(k_latency_maximum)
list_latency_node = list()
list_latency_node.append(tmp_df['latencies'].min())
list_latency_node.append(tmp_df['latencies'].mean())
for map_key,map_percentile in d_latency_percentile.items():
list_latency_node.append(tmp_quantiles[map_percentile])
list_latency_node.append(tmp_df['latencies'].max())
tmp_df = pd.DataFrame(data={"latencies":bn_latencies})  # api: pandas.DataFrame
import warnings
warnings.simplefilter('ignore', UserWarning)
from binance.client import Client
import pandas as pd
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib.animation as animation
from matplotlib import style
import matplotlib.pyplot as plt
import tkinter as tk
from tkinter import ttk
import requests
import sqlite3
from bs4 import BeautifulSoup
import webbrowser
def popupmsg(msg):
popup = tk.Tk()
popup.wm_title("!")
windowWidth = popup.winfo_reqwidth()
windowHeight = popup.winfo_reqheight()
positionRight = int(popup.winfo_screenwidth() / 2 - windowWidth / 2)
positionDown = int(popup.winfo_screenheight() / 2 - windowHeight / 2)
popup.geometry("+{}+{}".format(positionRight, positionDown))
label = ttk.Label(popup, text=msg, font=("Verdana", 12), takefocus=True, anchor='center', wraplength=400,
justify=tk.CENTER)
label.pack(side="top", fill="x", pady=15, padx=50)
B1 = ttk.Button(popup, text="Okay", command=popup.destroy)
B1.pack(pady=10)
popup.mainloop()
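# Illustrative usage (assumed, not from the original source):
#   popupmsg("Order placed successfully")
# opens a small centered window with the message and an "Okay" button.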
class LoginPage:
def __init__(self, master):
global client
self.master = master
self.frame = tk.Frame(self.master)
self.master.configure(padx=60, pady=20)
self.db = sqlite3.connect('UserKeys.db')
self.cur = self.db.cursor()
self.cur.execute('CREATE TABLE IF NOT EXISTS keys (api_key text, api_secret text)')
self.db.commit()
self.db.close()
self.top_label = tk.Label(self.master, text='Please sign in!', padx=10, pady=10)
self.api_key_label = tk.Label(self.master, text='Api Key: ', padx=7, pady=10)
self.api_key_entry = ttk.Entry(self.master, width=30)
self.api_secret_label = tk.Label(self.master, text='Api Secret: ', padx=10, pady=10)
self.api_secret_entry = ttk.Entry(self.master, width=30, show='*')
self.var = tk.IntVar()
self.remember_checkmark = tk.Checkbutton(self.master, text='Remember me', pady=10, variable=self.var)
self.submit_butt = tk.Button(self.master, text='Submit', width=10, padx=7, pady=5, relief='groove',
command=lambda: [self.add_keys(), self.new_window()])
self.db = sqlite3.connect('UserKeys.db')
self.cur = self.db.cursor()
self.cur.execute('SELECT * FROM keys')
self.the_keys001 = self.cur.fetchall()
for k in self.the_keys001:
api_key22 = k[0]
api_secret22 = k[1]
self.api_key_entry.insert(0, api_key22)
self.api_secret_entry.insert(0, api_secret22)
client = Client(api_key22, api_secret22, {'timeout': 20})
self.db.commit()
self.db.close()
self.layout()
def layout(self):
self.top_label.grid(row=0, columnspan=2)
self.api_key_label.grid(row=1, column=0)
self.api_key_entry.grid(row=1, column=1)
self.api_secret_label.grid(row=2, column=0)
self.api_secret_entry.grid(row=2, column=1)
self.remember_checkmark.grid(row=3, columnspan=2)
self.submit_butt.grid(row=4, columnspan=2)
def exit_app(self):
self.newWindow.destroy()
self.newWindow.quit()
def new_window(self):
self.master.withdraw()
self.newWindow = tk.Toplevel(self.master)
self.newWindow.protocol("WM_DELETE_WINDOW", self.exit_app)
self.newWindow.resizable(width=False, height=False)
bb = MainApplication(self.newWindow)
def add_keys(self):
global client
db = sqlite3.connect('UserKeys.db')
cur = db.cursor()
if self.var.get() == 1:
cur.execute('DELETE FROM keys')
db.commit()
cur.execute('INSERT INTO keys VALUES (:api_key, :api_secret)',
{
'api_key': self.api_key_entry.get(),
'api_secret': self.api_secret_entry.get()
})
db.commit()
cur.execute('SELECT * FROM keys')
the_keys = cur.fetchall()
for k in the_keys:
api_key55 = k[0]
api_secret55 = k[1]
client = Client(api_key55, api_secret55, {'timeout': 20})
db.commit()
else:
api_key77 = self.api_key_entry.get()
api_secret77 = self.api_secret_entry.get()
client = Client(api_key77, api_secret77, {'timeout': 20})
db.close()
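# Descriptive note added for clarity: the module-level defaults below mean
# 1-minute candlesticks, 60 samples (roughly 1 hour of data), for the BTCUSDT pair.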
interval = '1m'
datetime_format = "%I:%M %p"
limit = 60
symbol = 'BTCUSDT'
buy_symbol_market = 'USDT'
sell_symbol_market = 'BTC'
style.use('custom_light_style.mpltstyle')
class Header(tk.Frame):
def __init__(self, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
self.frame1 = tk.LabelFrame(self.parent)
self.testing_data = client.get_ticker(symbol=symbol)
self.last_price_header = tk.Label(self.frame1, text='Last Price')
self.change_header = tk.Label(self.frame1, text='24h Change')
self.high_price_header = tk.Label(self.frame1, text='24h High')
self.low_price_header = tk.Label(self.frame1, text='24h Low')
self.volume_header = tk.Label(self.frame1, text='24h Volume')
self.last_price_data = tk.Label(self.frame1, text='{}'.format(self.testing_data['lastPrice']))
self.change_data = tk.Label(self.frame1, text='{} / {}%'.format(self.testing_data['priceChange'],
self.testing_data['priceChangePercent']))
self.high_price_data = tk.Label(self.frame1, text='{}'.format(self.testing_data['highPrice']))
self.low_price_data = tk.Label(self.frame1, text='{}'.format(self.testing_data['lowPrice']))
self.volume_price_data = tk.Label(self.frame1, text='{:,.2f}'.format(float(self.testing_data['volume'])))
header_text_1 = [self.last_price_header, self.change_header, self.high_price_header, self.low_price_header,
self.volume_header]
header_text_2 = [self.last_price_data, self.change_data, self.high_price_data, self.low_price_data,
self.volume_price_data]
self.frame1.configure(bg='white')
for f in header_text_1:
f.configure(padx=20, font='Helvetica 8', bg='white', fg='#383838')
for f in header_text_2:
f.configure(font='Helvetica 9 bold', bg='#FFFFFF', fg='#000000')
self.layout()
def update_header_info(self):
testing_data = client.get_ticker(symbol=symbol)
if float(testing_data['lastPrice']) > 1:
self.last_price_data['text'] = round(float(testing_data['lastPrice']), 4)
self.change_data['text'] = '{} / {}%'.format(round(float(testing_data['priceChange']), 4),
testing_data['priceChangePercent'])
self.high_price_data['text'] = round(float(testing_data['highPrice']), 4)
self.low_price_data['text'] = round(float(testing_data['lowPrice']), 4)
else:
self.last_price_data['text'] = testing_data['lastPrice']
self.change_data['text'] = '{} / {}%'.format(testing_data['priceChange'], testing_data['priceChangePercent'])
self.high_price_data['text'] = testing_data['highPrice']
self.low_price_data['text'] = testing_data['lowPrice']
formatted_vol = '{:,.2f}'.format(float(testing_data['volume']))
self.volume_price_data['text'] = formatted_vol
self.parent.after(20000, self.update_header_info)
def layout(self):
self.parent.grid()
self.frame1.grid(row=0, columnspan=3)
self.last_price_header.grid(row=0, column=0)
self.change_header.grid(row=0, column=1)
self.high_price_header.grid(row=0, column=2)
self.low_price_header.grid(row=0, column=3)
self.volume_header.grid(row=0, column=4)
self.last_price_data.grid(row=1, column=0)
self.change_data.grid(row=1, column=1)
self.high_price_data.grid(row=1, column=2)
self.low_price_data.grid(row=1, column=3)
self.volume_price_data.grid(row=1, column=4)
class Controls(tk.Frame):
def __init__(self, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
self.main_frame = tk.Frame(self.parent)
self.graph_frame = tk.LabelFrame(self.main_frame, borderwidth=5, text=' Graph Controls ', labelanchor='n', padx=5, pady=2)
self.perf_frame = tk.LabelFrame(self.main_frame, borderwidth=5, text=' Top Performers ', labelanchor='n', pady=5, padx=11)
self.news_frame = tk.LabelFrame(self.main_frame, borderwidth=5, text=' News ', labelanchor='n', pady=2, padx=6)
self.btc_pairs = []
self.eth_pairs = []
self.usdt_pairs = []
self.list_of_syms = []
self.list_of_change = []
self.list_of_vol = []
self.learn_about = client.get_ticker()
self.all_symbols = client.get_all_tickers()
self.watch_list_list = sorted(self.learn_about, key=lambda i: float(i['priceChangePercent']), reverse=True)[0:7]
for f in self.all_symbols:
symbols = f['symbol']
if symbols.endswith('BTC'):
self.btc_pairs += [symbols]
if symbols.endswith('ETH'):
self.eth_pairs += [symbols]
if symbols.endswith('USDT'):
self.usdt_pairs += [symbols]
for i in range(0, 6):
watchlist_sym = self.watch_list_list[i]['symbol']
self.list_of_syms += [watchlist_sym]
watchlist_change = round(float(self.watch_list_list[i]['priceChangePercent']), 2)
self.list_of_change += [watchlist_change]
watchlist_vol = '{:,.0f}'.format(round(float(self.watch_list_list[i]['volume']), 0) / 1000)
self.list_of_vol += [watchlist_vol]
# Graph controls
self.graph_controls_000 = ttk.Label(self.graph_frame, text="Interval:")
self.graph_controls_001 = tk.Button(self.graph_frame, text="1h", command=lambda: self.button_changing(1))
self.graph_controls_002 = tk.Button(self.graph_frame, text="5h", command=lambda: self.button_changing(2))
self.graph_controls_003 = tk.Button(self.graph_frame, text="12h", command=lambda: self.button_changing(3))
self.graph_controls_004 = tk.Button(self.graph_frame, text="24h", command=lambda: self.button_changing(4))
self.graph_combobox1 = ttk.Combobox(self.graph_frame, values=['2h', '4h', '6h', '8h'], width=8,
state='readonly')
self.graph_combobox2 = ttk.Combobox(self.graph_frame, values=['7d', '30d', '3M', '6M', '1y'], width=8,
state='readonly')
self.graph_combobox1.set('2h')
self.graph_combobox2.set('7d')
self.graph_combobox1.bind('<<ComboboxSelected>>', self.changing_combobox)
self.graph_combobox2.bind('<<ComboboxSelected>>', self.changing_combobox22)
self.dark_mode_label = ttk.Label(self.graph_frame, text="Dark Mode:")
self.dark_mode_button = tk.Button(self.graph_frame, text="ON", command=self.parent.dark_mode_on)
self.dark_mode_button_off = tk.Button(self.graph_frame, text="OFF", command=self.parent.dark_mode_off)
# Top Performers
self.top_perf_button1 = tk.Button(self.perf_frame, text="{} +{}% {}k".format(self.list_of_syms[0], self.list_of_change[0],
self.list_of_vol[0]), width=27,
command=lambda: self.market_symbols(0), bg='#FFFFFF', fg='#000000', relief='raised')
self.top_perf_button2 = tk.Button(self.perf_frame, text="{} +{}% {}k".format(self.list_of_syms[1], self.list_of_change[1],
self.list_of_vol[1]), width=27,
command=lambda: self.market_symbols(1), bg='#FFFFFF', fg='#000000', relief='raised')
self.top_perf_button3 = tk.Button(self.perf_frame, text="{} +{}% {}k".format(self.list_of_syms[2], self.list_of_change[2],
self.list_of_vol[2]), width=27,
command=lambda: self.market_symbols(2), bg='#FFFFFF', fg='#000000', relief='raised')
self.top_perf_button4 = tk.Button(self.perf_frame, text="{} +{}% {}k".format(self.list_of_syms[3], self.list_of_change[3],
self.list_of_vol[3]), width=27,
command=lambda: self.market_symbols(3), bg='#FFFFFF', fg='#000000', relief='raised')
self.top_perf_button5 = tk.Button(self.perf_frame, text="{} +{}% {}k".format(self.list_of_syms[4], self.list_of_change[4],
self.list_of_vol[4]), width=27,
command=lambda: self.market_symbols(4), bg='#FFFFFF', fg='#000000', relief='raised')
self.top_perf_button6 = tk.Button(self.perf_frame, text="{} +{}% {}k".format(self.list_of_syms[5], self.list_of_change[5],
self.list_of_vol[5]), width=27,
command=lambda: self.market_symbols(5), bg='#FFFFFF', fg='#000000', relief='raised')
# News
self.times = []
self.links = []
self.titles = []
self.url = 'https://www.coindesk.com/news'
self.base = 'https://www.coindesk.com'
self.req = requests.get(self.url)
self.soup = BeautifulSoup(self.req.content, 'html.parser')
self.main_listbox = tk.Listbox(self.news_frame, height=9, width=32, bg='white', fg='#000000', selectbackground='gray',
activestyle='none')
sby = tk.Scrollbar(self.news_frame, width=13)
sbx = tk.Scrollbar(self.news_frame, orient=tk.HORIZONTAL, width=13)
sby.grid(column=1, sticky='ns')
sbx.grid(row=1, sticky='ew')
self.main_listbox.config(yscrollcommand=sby.set)
self.main_listbox.config(xscrollcommand=sbx.set)
sby.config(command=self.main_listbox.yview)
sbx.config(command=self.main_listbox.xview)
self.main_listbox.bind('<Double-Button-1>', self.news_article_open)
for h_four in self.soup.find_all('h4', {'class': 'heading'}):
self.titles.append(h_four.text)
for time in self.soup.find_all('time', {'class': 'time'}):
self.times.append(time.text)
for div in self.soup.find_all('div', {'class': 'text-content'}):
self.links.append(self.base + (div.find('a').next_sibling['href']))
for i in range(9):
for f in self.times, self.titles[10:19], self.links:
self.main_listbox.insert(tk.END, f[i])
self.main_listbox.insert(tk.END, '---')
self.layout()
def market_symbols(self, id):
global symbol, buy_symbol_market, sell_symbol_market
symbol = self.list_of_syms[id]
if self.list_of_syms[id].endswith('BTC'):
buy_symbol_market = 'BTC'
if self.list_of_syms[id].endswith('ETH'):
buy_symbol_market = 'ETH'
if self.list_of_syms[id].endswith('USDT'):
buy_symbol_market = 'USDT'
if self.list_of_syms[id] in self.btc_pairs:
sell_symbol_market = self.list_of_syms[id].replace('BTC', '')
if self.list_of_syms[id] in self.eth_pairs:
sell_symbol_market = self.list_of_syms[id].replace('ETH', '')
if self.list_of_syms[id] in self.usdt_pairs:
sell_symbol_market = self.list_of_syms[id].replace('USDT', '')
def button_changing(self, id):
global interval, datetime_format, limit
if id == 1:
interval = '1m'
datetime_format = "%I:%M %p"
limit = 60
if id == 2:
interval = '1m'
datetime_format = "%I:%M %p"
limit = 300
if id == 3:
interval = '5m'
datetime_format = "%I:%M %p"
limit = 144
if id == 4:
interval = '15m'
datetime_format = "%I:%M %p"
limit = 96
def changing_combobox(self, event):
global interval, datetime_format, limit
if self.graph_combobox1.get() == '2h':
interval = '1m'
datetime_format = "%I:%M %p"
limit = 120
elif self.graph_combobox1.get() == '4h':
interval = '1m'
datetime_format = "%I:%M %p"
limit = 240
elif self.graph_combobox1.get() == '6h':
interval = '5m'
datetime_format = "%I:%M %p"
limit = 72
else:
interval = '5m'
datetime_format = "%I:%M %p"
limit = 96
def changing_combobox22(self, event):
global interval, datetime_format, limit
if self.graph_combobox2.get() == '7d':
interval = '2h'
datetime_format = "%m/%d"
limit = 84
elif self.graph_combobox2.get() == '30d':
interval = '12h'
datetime_format = "%m/%d"
limit = 60
elif self.graph_combobox2.get() == '3M':
interval = '1d'
datetime_format = "%m/%d/%y"
limit = 91
elif self.graph_combobox2.get() == '6M':
interval = '3d'
datetime_format = "%m/%d/%y"
limit = 60
else:
interval = '1w'
datetime_format = "%m/%Y"
limit = 52
def news_article_open(self, event):
weblink = self.main_listbox.get(tk.ACTIVE)
if weblink.startswith('https://'):
webbrowser.open_new(weblink)
def layout(self):
self.parent.grid()
self.main_frame.grid(row=1, column=0)
self.graph_frame.grid()
self.perf_frame.grid()
self.news_frame.grid()
# Graph controls layout
self.graph_controls_000.grid(row=0)
self.graph_controls_001.grid(row=0, column=1)
self.graph_controls_002.grid(row=0, column=2)
self.graph_controls_003.grid(row=1, column=1)
self.graph_controls_004.grid(row=1, column=2)
self.graph_combobox1.grid(row=2, column=1)
self.graph_combobox2.grid(row=2, column=2)
self.dark_mode_label.grid(row=3)
self.dark_mode_button.grid(row=3, column=1)
self.dark_mode_button_off.grid(row=3, column=2)
# Top performers layout
self.top_perf_button1.grid(row=1, columnspan=3)
self.top_perf_button2.grid(row=2, columnspan=3)
self.top_perf_button3.grid(row=3, columnspan=3)
self.top_perf_button4.grid(row=4, columnspan=3)
self.top_perf_button5.grid(row=5, columnspan=3)
self.top_perf_button6.grid(row=6, columnspan=3)
# News layout
self.main_listbox.grid(row=0)
class GraphContainer(tk.Frame):
def __init__(self, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
self.draw_frame = tk.Frame(self.parent)
self.fig = Figure(facecolor='#cccccc')
self.a = self.fig.add_subplot(111)
self.layout()
def animate(self, i):
kline_data = client.get_klines(symbol=symbol, interval=interval, limit=limit)
animate_df = pd.DataFrame(kline_data).drop([7, 9, 10, 11], axis=1)
animate_df.columns = ['Open Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close Time', 'Num of Trades']
animate_df[['Open', 'High', 'Low', 'Close', 'Volume']] = animate_df[
['Open', 'High', 'Low', 'Close', 'Volume']].astype(
float)
animate_df['Open Time'] = pd.to_datetime(animate_df['Open Time'], unit='ms')  # api: pandas.to_datetime
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import assignment2_helper as helper
# Look pretty...
# matplotlib.style.use('ggplot')
plt.style.use('ggplot')
# Do * NOT * alter this line, until instructed!
scaleFeatures = True
#
file_path = "/Users/szabolcs/dev/git/DAT210x/Module4/Datasets/"
file_name = "kidney_disease.csv"
exclude_columns = ['id', 'classification'] #, 'rbc', 'pc', 'pcc', 'ba', 'htn', 'dm', 'cad', 'appet', 'pe', 'ane']
df = pd.read_csv(file_path + file_name)
labels = ['red' if i=='ckd' else 'green' for i in df.classification]
df.drop(exclude_columns, axis=1, inplace=True)
print(df.head())
df = pd.get_dummies(df, columns=["rbc"])
df = pd.get_dummies(df, columns=["pc"])
df = pd.get_dummies(df, columns=["pcc"])
df = pd.get_dummies(df, columns=["ba"])
df = pd.get_dummies(df, columns=["htn"])
df = pd.get_dummies(df, columns=["dm"])
df = pd.get_dummies(df, columns=["cad"])
df = pd.get_dummies(df, columns=["appet"])
df = pd.get_dummies(df, columns=["pe"])
df = pd.get_dummies(df, columns=["ane"])
df.pcv = pd.to_numeric(df.pcv, errors="coerce")
df.wc = pd.to_numeric(df.wc, errors="coerce")  # api: pandas.to_numeric
# To add a new cell, type '#%%'
# To add a new markdown cell, type '#%% [markdown]'
#%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
# ms-python.python added
import os
try:
os.chdir(os.path.join(os.getcwd(), 'assessment'))
print(os.getcwd())
except:
pass
#%% [markdown]
# ### *IPCC SR15 scenario assessment*
#
# <img style="float: right; height: 120px; margin-top: 10px;" src="../_static/IIASA_logo.png">
# <img style="float: right; height: 100px;" src="../_static/IAMC_logo.jpg">
#
# # Scenario categorization and indicators
#
# This notebook assigns the categorization by warming outcome and computes a range of descriptive indicators
# for the scenario assessment of the IPCC's _"Special Report on Global Warming of 1.5°C"_.
# It generates a `sr15_metadata_indicators.xlsx` spreadsheet, which is used in other notebooks for this assessment
# for categorization and extracting descriptive indicators.
#
# ## Scenario ensemble data
#
# The scenario data used in this analysis can be accessed and downloaded at [https://data.ene.iiasa.ac.at/iamc-1.5c-explorer](https://data.ene.iiasa.ac.at/iamc-1.5c-explorer).
#
# Bibliographic details of the scenario ensemble and all studies that contributed scenarios to the ensemble
# are included in this repository
# as [Endnote (enl)](../bibliography/iamc_1.5c_scenario_data.enl),
# [Reference Manager (ris)](../bibliography/iamc_1.5c_scenario_data.ris),
# and [BibTex (bib)](../bibliography/iamc_1.5c_scenario_data.bib) format.
#
# ## License and recommended citation
#
# This notebook is licensed under the Apache License, Version 2.0.
#
# Please refer to the [README](../README.md) for the recommended citation of the scenario ensemble and the notebooks in this repository.
#
# ***
#%% [markdown]
# ## Import dependencies and define general notebook settings
#%%
import math
import io
import yaml
import re
import pandas as pd
import numpy as np
from IPython.display import display
#%% [markdown]
# ### Introduction and tutorial for the `pyam` package
#
# This notebook (and all other analysis notebooks in this repository) uses the `pyam` package,
# an open-source Python package for IAM scenario analysis and visualization
# ([https://software.ene.iiasa.ac.at/pyam/](http://software.ene.iiasa.ac.at/pyam/)).
#
# For an introduction of the notation and features of the `pyam` package,
# please refer to [this tutorial](https://github.com/IAMconsortium/pyam/blob/master/doc/source/tutorials/pyam_first_steps.ipynb).
# It will take you through the basic functions and options used here,
# and provide further introduction and guidelines.
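#%%
# Illustrative sketch only (not part of the original assessment): the basic `pyam`
# pattern used throughout this notebook is to load an IamDataFrame, filter it, and
# extract timeseries data. The file name below is hypothetical.
# df = pyam.IamDataFrame(data='scenario_data.xlsx')
# co2_2030 = df.filter(variable='Emissions|CO2', year=2030).timeseries()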
#%%
import pyam
logger = pyam.logger()
#%% [markdown]
# ### Import Matplotlib and set figure layout defaults in line with SR1.5 guidelines
#%%
import matplotlib.pyplot as plt
plt.style.use('style_sr15.mplstyle')
#%% [markdown]
# ## Import scenario snapshot and define auxiliary dictionaries
#
# This notebook only assigns indicators based on global timeseries data.
#
# The dictionary `meta_tables` is used to collect definitions
# of categories and secondary scenario classification throughout this script.
# These definitions are exported to the metadata/categorization Excel workbook
# at the end of the script for completeness.
# The dictionary `meta_docs` collects definitions used for the documentation tags
# in the online scenario explorer.
#
# The dictionary `specs` collects lists and the run control specifications to be exported to JSON
# and used by other notebooks for the SR1.5 scenario analysis.
#
# The `plotting_args` dictionary assigns the default plotting arguments in this notebook.
#%%
sr1p5 = pyam.IamDataFrame(data='../data/iamc15_scenario_data_world_r1.1.xlsx')
#%%
meta_tables = {}
meta_docs = {}
#%%
specs = {}
#%%
plotting_args = {'color': 'category', 'linewidth': 0.2}
specs['plotting_args'] = plotting_args
#%% [markdown]
# ## Verify completeness of scenario submissions for key variables
#
# Verify that every scenario except for *Shell Sky* and the historical reference scenarios reports CO2 Emissions in 2030.
#%%
sr1p5.require_variable(variable='Emissions|CO2', year=2030, exclude_on_fail=False)
#%% [markdown]
# ## Check MAGICC postprocessing prior to categorization
#
# Assign scenarios that could not be postprocessed by probabilistic MAGICC to respective categories:
# - data not available for full century
# - insufficient reporting of emission species
# - reference scenario
#%%
sr1p5.set_meta(name='category', meta= 'uncategorized')
#%%
reference = sr1p5.filter(model='Reference')
pd.DataFrame(index=reference.meta.index)
#%%
sr1p5.set_meta(meta='reference', name='category', index=reference)
#%%
no_climate_assessment = (
sr1p5.filter(category='uncategorized').meta.index
.difference(sr1p5.filter(year=2100, variable='Emissions|CO2').meta.index)
)
pd.DataFrame(index=no_climate_assessment)
#%%
sr1p5.set_meta(meta='no-climate-assessment', name='category', index=no_climate_assessment)
#%% [markdown]
# ## Categorization of scenarios
#
# This section applies the categorization of scenarios as defined in Chapter 2 of the Special Report
# for unique assignment of scenarios.
#
# The category specification as agreed upon at LAM 3 in Malmö is repeated here for easier reference.
#
# The term $P_{x°C}$ refers to the probability of exceeding warming of $x°C$ throughout the century in at least one year
# and $P_{x°C}(y)$ refers to the probability of exceedance in a specific year $y$.
#
# |**Categories** |**Subcategories**|**Probability to exceed warming threshold**|**Acronym** |**Color** |
# |---------------|-----------------|-------------------------------------------|-----------------|----------------|
# | Below 1.5°C | Below 1.5°C (I) | $P_{1.5°C} \leq 0.34$ | Below 1.5C (I) | xkcd:baby blue |
# | | Below 1.5°C (II)| $0.34 < P_{1.5°C} \leq 0.50$ | Below 1.5C (II) | |
# | 1.5°C return with low OS | Lower 1.5°C return with low OS | $0.50 < P_{1.5°C} \leq 0.67$ and $P_{1.5°C}(2100) \leq 0.34$ |(Lower) 1.5C low OS | xkcd:bluish |
# | | Higher 1.5°C return with low OS | $0.50 < P_{1.5°C} \leq 0.67$ and $0.34 < P_{1.5°C}(2100) \leq 0.50$ |(Higher) 1.5C low OS | |
# | 1.5°C return with high OS | Lower 1.5°C return with high OS | $0.67 < P_{1.5°C}$ and $P_{1.5°C}(2100) \leq 0.34$ | (Lower) 1.5C high OS | xkcd:darkish blue |
# | | Higher 1.5°C return with high OS | $0.67 < P_{1.5°C}$ and $0.34 < P_{1.5°C}(2100) \leq 0.50$ | (Higher) 1.5C high OS | |
# | Lower 2.0°C | | $P_{2.0°C} \leq 0.34$ (excluding above) | Lower 2C | xkcd:orange |
# | Higher 2.0°C | | $0.34 < P_{2.0°C} \leq 0.50$ (excluding above) | Higher 2C | xkcd:red |
# | Above 2.0°C | | $P_{2.0°C} > 0.50$ for at least 1 year | Above 2C | darkgrey |
#%% [markdown]
# ### Category definitions to Excel
#
# The following dictionary repeats the category definitions from the table above
# and saves them as a `pandas.DataFrame` to a dictionary `meta_tables`.
# Throughout the notebook, this dictionary is used to collect definitions
# of categories and secondary scenario classification.
# These definitions are exported to the metadata/categorization Excel workbook
# at the end of the script for easy reference.
#%%
dct = {'Categories of scenarios':
['Below 1.5°C',
'',
'1.5°C return with low overshoot',
'',
'1.5°C return with high overshoot',
'',
'Lower 2.0°C',
'Higher 2.0°C',
'Above 2.0°C'],
'Subcategories':
['Below 1.5°C (I)',
'Below 1.5°C (II)',
'Lower 1.5°C return with low overshoot',
'Higher 1.5°C return with low overshoot',
'Lower 1.5°C return with high overshoot',
'Higher 1.5°C return with high overshoot',
'',
'',
''],
'Criteria for assignment to category':
['P1.5°C ≤ 0.34',
'0.34 < P1.5°C ≤ 0.50',
'0.50 < P1.5°C ≤ 0.67 and P1.5°C(2100) ≤ 0.34',
'0.50 < P1.5°C ≤ 0.67 and 0.34 < P1.5°C(2100) ≤ 0.50',
'0.67 < P1.5°C and P1.5°C(2100) ≤ 0.34',
'0.67 < P1.5°C and 0.34 < P1.5°C(2100) ≤ 0.50',
'P2.0°C ≤ 0.34 (excluding above)',
'0.34 < P2.0°C ≤ 0.50 (excluding above)',
'P2.0°C > 0.50 during at least 1 year'
],
'Acronym':
['Below 1.5C (I)',
'Below 1.5C (II)',
'Lower 1.5C low overshoot',
'Higher 1.5C low overshoot',
'Lower 1.5C high overshoot',
'Higher 1.5C high overshoot',
'Lower 2C',
'Higher 2C',
'Above 2C'],
'Color':
['xkcd:baby blue',
'',
'xkcd:bluish',
'',
'xkcd:darkish blue',
'',
'xkcd:orange',
'xkcd:red',
'darkgrey']
}
cols = ['Categories of scenarios', 'Subcategories', 'Criteria for assignment to category', 'Acronym', 'Color']
categories_doc = pd.DataFrame(dct)[cols]
meta_tables['categories'] = categories_doc
meta_docs['category'] = 'Categorization of scenarios by global warming impact'
meta_docs['subcategory'] = 'Sub-categorization of scenarios by global warming impact'
#%%
other_cats = ['no-climate-assessment', 'reference']
cats = ['Below 1.5C', '1.5C low overshoot', '1.5C high overshoot', 'Lower 2C', 'Higher 2C', 'Above 2C']
all_cats = cats + other_cats
subcats = dct['Acronym']
all_subcats = subcats + other_cats
#%%
specs['cats'] = cats
specs['all_cats'] = all_cats
specs['subcats'] = subcats
specs['all_subcats'] = all_subcats
#%% [markdown]
# ### Subcategory assignment
#
# We first assign the subcategories, then aggregate those assignments to the main categories.
# The categories assigned above to indicate reasons for non-processing by MAGICC are copied over to the subcategories.
#
# Keep in mind that setting a category will re-assign scenarios (in case they have already been assigned).
# So in case of going back and forth in this notebook (i.e., not executing the cells in the correct order),
# make sure to reset the categorization.
#%%
def warming_exccedance_prob(x):
return 'AR5 climate diagnostics|Temperature|Exceedance Probability|{} °C|MAGICC6'.format(x)
expected_warming = 'AR5 climate diagnostics|Temperature|Global Mean|MAGICC6|Expected value'
median_warming = 'AR5 climate diagnostics|Temperature|Global Mean|MAGICC6|MED'
#%%
sr1p5.set_meta(meta=sr1p5['category'], name='subcategory')
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='uncategorized',
value='Below 1.5C (I)', name='subcategory',
criteria={warming_exccedance_prob(1.5): {'up': 0.34}},
color='xkcd:baby blue')
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='uncategorized',
value='Below 1.5C (II)', name='subcategory',
criteria={warming_exccedance_prob(1.5): {'up': 0.50}},
color='xkcd:baby blue')
#%% [markdown]
# Categorizing by a variable using multiple filters (here: less than 66% probability of exceeding 1.5°C at any point during the century and less than 34% probability of exceeding that threshold in 2100) requires performing the assignment in three steps: first, categorize to an intermediate `low OS` category; second, assign to the category in question; third, reset all scenarios still categorized as intermediate after the second step back to `uncategorized`.
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='uncategorized',
value='low overshoot', name='subcategory',
criteria={warming_exccedance_prob(1.5): {'up': 0.67}})
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='low overshoot',
value='Lower 1.5C low overshoot', name='subcategory',
criteria={warming_exccedance_prob(1.5): {'up': 0.34, 'year': 2100}},
color='xkcd:bluish')
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='low overshoot',
value='Higher 1.5C low overshoot', name='subcategory',
criteria={warming_exccedance_prob(1.5): {'up': 0.50, 'year': 2100}},
color='xkcd:bluish')
#%% [markdown]
# Display scenarios that satisfy the `low overshoot` criterion
# but are not assigned to `Lower 1.5C low overshoot` or `Higher 1.5C low overshoot`.
# Then, reset them to uncategorized.
#%%
sr1p5.filter(subcategory='low overshoot').meta
#%%
sr1p5.set_meta(meta='uncategorized', name='subcategory', index=sr1p5.filter(subcategory='low overshoot'))
#%% [markdown]
# Determine all scenarios with a probability to exceed 1.5°C greater than 66% in any year throughout the century.
# The function `categorize()` cannot be used for this selection, because it would either check for the criteria being true for all years or for a particular year.
#%%
df = sr1p5.filter(exclude=False, subcategory='uncategorized', variable=warming_exccedance_prob(1.5)).timeseries()
sr1p5.set_meta(meta='high overshoot', name='subcategory',
index=df[df.apply(lambda x: max(x), axis=1) > 0.66].index)
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='high overshoot',
value='Lower 1.5C high overshoot', name='subcategory',
criteria={warming_exccedance_prob(1.5): {'up': 0.34, 'year': 2100}},
color='xkcd:darkish blue')
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='high overshoot',
value='Higher 1.5C high overshoot', name='subcategory',
criteria={warming_exccedance_prob(1.5): {'up': 0.50, 'year': 2100}},
color='xkcd:darkish blue')
#%% [markdown]
# Reset scenarios that satisfy the `high overshoot` criterion
# but are not assigned to `Lower 1.5C high overshoot` or `Higher 1.5C high overshoot`.
#%%
sr1p5.set_meta(meta='uncategorized', name='subcategory', index=sr1p5.filter(subcategory='high overshoot'))
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='uncategorized',
value='Lower 2C', name='subcategory',
criteria={warming_exccedance_prob(2.0): {'up': 0.34}},
color='xkcd:orange')
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='uncategorized',
value='Higher 2C', name='subcategory',
criteria={warming_exccedance_prob(2.0): {'up': 0.50}},
color='xkcd:red')
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='uncategorized',
value='Above 2C', name='subcategory',
criteria={warming_exccedance_prob(2.0): {'up': 1.0}},
color='darkgrey')
#%% [markdown]
# ### Aggregation of subcategories to categories
#%%
rc = pyam.run_control()
def assign_rc_color_from_sub(cat, sub):
rc.update({'color': {'category': {cat: rc['color']['subcategory'][sub]}}})
#%%
sr1p5.set_meta(meta='Below 1.5C', name='category',
index=sr1p5.filter(subcategory=['Below 1.5C (I)', 'Below 1.5C (II)']).meta.index)
assign_rc_color_from_sub('Below 1.5C', 'Below 1.5C (II)')
#%%
sr1p5.set_meta(meta='1.5C low overshoot', name='category',
index=sr1p5.filter(subcategory=['Lower 1.5C low overshoot', 'Higher 1.5C low overshoot']))
assign_rc_color_from_sub('1.5C low overshoot', 'Lower 1.5C low overshoot')
#%%
sr1p5.set_meta(meta='1.5C high overshoot', name='category',
index=sr1p5.filter(subcategory=['Lower 1.5C high overshoot', 'Higher 1.5C high overshoot']))
assign_rc_color_from_sub('1.5C high overshoot', 'Lower 1.5C high overshoot')
#%%
cats_non15 = ['Lower 2C', 'Higher 2C', 'Above 2C']
df_2c = sr1p5.filter(subcategory=cats_non15)
sr1p5.set_meta(meta=df_2c['subcategory'], name='category')
for c in cats_non15:
assign_rc_color_from_sub(c, c)
#%% [markdown]
# ### Additional assessment of categorization
#
# Check whether there are any scenarios that return to 1.5°C by the end of the century and exceed the 2°C threshold with a likelihood higher than 34% or 50% (i.e., the `Lower 2C` or the `Higher 2C` categories, respectively). A scenario categorized as `1.5C` but with a higher-than-50% probability of exceeding 2°C at some point in the century may need to be considered separately in subsequent assessment.
#%%
cats_15 = ['Below 1.5C', '1.5C low overshoot', '1.5C high overshoot']
specs['cats_15'] = cats_15
#%%
cats_15_no_lo = ['Below 1.5C', '1.5C low overshoot']
specs['cats_15_no_lo'] = cats_15_no_lo
#%%
cats_2 = ['Lower 2C', 'Higher 2C']
specs['cats_2'] = cats_2
#%%
df = sr1p5.filter(exclude=False, category=cats_15, variable=warming_exccedance_prob(2.0)).timeseries()
ex_prob_2 = df.apply(lambda x: max(x))
#%%
if max(ex_prob_2) > 0.34:
logger.warning('The following 1.5C-scenarios are not `Lower 2C` scenarios:')
display(df[df.apply(lambda x: max(x), axis=1) > 0.34])
#%%
if max(ex_prob_2) > 0.50:
logger.warning('The following 1.5C-scenarios are not `2C` scenarios:')
display(df[df.apply(lambda x: max(x), axis=1) > 0.50])
#%% [markdown]
# ### Counting and evaluation of scenario assignment categories
#
# Count the number of scenarios assigned to each category.
#
# This table is the basis for **Tables 2.1 and 2.A.11** in the SR1.5.
#%%
lst = sr1p5.meta.groupby(['category', 'subcategory']).count()
(
lst
.reindex(all_cats, axis='index', level=0)
.reindex(all_subcats, axis='index', level=1)
.rename(columns={'exclude': 'count'})
)
#%% [markdown]
# Check whether any scenarios are still marked as `uncategorized`. This may be due to missing MAGICC postprocessing.
#%%
if any(sr1p5['category'] == 'uncategorized'):
logger.warning('There are scenarios that are not yet categorized!')
display(sr1p5.filter(category='uncategorized').meta)
#%% [markdown]
# ## Validation of Kyoto GHG emissions range (SAR-GWP100)
#
# Validate whether any scenario reports aggregate Kyoto gases outside the range assessed by the Second Assessment Report (SAR) using the Global Warming Potential over 100 years (GWP100). Scenarios outside this range are excluded from some figures and tables in the assessment.
#%%
invalid_sar_gwp = sr1p5.validate(criteria={'Emissions|Kyoto Gases (SAR-GWP100)':
{'lo': 44500, 'up': 53500, 'year':2010}}, exclude_on_fail=False)
#%%
name='Kyoto-GHG|2010 (SAR)'
sr1p5.set_meta(meta='in range', name=name)
sr1p5.set_meta(meta='exclude', name=name, index=invalid_sar_gwp)
meta_docs[name] = 'Indicator whether 2010 Kyoto-GHG reported by the scenario (as assessed by IPCC SAR) are in the valid range'
#%% [markdown]
# ## Assignment of baseline scenarios
#
# This section assigns a `baseline` reference for scenarios from selected model intercomparison projects and individual submissions.
#%%
def set_baseline_reference(x):
m, s = (x.name[0], x.name[1])
b = None
if s.startswith('SSP') and not 'Baseline' in s:
b = '{}Baseline'.format(s[0:5])
if s.startswith('CD-LINKS') and not 'NoPolicy' in s:
b = '{}NoPolicy'.format(s[0:9])
if s.startswith('EMF33') and not 'Baseline' in s:
b = '{}Baseline'.format(s[0:6])
if s.startswith('ADVANCE') and not 'NoPolicy' in s:
b = '{}NoPolicy'.format(s[0:8])
if s.startswith('GEA') and not 'base' in s:
b = '{}base'.format(s[0:8])
if s.startswith('TERL') and not 'Baseline' in s:
b = s.replace('15D', 'Baseline').replace('2D', 'Baseline')
if s.startswith('SFCM') and not 'Baseline' in s:
b = s.replace('1p5Degree', 'Baseline').replace('2Degree', 'Baseline')
if s.startswith('CEMICS') and not s == 'CEMICS-Ref':
b = 'CEMICS-Ref'
if s.startswith('SMP') and not 'REF' in s:
if s.endswith('Def') or s.endswith('regul'):
b = 'SMP_REF_Def'
else:
b = 'SMP_REF_Sust'
if s.startswith('DAC'):
b = 'BAU'
# check that baseline scenario exists for specific model `m`
if (m, b) in sr1p5.meta.index:
return b
# else (or if scenario name not in list above), return None
return None
#%%
name = 'baseline'
sr1p5.set_meta(sr1p5.meta.apply(set_baseline_reference, raw=True, axis=1), name)
meta_docs[name] = 'Name of the respective baseline (or reference/no-policy) scenario'
#%% [markdown]
# ## Assignment of marker scenarios
#
# The following scenarios are used as markers throughout the analysis and visualization, cf. Figure 2.7 (SOD):
#
# |**Marker** |**Model & scenario name** |**Reference** | **Symbol** |
# |------------|--------------------------------|-------------------------------|-----------------|
# | *S1* | AIM/CGE 2.0 / SSP1-19 | Fujimori et al., 2017 | `white square` |
# | *S2* | MESSAGE-GLOBIOM 1.0 / SSP2-19 | Fricko et al., 2017 | `yellow square` |
# | *S5* | REMIND-MAgPIE 1.5 / SSP5-19 | Kriegler et al., 2017 | `black square` |
# | *LED* | MESSAGEix-GLOBIOM 1.0 / LowEnergyDemand | Grubler et al., 2018 | `white circle` |
#%%
dct = {'Marker':
['S1',
'S2',
'S5',
'LED'],
'Model and scenario name':
['AIM/CGE 2.0 / SSP1-19',
'MESSAGE-GLOBIOM 1.0 / SSP2-19',
'REMIND-MAgPIE 1.5 / SSP5-19',
'MESSAGEix-GLOBIOM 1.0 / LowEnergyDemand'],
'Reference':
['Fujimori et al., 2017',
'Fricko et al., 2017',
'Kriegler et al., 2017',
'Grubler et al., 2018'],
'Symbol':
['white square',
'yellow square',
'black square',
'white circle']
}
cols = ['Marker', 'Model and scenario name', 'Reference', 'Symbol']
markers_doc = pd.DataFrame(dct)[cols]
meta_tables['marker scenarios'] = markers_doc
meta_docs['marker'] = 'Illustrative pathways (marker scenarios)'
#%%
specs['marker'] = ['S1', 'S2', 'S5', 'LED']
#%%
sr1p5.set_meta('', 'marker')
rc.update({'marker': {'marker': {'': None}}})
#%%
m = 'S1'
sr1p5.set_meta(m, 'marker',
sr1p5.filter(model='AIM/CGE 2.0', scenario='SSP1-19'))
rc.update({'marker': {'marker': {m: 's'}},
'c': {'marker': {m: 'white'}},
'edgecolors': {'marker': {m: 'black'}}}
)
#%%
m = 'S2'
sr1p5.set_meta(m, 'marker',
sr1p5.filter(model='MESSAGE-GLOBIOM 1.0', scenario='SSP2-19'))
rc.update({'marker': {'marker': {m: 's'}},
'c': {'marker': {m: 'yellow'}},
'edgecolors': {'marker': {m: 'black'}}})
#%%
m = 'S5'
sr1p5.set_meta(m, 'marker',
sr1p5.filter(model='REMIND-MAgPIE 1.5', scenario='SSP5-19'))
rc.update({'marker': {'marker': {m: 's'}},
'c': {'marker': {m: 'black'}},
'edgecolors': {'marker': {m: 'black'}}})
#%%
m = 'LED'
sr1p5.set_meta(m, 'marker',
sr1p5.filter(model='MESSAGEix-GLOBIOM 1.0', scenario='LowEnergyDemand'))
rc.update({'marker': {'marker': {m: 'o'}},
'c': {'marker': {m: 'white'}},
'edgecolors': {'marker': {m: 'black'}}})
#%% [markdown]
# ## Visual analysis of emission and temperature pathways by category
#
# First, we plot all carbon dioxide emissions trajectories colored by category, followed by the CO2 emissions from the AFOLU sector. Then, we show the warming trajectories by category.
#%%
horizon = list(range(2000, 2020, 5)) + list(range(2020, 2101, 10))
df = sr1p5.filter(year=horizon)
#%%
df.filter(exclude=False, variable='Emissions|CO2').line_plot(**plotting_args, marker='marker')
#%%
df.filter(exclude=False, variable='Emissions|CO2|AFOLU').line_plot(**plotting_args, marker='marker')
#%%
df.filter(exclude=False, variable=expected_warming).line_plot(**plotting_args, marker='marker')
#%% [markdown]
# ## Import scientific references and publication status
# The following block reads in a table (CSV) with the details of the scientific references for each scenario.
#
# The main cell of this section loops over all entries in this table, filters for the relevant scenarios,
# and assigns a short reference and the publication status. If multiple references are relevant for a scenario, the references are compiled, and the 'highest' publication status is written to the metadata.
#%%
ref_cols = ['project', 'model', 'scenario', 'reference', 'doi', 'bibliography']
#%%
sr1p5.set_meta('undefined', 'reference')
sr1p5.set_meta('unknown', 'project')
#%%
refs = pd.read_csv('../bibliography/scenario_references.csv', encoding='iso-8859-1')
_refs = {'index': []}
for i in ref_cols:
_refs.update({i.title(): []})
#%%
for cols in refs.iterrows():
c = cols[1]
filters = {}
# check that filters are defined
if c.model is np.NaN and c.scenario is np.NaN:
logger.warn('project `{}` on line {} has no filters assigned'
.format(c.project, cols[0]))
continue
# filter for scenarios to apply the project and publication tags
filters = {}
for i in ['model', 'scenario']:
if c[i] is not np.NaN:
if ";" in c[i]:
filters.update({i: re.sub(";", "", c[i]).split()})
else:
filters.update({i: c[i]})
df = sr1p5.filter(**filters)
if df.scenarios().empty:
logger.warn('no scenarios satisfy filters for project `{}` on line {} ({})'
.format(c.project, cols[0], filters))
continue
# write to meta-tables dictionary
_refs['index'].append(cols[0])
for i in ref_cols:
_refs[i.title()].append(c[i])
sr1p5.meta.loc[df.meta.index, 'project'] = c['project']
for i in df.meta.index:
r = c['reference']
sr1p5.meta.loc[i, 'reference'] = r if sr1p5.meta.loc[i, 'reference'] == 'undefined' else '{}; {}'.format(sr1p5.meta.loc[i, 'reference'], r)
#%%
cols = [i.title() for i in ref_cols]
meta_tables['references'] = pd.DataFrame(_refs)  # api: pandas.DataFrame
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pytest
from pandas.api.types import is_categorical_dtype
from _helpers import assert_array_nan_equal
from cellrank.tools import Lineage
from cellrank.tools._utils import (
_one_hot,
_process_series,
_fuzzy_to_discrete,
_merge_categorical_series,
_series_from_one_hot_matrix,
)
from cellrank.tools._colors import _map_names_and_colors
class TestToolsUtils:
def test_merge_not_categorical(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, np.nan, "d", "a"])
with pytest.raises(TypeError):
_ = _merge_categorical_series(x, y)
def test_merge_different_index(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, np.nan, "d", "a"], index=[5, 4, 3, 2, 1]).astype(
"category"
)
with pytest.raises(ValueError):
_ = _merge_categorical_series(x, y)
def test_merge_normal_run(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, "a", "d", "a"]).astype("category")
expected = pd.Series(["b", "b", "a", "d", "a"]).astype("category")
res = _merge_categorical_series(x, y, inplace=False)
np.testing.assert_array_equal(res.values, expected.values)
def test_merge_normal_run_inplace(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, "a", "d", "a"]).astype("category")
expected = pd.Series(["b", "b", "a", "d", "a"]).astype("category")
_ = _merge_categorical_series(x, y, inplace=True)
assert _ is None
np.testing.assert_array_equal(x.values, expected.values)
def test_merge_normal_run_completely_different_categories(self):
x = pd.Series(["a", "a", "a"]).astype("category")
y = pd.Series(["b", "b", "b"]).astype("category")
expected = pd.Series(["b", "b", "b"]).astype("category")
res = _merge_categorical_series(x, y, inplace=False)
np.testing.assert_array_equal(res.values, expected.values)
np.testing.assert_array_equal(res.cat.categories.values, ["b"])
def test_merge_colors_not_colorlike(self):
with pytest.raises(ValueError):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, "a", "d", "a"]).astype("category")
colors_x = ["red", "foo"]
_ = _merge_categorical_series(x, y, colors_old=colors_x, inplace=True)
def test_merge_colors_wrong_number_of_colors(self):
with pytest.raises(ValueError):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, "a", "d", "a"]).astype("category")
colors_x = ["red"]
_ = _merge_categorical_series(x, y, colors_old=colors_x, inplace=True)
def test_merge_colors_wrong_dict(self):
with pytest.raises(ValueError):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, "a", "d", "a"]).astype("category")
colors_x = {"a": "red", "foo": "blue"}
_ = _merge_categorical_series(x, y, colors_old=colors_x, inplace=True)
def test_merge_colors_simple_old(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, "a", "d", "a"]).astype("category")
colors_x = ["red", "blue"]
colors_merged = _merge_categorical_series(
x, y, colors_old=colors_x, inplace=True
)
np.testing.assert_array_equal(colors_merged, ["red", "blue", "#4daf4a"])
def test_merge_colors_simple_old_no_inplace(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, "a", "d", "a"]).astype("category")
expected = pd.Series(["b", "b", "a", "d", "a"]).astype("category")
colors_x = ["red", "blue"]
merged, colors_merged = _merge_categorical_series(
x, y, colors_old=colors_x, inplace=False
)
np.testing.assert_array_equal(merged.values, expected.values)
np.testing.assert_array_equal(colors_merged, ["red", "blue", "#4daf4a"])
def test_merge_colors_simple_new(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, "a", "d", "a"]).astype("category")
colors_y = ["red", "blue", "green"]
colors_merged = _merge_categorical_series(
x, y, colors_new=colors_y, inplace=True
)
np.testing.assert_array_equal(colors_merged, ["#e41a1c", "#377eb8", "green"])
def test_merge_colors_both(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, "a", "d", "a"]).astype("category")
colors_x = ["red", "blue"]
colors_y = ["green", "yellow", "black"]
colors_merged = _merge_categorical_series(
x, y, colors_old=colors_x, colors_new=colors_y, inplace=True
)
np.testing.assert_array_equal(colors_merged, ["red", "blue", "black"])
def test_merge_colors_both_overwrite(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan])  # api: pandas.Series
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import pandas as pd
import numpy as np
import sys, os, site, zipfile, math, time, json, io
import googlemaps, urllib, shapely, shutil, requests
import xml.etree.ElementTree as ET
from glob import glob
from urllib.error import HTTPError
from urllib.request import URLError
from http.client import IncompleteRead
from zipfile import BadZipFile
from tqdm import tqdm, trange
from warnings import warn
###########################
### IMPORT PROJECT PATH ###
import pvvm.settings
revmpath = pvvm.settings.revmpath
datapath = pvvm.settings.datapath
apikeys = pvvm.settings.apikeys
nsrdbparams = pvvm.settings.nsrdbparams
#####################
### Imports from pvvm
import pvvm.toolbox
import pvvm.io
#######################
### DICTS AND LISTS ###
#######################
isos = ['CAISO', 'ERCOT', 'MISO', 'PJM', 'NYISO', 'ISONE']
resolutionlmps = {
('CAISO', 'da'): 60, ('CAISO', 'rt'): 5,
('ERCOT', 'da'): 60, ('ERCOT', 'rt'): 5,
('MISO', 'da'): 60, ('MISO', 'rt'): 60,
('PJM', 'da'): 60, ('PJM', 'rt'): 60,
('NYISO', 'da'): 60, ('NYISO', 'rt'): 5,
('ISONE', 'da'): 60, ('ISONE', 'rt'): 60,
}
################
### DOWNLOAD ###
################
###############
### General use
def constructpayload(**kwargs):
out = []
for kwarg in kwargs:
out.append('{}={}'.format(kwarg, kwargs[kwarg]))
stringout = '&'.join(out)
return stringout
def constructquery(urlstart, **kwargs):
out = '{}{}'.format(urlstart, constructpayload(**kwargs))
return out
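# Illustrative example (assumed, not from the original source): the two helpers above
# assemble a query string from keyword arguments, e.g.
#   constructquery('http://oasis.caiso.com/oasisapi/GroupZip?', queryname='PRC_LMP', version=1)
# returns 'http://oasis.caiso.com/oasisapi/GroupZip?queryname=PRC_LMP&version=1'.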
def stampify(date, interval=pd.Timedelta('1H')):
datetime = pd.Timestamp(date)
if interval == pd.Timedelta('1H'):
dateout = '{}{:02}{:02}T{:02}'.format(
datetime.year, datetime.month,
datetime.day, datetime.hour)
elif interval == pd.Timedelta('1D'):
dateout = '{}{:02}{:02}'.format(
datetime.year, datetime.month,
datetime.day)
return dateout
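# Illustrative examples (derived from the formatting above, not from the original source):
#   stampify('2018-01-05 14:00')                         -> '20180105T14'
#   stampify('2018-01-05', interval=pd.Timedelta('1D'))  -> '20180105'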
def download_file_series(urlstart, urlend, fileseries, filepath,
overwrite=False, sleeptime=60, numattempts=200, seriesname=True):
"""
Example
-------
You want to download a list of files at urls = [
'http://www.test.com/foo001.csv', 'http://www.test.com/foo002.csv'].
Then:
urlstart = 'http://www.test.com/foo'
urlend = '.csv'
fileseries = ['001', '002']
If you want the files to be named 'foo001.csv', use seriesname=False
If you want the files to be named '001.csv', use seriesname=True
"""
filepath = pvvm.toolbox.pathify(filepath, make=True)
### Make lists of urls, files to download, and filenames
urls = [(urlstart + file + urlend) for file in fileseries]
todownload = [os.path.basename(url) for url in urls]
if seriesname == True:
filenames = [os.path.basename(file) + urlend for file in fileseries]
else:
filenames = todownload
### Get the list of downloaded files
downloaded = [os.path.basename(file) for file in glob(filepath + '*')]
### Remake the list if overwrite == False
if overwrite == False:
filestodownload = []
urlstodownload = []
fileseriesnames = []
for i in range(len(filenames)):
if filenames[i] not in downloaded:
filestodownload.append(todownload[i])
urlstodownload.append(urls[i])
fileseriesnames.append(filenames[i])
elif overwrite == True:
filestodownload = todownload
urlstodownload = urls
fileseriesnames = filenames
### Download the files
for i in trange(len(urlstodownload)):
### Attempt the download
attempts = 0
while attempts < numattempts:
try:
urllib.request.urlretrieve(
urlstodownload[i], filepath + fileseriesnames[i])
break
except (HTTPError, IncompleteRead, EOFError) as err:
print(urlstodownload[i])
print(filestodownload[i])
print('Rebuffed on attempt # {} at {} by "{}". '
'Will retry in {} seconds.'.format(
attempts, pvvm.toolbox.nowtime(), err, sleeptime))
attempts += 1
time.sleep(sleeptime)
###########################
### Geographic manipulation
def rowlatlon2x(row):
latrad = row['latitude'] * math.pi / 180
lonrad = row['longitude'] * math.pi / 180
x = math.cos(latrad) * math.cos(lonrad)
return x
def rowlatlon2y(row):
latrad = row['latitude'] * math.pi / 180
lonrad = row['longitude'] * math.pi / 180
y = math.cos(latrad) * math.sin(lonrad)
return y
def rowlatlon2z(row):
latrad = row['latitude'] * math.pi / 180
z = math.sin(latrad)
return z
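# Illustrative usage (assumed): the three helpers above map latitude/longitude columns
# to Cartesian coordinates on the unit sphere and are typically applied row-wise, e.g.
#   df['x'] = df.apply(rowlatlon2x, axis=1)
#   df['y'] = df.apply(rowlatlon2y, axis=1)
#   df['z'] = df.apply(rowlatlon2z, axis=1)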
############
### ISO LMPs
"""
Note: These scripts worked as of early 2018, but MISO, PJM, and NYISO have since
changed their websites, and CAISO has removed data prior to 20150303. Scripts
are included here for documentary purposes and as a resource for future
data collection, but are unlikely to work given ISO website changes.
"""
def download_caiso_lmp_allnodes(market, start, filepathout,
product='LMP', numattempts=200, waittime=10):
urlstart = 'http://oasis.caiso.com/oasisapi/GroupZip?'
columnsout = [
'INTERVALSTARTTIME_GMT', 'NODE', 'MW',
'OPR_DT', 'OPR_HR', 'OPR_INTERVAL']
if market in ['RTM', 'HASP', 'RTPD']:
interval = pd.Timedelta('1H')
elif market in ['DAM', 'RUC']:
        interval = pd.Timedelta('1D')
"""Defines the clustering algorithms and handles running them. Primarily used for analysis and instance space generation.
"""
from collections import defaultdict
from pathlib import Path
from itertools import zip_longest
import warnings
import inspect
import numpy as np
import pandas as pd
import sklearn.cluster
import sklearn.mixture
from sklearn.metrics import adjusted_rand_score
from scipy.spatial.distance import pdist, squareform
import hawks.utils
import hawks.problem_features
warnings.filterwarnings(
action='ignore', category=FutureWarning, module="sklearn"
)
def define_cluster_algs(seed):
"""Defines some default clustering algorithms. Currently uses four simple algorithms: average-linkage, GMM, K-Means++, and single-linkage.
Args:
seed (int): Random seed given to the algorithms. ``int`` is generally fine, but depends on the algorithm implementation.
Returns:
dict: A dict where each key is the name of the algorithm, with ``"class"`` as a callable to create (and fit) the model, any ``"kwargs"`` it needs, and ``"k_multiplier"`` if anything other than the true number of clusters is desired.
.. todo::
Extend functionality for arbitrary clustering algorithms
"""
cluster_algs = {
"Average-Linkage": {
"class": getattr(sklearn.cluster, "AgglomerativeClustering"),
"kwargs": {
"linkage": "average",
"n_clusters": None
},
"k_multiplier": None
},
"Average-Linkage (2K)": {
"class": getattr(sklearn.cluster, "AgglomerativeClustering"),
"kwargs": {
"linkage": "average",
"n_clusters": None
},
"k_multiplier": 2.0
},
"GMM": {
"class": getattr(sklearn.mixture, "GaussianMixture"),
"kwargs": {
"n_components": None,
"random_state": seed,
"n_init": 3
},
"k_multiplier": None
},
"K-Means++": {
"class": getattr(sklearn.cluster, "KMeans"),
"kwargs": {
"n_clusters": None,
"random_state": seed,
"n_init": 10
},
"k_multiplier": None
},
"Single-Linkage": {
"class": getattr(sklearn.cluster, "AgglomerativeClustering"),
"kwargs": {
"linkage": "single",
"n_clusters": None
},
"k_multiplier": None
},
"Single-Linkage (2K)": {
"class": getattr(sklearn.cluster, "AgglomerativeClustering"),
"kwargs": {
"linkage": "single",
"n_clusters": None
},
"k_multiplier": 2.0
}
}
return cluster_algs
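# A minimal sketch of how the returned dict is consumed (mirrors the loop in
# run_clustering below; `data` and the cluster count are made up for illustration,
# so the lines are left commented out):
#
#   algs = define_cluster_algs(seed=42)
#   entry = algs["K-Means++"]
#   entry["kwargs"]["n_clusters"] = 2   # normally filled in via determine_num_clusters
#   model = entry["class"](**entry["kwargs"])
#   model.fit(data)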
def extract_datasets(generator=None, datasets=None, label_sets=None):
# Something needs to be given
if generator is None and datasets is None:
raise ValueError(f"No generator or datasets have been given - there's nothing to evaluate!")
# Extract the datasets and labels from the generator
if generator is not None:
# Create local references to datasets and label_sets
datasets, label_sets, configs = generator.get_best_dataset(return_config=True)
# Get a flat list of the config id for each one of the datasets
config_nums = np.arange(
len(configs)
).repeat(
generator.full_config["hawks"]["num_runs"]
).tolist()
# Otherwise just set the config number to be None's
else:
config_nums = [None]*len(datasets)
# Test for unequal number of datasets and label sets
if len(datasets) != len(label_sets):
raise ValueError("The number of datasets is not equal to the number of labels")
return datasets, label_sets, config_nums
def setup_folder(save_folder, generator):
# Prioritize a given save folder
if save_folder is not None:
base_folder = Path(save_folder)
# Or use the generator's folder
elif generator is not None and generator.base_folder is not None:
base_folder = generator.base_folder
# Use current date in cwd as last resort
else:
base_folder = Path.cwd() / f"clustering_{hawks.utils.get_date()}"
return base_folder
def analyse_datasets(generator=None, datasets=None, label_sets=None, cluster_subset=None, feature_subset=None, seed=None, source="HAWKS", prev_df=None, clustering=True, feature_space=True, save=True, save_folder=None, filename="dataset_analysis"):
"""Function to analyze the datasets, either by their :py:mod:`~hawks.problem_features`, clustering algorithm performance, or both.
Args:
generator (:class:`~hawks.generator.BaseGenerator`, optional): HAWKS generator instance (that contains datasets). Defaults to None.
datasets (list, optional): A list of the datasets to be examined. Defaults to None.
label_sets (list, optional): A list of labels that match the list of datasets. Defaults to None.
cluster_subset (list, optional): A list of clustering algorithms to use. Defaults to None, where all default clustering algorithms (specified in :func:`~hawks.analysis.define_cluster_algs`) are used.
feature_subset (list, optional): A list of problem features to use. Defaults to None, where all problem features (specified in :mod:`~hawks.problem_features`) are used.
seed (int, optional): Random seed number. Defaults to None, where it is randomly selected.
source (str, optional): Name of the set of datasets. Useful for organizing/analyzing/plotting results. Defaults to "HAWKS".
prev_df (:py:class:`~pandas.DataFrame`, optional): Pass in a previous DataFrame, with which the results are added to. Defaults to None, creating a blank DataFrame.
clustering (bool, optional): Whether to run clustering algorithms on the datasets or not. Defaults to True.
feature_space (bool, optional): Whether to run the problem features on the datasets or not. Defaults to True.
save (bool, optional): Whether to save the results or not. Defaults to True.
save_folder (str, :class:`pathlib.Path`, optional): Where to save the results. Defaults to None, where the location of the :class:`~hawks.generator.BaseGenerator` is used. If no :class:`~hawks.generator.BaseGenerator` instance was given, create a folder in the working directory.
filename (str, optional): Name of the CSV file to be saved. Defaults to "dataset_analysis".
Returns:
(tuple): 2-element tuple containing:
:py:class:`~pandas.DataFrame`: DataFrame with results for each dataset.
:py:class:`pathlib.Path`: The path to the folder where the results are saved.
"""
if clustering is False and feature_space is False:
raise ValueError("At least one of `clustering` or `feature_space` must be selected, otherwise there is nothing to do")
# Extract the datasets
datasets, label_sets, config_nums = extract_datasets(
generator=generator,
datasets=datasets,
label_sets=label_sets
)
# Setup the save folder
if save or save_folder is not None:
base_folder = setup_folder(save_folder, generator)
# If a path is given for the save folder, assume saving is wanted
save = True
else:
base_folder = None
# Initialize the dataframe
df = pd.DataFrame()
# Provided seed has priority, then seed from generator
if seed is None and generator is not None:
seed = generator.seed_num
# Otherwise random seed, but raise warning due to unreliable reproducibility
elif seed is None and generator is None:
seed = np.random.randint(100)
warnings.warn(
message=f"No seed was provided, using {seed} instead",
category=UserWarning
)
# Setup and run feature space functions
if feature_space:
# Get the functions from problem_features.py (not imported)
feature_funcs = dict(
[func_tup for func_tup in inspect.getmembers(hawks.problem_features, inspect.isfunction) if func_tup[1].__module__ == "hawks.problem_features"]
)
# If a feature subset has been given, remove those functions
if feature_subset is not None:
feature_dict = {}
for feature_name in feature_subset:
try:
feature_dict[feature_name] = feature_funcs[feature_name]
except KeyError as e:
raise Exception(f"{feature_name} cannot be found, must be in: {feature_funcs.keys()}") from e
else:
feature_dict = feature_funcs
feature_df = run_feature_space(datasets, label_sets, config_nums, feature_dict, df, source)
# Setup and run clustering algorithms
if clustering:
# Get the defined clustering algs
cluster_algs = define_cluster_algs(seed)
# If a subset of algorithms is given, then select only those
if cluster_subset is not None:
alg_dict = {}
for alg_name in cluster_subset:
try:
alg_dict[alg_name] = cluster_algs[alg_name]
except KeyError as e:
raise Exception(f"{alg_name} cannot be found, must be in: {cluster_algs.keys()}") from e
else:
alg_dict = cluster_algs
# Run the clustering algorithms
cluster_df = run_clustering(datasets, label_sets, config_nums, alg_dict, df, source)
# Join the dataframes if need be
if feature_space and clustering:
# Need to merge on source and dataset number
# Use concat to handle when config_num may be undefined (rather than pd.merge)
final_df = pd.concat([cluster_df, feature_df], axis=1)
final_df = final_df.loc[:, ~final_df.columns.duplicated()]
elif feature_space:
final_df = feature_df
elif clustering:
final_df = cluster_df
if prev_df is not None:
final_df = prev_df.append(
final_df,
ignore_index=True,
sort=False
)
# Save the full dataframe
if save:
base_folder.mkdir(parents=True, exist_ok=True)
hawks.utils.df_to_csv(
df=final_df,
path=base_folder,
filename=filename
)
return final_df, base_folder
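# A minimal usage sketch for analyse_datasets with externally supplied data
# (hypothetical random arrays, no HAWKS generator; illustration only, hence commented):
#
#   datasets = [np.random.rand(100, 2), np.random.rand(80, 2)]
#   label_sets = [np.random.randint(0, 2, 100), np.random.randint(0, 2, 80)]
#   results_df, folder = analyse_datasets(
#       datasets=datasets, label_sets=label_sets,
#       seed=42, source="external", save=False)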
def run_clustering(datasets, label_sets, config_nums, alg_dict, df, source):
"""Function to actually run the clustering algorithms and add results to the DataFrame.
Args:
datasets (list, optional): A list of the datasets to be examined. Defaults to None.
label_sets (list, optional): A list of labels that match the list of datasets. Defaults to None.
config_nums (list): A list of the config numbers (only relevant for HAWKS, not external datasets). Allows linking of datasets to parameter configuration.
alg_dict (dict): Dictionary of the clustering algorithms. Defined in :func:`~hawks.analysis.define_cluster_algs`.
df (:py:class:`~pandas.DataFrame`): DataFrame to add the results to.
source (str): Name of the set of datasets.
Returns:
:py:class:`~pandas.DataFrame`: DataFrame with the clustering results.
"""
# Loop over the datasets
for dataset_num, (data, labels, config_num) in enumerate(zip_longest(datasets, label_sets, config_nums)):
# Create the defaultdict
res_dict = defaultdict(list)
# Add the constants to the dict
res_dict["source"].append(source)
res_dict["config_num"].append(config_num)
res_dict["dataset_num"].append(dataset_num)
# Add some extra general info about the dataset here
res_dict["num_examples"].append(int(data.shape[0]))
res_dict["num_clusters"].append(int(np.unique(labels).shape[0]))
# Loop over the dict of clustering algorithms
for name, d in alg_dict.items():
# Add in the number of clusters
d["kwargs"] = determine_num_clusters(name, d["kwargs"], d["k_multiplier"], labels)
# Increment the seed to avoid pattern in datasets
if "random_state" in d["kwargs"]:
d["kwargs"]["random_state"] += 1
# Pass the kwargs to the relevant algorithm class
alg = d["class"](**d["kwargs"])
# Run the algorithm
alg.fit(data)
# Predict labels and compare if we have the truth
if labels is not None:
# import pdb; pdb.set_trace()
# Obtain labels for this algorithm on this dataset
if hasattr(alg, "labels_"):
                    labels_pred = alg.labels_.astype(int)
else:
labels_pred = alg.predict(data)
ari_score = adjusted_rand_score(labels, labels_pred)
# No labels, so just set scores to NaN
else:
ari_score = np.nan
# Add the cluster name and scores
res_dict[f"c_{name}"].append(ari_score)
# Calculate evaluation metrics and add to df
# Not particularly efficient
        df = df.append(
            pd.DataFrame.from_dict(res_dict)
        )
    return df
#To Do Baranski:
# -Build the Events
# -- Still needs extending so that not only the states but also the transitions are stored with extra info (length, step)
# -- I will build a dedicated helper function for that
# -Print the Events in 3d space
# -Clustering
# --Here I use KMeans; need to read from the documentation how higher-dimensional data is handled
# --Also try GMM. -> Testing how many steps there are will be interesting
#   One could then also define a remainder group, which would have to be the overlapping events.
# --Ward clustering may be a much better idea (in particular the option to set a connectivity matrix)
#
# -Print Events with Clustering
# -Build the FHMMs
# -- port from Matlab
# -Build the sequences
# -- port from Matlab
# Extracting the events is best built into the training step, like preprocessing.
# => Although that of course only makes sense for a single dataset
# Comments on the comparison with sklearn:
# 1. kwargs is rather discouraged there (see the BaseEstimator docs)
# 2. There is a score function
#    In the NilmTK example it is only added from the outside via this predict helper function
#    Described in sklearn in the mixins. But I would say the docs are wrong in claiming that ALL estimators have score().
# General packages
from __future__ import print_function, division
from collections import OrderedDict, deque
import numpy as np
import pickle
import os
import sys
from itertools import product
# Packages for data handling and machine learning
import pandas as pd
import matplotlib.pyplot as plt
from sklearn_pandas import DataFrameMapper
import sklearn.preprocessing, sklearn.decomposition, sklearn.linear_model, sklearn.pipeline, sklearn.metrics
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cluster import KMeans, AgglomerativeClustering
import sklearn.metrics
# Packages from nilmtk
from nilmtk.disaggregate import UnsupervisedDisaggregator
from nilmtk.feature_detectors.cluster import hart85_means_shift_cluster
from nilmtk.feature_detectors.steady_states import find_steady_states_transients
# Fix the seed for repeatability of experiments
SEED = 42
np.random.seed(SEED)
class CustomBaranski(UnsupervisedDisaggregator):
""" Customized Baranski Algorithm.
    Allows multiple states per machine.
    Extensions that were left out in NILM Eval:
    1. The high-frequency events are grouped together (this was already stated on page 2 of the paper)
    2. The extension is that complex events are also taken into account. See the follow-up paper.
    Problems that can be tackled directly here, which were not solved in Matlab either:
    1. The appliances can change over time -> Since everything is considered at once, this basically does not matter here.
    2. Large data volumes: computing several years of 3-phase data is problematic.
    -> The feature space is MUCH smaller
    -> Approaches for recognizing repeating patterns
    => Complexity of the search algorithm
    Data structure: write all events into one series.
    Attributes
    ----------
    model : dict
        Nothing here yet.
    """
#region All disaggregator functions which are not used at the moment
    def export_model(self, filename):
        raise NotImplementedError("May need to be copied over from the other classes.")
    def import_model(self, filename):
        raise NotImplementedError("May need to be copied over from the other classes.")
#endregion
#region Used disaggregator functions
def __init__(self):
self.MODEL_NAME = "BARANSKI"
self.cols = [("power", "active")]
self.noise_level = 70
self.state_threshold = 15
self.max_num_clusters = 12 # from matlab project
def train(self, metergroup, **load_kwargs):
""" Gets a site meter and trains the model based on it.
Goes chunkwise through the dataset and returns the events.
In the end does a clustering for identifying the events.
For signature description see basic class: It should get a sitemeter for unsupervised learning.
Parameters
----------
metergroup : a nilmtk.MeterGroup object
For custom baranski (is unsupervised), this is a single site meter.
"""
# Go through all parts and extract events
events = []
        # 1. Get Events (already available) -> This should be extended -> GetSignatures
        # -> separate into the different possible signature types
        # --> One separator as base class, then several separators for the individual effect types
        # -Rising Spike
        # -Falling Spike
        # -Pulse
        # -Fluctuation
        # -Quick Vibrate
        # -Gradual Falling
        # -Flat
        # --> Work with masking: an event is masked when RisingSpike/FallingSpike, masked when Pulse
        #
        # --> Each signature has its own specific properties
        # --> Some should contain a wildcard
        # I want to build a 3d pandas structure here
#events = self._load_if_available()
#if not events is None:
# self.events = events
# return
events = pd.DataFrame()
for i, elec in enumerate(metergroup.all_meters()):
print("Find Events for " + str(elec.metadata))
transitions = find_steady_states_transients(
elec, cols=self.cols, state_threshold=self.state_threshold,
noise_level=self.noise_level, **load_kwargs)[1]
# Mark as on- or off-event
transitions['type'] = transitions >= 0
transitions['meter'] = elec
events = events.append(transitions)
events.index.rename('time', inplace=True)
events.set_index(['type', 'meter'], append=True, inplace=True)
events = events.reorder_levels([2,1,0])
events.sort_index(inplace=True)
        # Maybe also find the combinations here
self.events = events
#self._save(events)
        # 2. Cluster the events using different cluster methodologies (assignment happens automatically)
        # Ah, there is a predict after all: namely assigning elements to clusters
clusters = None #self. _load_if_available(what='cluster')
if clusters is None:
for curGroup, groupEvents in events.groupby(['meter','type']):
centroids, assignments = self._cluster_events(groupEvents, max_num_clusters=self.max_num_clusters, method='kmeans')
events.loc[curGroup,'cluster'] = assignments
#self._save(events, 'cluster')
else:
pass
#events = clusters
self.model = events
def train_on_chunk(self, chunk):
"""
This function is actually not needed as the chunkwise processing is included inside the find_steady_states_transients function.
This function goes through the power line and already identifies the events.
For signature description see basic class: Only gets the chunk from the sitemeter, as it is unsupervised.
Parameters
----------
chunk : pd.DataFrame where each column represents a
disaggregated appliance
meter : ElecMeter for this chunk
"""
pass
def disaggregate(self, mains, output_datastore, **load_kwargs):
"""Disaggregate mains according to the model learnt previously.
At the moment not used as we use the predict function in the main
script.
Parameters
----------
        mains : nilmtk.ElecMeter or nilmtk.MeterGroup => in fact the 3 phases
        output_datastore : instance of nilmtk.DataStore subclass
            For storing power predictions from disaggregation algorithm. => used in a second step
**load_kwargs : key word arguments
Passed to `mains.power_series(**kwargs)`
"""
# 3. Generate Finite State Machines (Test whether clustering fits)
        # 4. Build sequences for each state machine
for meter, content in clusters.groupby(level=0):
length = []
for type, innerContent in content.groupby(level=1):
length.append(len(innerContent.reset_index().cluster.unique()))
            if len(set(length)) > 1:
                raise Exception("different number of clusters")
        clusters['appliance'] = pd.Series()
# -*- coding: utf-8 -*-
"""
First code created on Tue Aug 14 14:59:13 2018
@author: <NAME> - <EMAIL>
CARS Functions to read the input files from input folder:
activity_data
emissions_factor
fleet_mix
temporal_profile
These functions read the input data.
"""
import os, sys, time, matplotlib, glob, fnmatch
import xarray as xr
import pandas as pd
import geopandas as gpd
import numpy as np
import datetime as dt
from shapely.geometry import Polygon
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap
import cartopy.crs as ccrs
import geopandas.tools
import shapely
from shapely.geometry import *
total_start_time = time.time()
case_name = 'Seoul_1x1km'
home_dir = 'C:/Users/pedruzzi/OneDrive - University of North Carolina at Chapel Hill/0000_EUA_IE/001_mobile_source_inventory/CARS_source_code'
src_dir = home_dir+'/src'
input_dir = home_dir+'/input'
inter_dir = home_dir+'/intermediate'
output_dir = home_dir+'/output_seoul'
if not os.path.exists(home_dir+'/intermediate'):
print('*** intermediate directory does not exist: Creating a new one')
os.makedirs(home_dir+'/intermediate/')
#fleet_mix_file = 'age_distribution.csv'
fleet_mix_Age_file = 'Seoul_age_distribution_2015.csv' #''age_distribution_Activity_Data_Seoul_2017.csv' #'
Emis_Factor_list = ['gasoline.csv','diesel.csv','cng.csv','lpg.csv']
#EF_Gasoline_file = 'gasoline.csv'
#EF_Diesel_file = 'diesel.csv'
#EF_CNG_file = 'cng.csv'
#EF_LPG_file = 'lpg.csv'
avg_SPD_Dist_file = 'avgSpeedDistribution_rev_00.csv'
ambient_temp = 15.0
plot_24 = 'no '#'yes'
link_shape = '/shapes'+'/GIS Korea Road Link Activity Data'+ \
'/shape_seoul'+'/seoul_eup_links_Avg_VKT_UTM52N.shp'
link_shape_att = ['link_id' , 'EMD_CD' , 'EMD_ENG_NM', 'EMD_KOR_NM',
'road_type', 'speed', 'length_2', 'Avg_VKT']
#
#link_shape = '/shapes/GIS Korea Road Link Activity Data'+ \
# '/shape_seoul/seoul_eup_road_by_county_UTM52N.shp'
#link_shape_att = ['LINK_ID' , 'EMD_CD' , 'EMD_ENG_NM', 'ROAD_NAME',
# 'ROAD_RANK', 'MAX_SPD', 'length_2', 'Avg_VKT']
county_shape = '/shapes/GIS Korea Road Link Activity Data'+ \
'/shape_seoul/seoul_eup_UTM52N.shp'
#link_shape = '/shapes/GIS Korea Road Link Activity Data'+ \
# '/shape_soul_gyeonggi/soul_gyeonggi_Links_UTM52N.shp'
#link_shape_att = ['link_id' , 'EMD_CD' , 'EMD_ENG_NM', 'EMD_ENG_NM',
# 'link_type', 'speed', 'length',]
#
#county_shape = '/shapes/GIS Korea Road Link Activity Data'+ \
# '/shape_soul_gyeonggi/soul_gyeonggi_eup_UTM52N.shp'
temporal_profile_folder = input_dir+'/temporal_profile'
temporal_profile_file = 'temporal_profile_SK.csv'
temporal_cross_ref_file = 'temporal_cross_ref_SK.csv'
temp = np.asarray([.0094, .0060, .0050, .0065, .0126, .0347, .0591, .0605, .0558, .0545,
.0536, .0532, .0538, .0539, .0559, .0569, .0580, .0611, .0586, .0525, .0495,.0419, .0308, .0162])
grid_size = 1000
activity_file = 'seoul_2017.csv' #'seoul_gyeonggi_AD.csv' #'seoul_all.csv' # #'activity_data.csv'
class EmissionFactor_table:
def __init__(self, dataframe, name ):
self.dataframe = dataframe
self.name = name.split('.')[0]
class Activity_Data_table:
def __init__(self, dataframe, vhc_name, years):
self.data = dataframe
self.fullname = vhc_name
self.years = years
class Roads_Grid_table:
def __init__(self, grid_dataframe, surrogate, roads_df):
self.grid = grid_dataframe
self.surrogate = surrogate
self.roads_df = roads_df
class EF_Grid_table:
def __init__(self, EF_dataframe, EF_years, VHC_fullname_EF, Fuel_EF, Polls_EF):
self.data = EF_dataframe
self.EF_years = EF_years
self.EF_fullname = VHC_fullname_EF
self.EF_fuels = Fuel_EF
self.EF_polls = Polls_EF
class EF_Speed_Distribution:
def __init__(self, SPD_dataframe, Speeds, Speed_Bins):
self.data = SPD_dataframe
self.spd = Speeds
self.spd_bins = Speed_Bins
class Emissions_table:
def __init__(self, County_Emissions, Road_Emissions, County_Emissions_GeoRef, County, Years):
self.county_emis = County_Emissions
self.road_emis = Road_Emissions
self.county_geo = County_Emissions_GeoRef
self.county = County
self.years = Years
# =============================================================================
# Function to read the Fleet Mix
# =============================================================================
def read_flet_mix(input_dir, fleet_mix_file, sep = ';'):
#sep = ';'
name = '{0}{1}{2}'.format(input_dir,'/age_distribuition/',fleet_mix_file)
if os.path.exists(name) == True:
print ('')
print ('Reading Fleet Mix ...')
print (name)
fleet_mix = pd.read_csv(name, sep = sep).fillna(0)
# fleet_mix['Name'] = fleet_mix.Vehicle.str.cat(vhc_type[['Types','Fuel']], sep=' ')
else:
print ('')
print('*** ERROR ABORT ***: Fleet Mix file "" ', fleet_mix_file, ' "" does not exist!')
sys.exit('CARS preProcessor can not read fleet mix file')
return fleet_mix
# =============================================================================
FM = read_flet_mix(input_dir,fleet_mix_Age_file)
year_FM = [int(i) for i in list(FM.columns[4:])]
vhc_type = FM.loc[:,['Vehicle','Types','Fuel']]
vhc_type['Name'] = vhc_type.Vehicle.str.cat(vhc_type[['Types','Fuel']], sep=' ')
# =============================================================================
# Function to read the temporal profile
# =============================================================================
def read_temporal_profile(input_dir, temporal_profile_file, sep = ';'):
name = '{0}{1}{2}'.format(input_dir,'/temporal_profile/',temporal_profile_file)
if os.path.exists(name) == True:
print ('')
print ('Reading Temporal profile ...')
print (name)
TP = pd.read_csv(name, sep = sep).fillna(np.nan)
else:
print ('')
print('*** ERROR ABORT ***: Temporal Profile ', temporal_profile_file, ' "" does not exist!')
sys.exit('CARS preProcessor can not read Temporal profile')
return TP
TP = read_temporal_profile(input_dir,temporal_profile_file)
# =============================================================================
# Function to read the temporal profile cross reference
# =============================================================================
def read_temporal_Cross_ref(input_dir, temporal_cross_ref_file, sep = ';'):
name = '{0}{1}{2}'.format(input_dir,'/temporal_profile/',temporal_cross_ref_file)
if os.path.exists(name) == True:
print ('')
print ('Reading Temporal Cross reference ...')
print (name)
TPCR = pd.read_csv(name, sep = sep).fillna(np.nan)
else:
print ('')
print('*** ERROR ABORT ***: Temporal Profile cross reference ', temporal_cross_ref_file, ' "" does not exist!')
sys.exit('CARS preProcessor can not read Temporal Profile cross reference')
return TPCR
TPCR = read_temporal_Cross_ref(input_dir, temporal_cross_ref_file)
# =============================================================================
# Function to read the Speed average distribution
# =============================================================================
def read_avgSpeedDistribution(input_dir, avg_Speed_Distribution_file, sep = ';'):
start_time = time.time()
input_dir = input_dir
    spd_file = avg_Speed_Distribution_file # previously hard-coded to 'avgSpeedDistribution_rev_00.csv' #ef_file #['gasoline.csv'] #
sep = ';'
final_df = pd.DataFrame()
name = '{0}{1}{2}'.format(input_dir,'/emissions_factor/',spd_file)
if os.path.exists(name) == True:
print ('')
print ('Reading Average Speed Distribution table ...')
print (name)
ASD = pd.read_csv(name, sep = sep).fillna(np.nan)
else:
print ('')
print('*** ERROR ABORT ***: Temporal Profile cross reference ', temporal_cross_ref_file, ' "" does not exist!')
sys.exit('CARS preProcessor can not read Temporal Profile cross reference')
out_spd_bins = pd.DataFrame({'spd_bins': [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]})
out_spd = pd.DataFrame({'spd': list(ASD.Speed)})
return EF_Speed_Distribution(ASD, out_spd, out_spd_bins)
avgSpeedDist = read_avgSpeedDistribution(input_dir, avg_SPD_Dist_file, sep = ';')
# =============================================================================
# Function to read the Activity Data
# =============================================================================
def read_activity_data_csv_SK(input_dir, ad_file, sep = ';'):
start_time = time.time()
ad_file = ad_file
# ad_file = activity_file
# sep = ','
name = '{0}{1}{2}'.format(input_dir,'/activity_data/',ad_file)
if os.path.exists(name) == True:
print ('')
print ('Reading Activity Data table ...')
print (name)
activity_data = (pd.read_csv(name, sep = sep, encoding = 'utf-8')).fillna(0)
activity_data.loc[:,'Vehicle'] = activity_data.loc[:,'Vehicle'].str.lower()
activity_data.loc[:,'Fuel'] = activity_data.loc[:,'Fuel'].str.lower()
activity_data.loc[:,'Types'] = activity_data.loc[:,'Types'].str.lower()
activity_data = activity_data.rename(columns={'Region_code' : 'region_cd'})
activity_data.loc[:,'Manufacture_date'] = (activity_data.loc[:,'Manufacture_date'] / 10000).astype(int)
activity_data['FullName'] = activity_data.Vehicle.str.cat(activity_data[['Types','Fuel']], sep='_')
## gasoline_correction = (activity_data.loc[activity_data.Fuel == 'gasoline','Daily_VKT']) * 3 #mutiplying the gasoline VKT by 3 as BH asked
## activity_data.loc[activity_data.Fuel == 'gasoline','Daily_VKT'] = gasoline_correction
sedan_correction = (activity_data.loc[(activity_data.Vehicle == 'sedan'),'Daily_VKT']) * 3
activity_data.loc[(activity_data.Vehicle == 'sedan'),'Daily_VKT'] = sedan_correction
suv_correction = (activity_data.loc[(activity_data.Vehicle == 'suv'), 'Daily_VKT']) * 3
activity_data.loc[(activity_data.Vehicle == 'suv'), 'Daily_VKT'] = suv_correction
grouped = activity_data.groupby(['Manufacture_date','region_cd','FullName']).sum()
grouped = grouped.unstack().fillna(0)
grouped.columns = [x[1] for x in grouped.columns]
grouped.reset_index(inplace=True)
grouped.columns = grouped.columns.str.lower()
grouped = grouped.sort_values(by=['region_cd','manufacture_date']).reset_index(drop=True)
vhc_names = pd.DataFrame({'vhc_name' : list(activity_data.FullName.unique())})
vhc_years = pd.DataFrame({'vhc_years' : list(grouped.manufacture_date.unique())})
out_table = Activity_Data_table(grouped, vhc_names,vhc_years)
else:
print('*** ERROR ABORT ***: Emissions Factor file "" ', ad_file, ' "" does not exist!')
sys.exit('CARS preProcessor can not read Emissions Factor file')
run_time = ((time.time() - start_time))
print("--- %f seconds ---" % (run_time))
print("--- %f minutes ---" % (run_time/60))
print("--- %f Hours ---" % (run_time/3600))
return out_table
## =============================================================================
AD_SK = read_activity_data_csv_SK(input_dir, activity_file, sep = ',')
# =============================================================================
# Function to read link level shapefile
# =============================================================================
def roads_grid_surrogate_inf(input_dir, file_name, Link_ID_attr,
Region_Code, Region_Name,
RD_name_attr, RD_type_attr,
Speed_attr, Link_length, VKT_attr, Unit_meters = True):
start_time = time.time()
Link_ID_attr = Link_ID_attr
Region_CD = Region_Code
Region_NM = Region_Name
RD_name_attr = RD_name_attr
RD_type_attr = RD_type_attr
Speed_attr = Speed_attr
Link_length = Link_length
VKT_attr = VKT_attr
file_name = link_shape
shp_file = '{0}{1}'.format(input_dir,file_name)
if os.path.exists(shp_file) == True:
print ('')
print ('Reading Link Shapefile ...')
print (shp_file)
prj_file = shp_file.replace('.shp', '.prj')
prj = [l.strip() for l in open(prj_file,'r')][0]
lnk_shp = gpd.read_file(shp_file)
out_roads = lnk_shp.loc[:,['geometry',Link_ID_attr, Region_CD, Region_NM,
RD_name_attr, RD_type_attr, Speed_attr, Link_length, VKT_attr]]
number_links = np.arange(0,len(out_roads))
# changing the name of columns to keep a standard
out_roads = out_roads.rename(columns={Link_ID_attr : 'link_id'})
out_roads = out_roads.rename(columns={Region_CD : 'region_cd'})
out_roads = out_roads.rename(columns={Region_NM : 'region_nm'})
out_roads = out_roads.rename(columns={RD_name_attr : 'road_name'})
out_roads = out_roads.rename(columns={RD_type_attr : 'road_type'})
out_roads = out_roads.rename(columns={Speed_attr : 'max_speed'})
out_roads = out_roads.rename(columns={Link_length : 'link_length'})
out_roads = out_roads.rename(columns={VKT_attr : 'vkt_avg'})
out_roads['number_links'] = number_links
out_roads['activity_data'] = (out_roads['link_length'] * 0.0).astype(float)
out_roads['region_cd'] = out_roads['region_cd'].astype(int)
out_roads['road_type'] = out_roads['road_type'].astype(int)
out_roads['link_id'] = out_roads['link_id'].astype(np.int64)
out_roads['max_speed'] = out_roads['max_speed'].astype(float)
out_roads['link_length'] = out_roads['link_length'].astype(float)
out_roads['number_links'] = out_roads['number_links'].astype(int)
out_roads['geometry_BKP'] = out_roads.geometry
out_roads['geometry'] = out_roads.buffer(0.1)
out_roads['total_area'] = out_roads.area
out_roads['link_split_total'] = (out_roads['link_length'] * 0.0).astype(float)
out_roads['link_split_county'] = (out_roads['link_length'] * 0.0).astype(float)
out_roads['vkt_split_county'] = (out_roads['link_length'] * 0.0).astype(float)
reduc = 0.6 #60% reduction as BH asked
rt = {101 : 80 *reduc, 102 : 60 *reduc, 103 : 60 *reduc, 104 : 50 *reduc, #60% reduction as BH asked
105 : 30 *reduc, 106 : 30 *reduc, 107 : 30 *reduc, 108 : 30 *reduc}
for igeocd in out_roads.region_cd.unique():
aux_split_county = out_roads.link_length.loc[out_roads.region_cd == igeocd].values / \
(out_roads.link_length.loc[out_roads.region_cd == igeocd]).sum()
out_roads.loc[out_roads.region_cd == igeocd, ['link_split_county']] = aux_split_county
vkt_split_county = out_roads.vkt_avg.loc[out_roads.region_cd == igeocd].values / \
(out_roads.vkt_avg.loc[out_roads.region_cd == igeocd]).sum()
out_roads.loc[out_roads.region_cd == igeocd, ['vkt_split_county']] = vkt_split_county
aux_split_total = out_roads.link_length.values / out_roads.link_length.sum()
out_roads.loc[:,['link_split_total']] = aux_split_total
for key, ispd in rt.items():
out_roads.loc[(out_roads.loc[:,'road_type'] == key),'max_speed'] = ispd
# Creating grid
roads_bounds = out_roads.bounds
xmin = roads_bounds.minx.min() - grid_size
xmax = roads_bounds.maxx.max() + grid_size
ymin = roads_bounds.miny.min() - grid_size
ymax = roads_bounds.maxy.max() + grid_size
cols = int(abs(xmax - xmin) / grid_size)
rows = int(abs(ymax - ymin) / grid_size)
print(cols,rows)
lat_list = (np.arange(ymin,ymax,grid_size))
lon_list = (np.arange(xmin,xmax,grid_size))
polygons = []
grid_row = []
grid_col = []
for j in range(0,rows):
yini = lat_list[j]
yfin = lat_list[j+1]
for i in range(0,cols):
grid_row.append(j+1)
grid_col.append(i+1)
xini = lon_list[i]
xfin = lon_list[i+1]
polygons.append(Polygon([(xini, yini), (xfin, yini), (xfin, yfin), (xini, yfin), (xini, yini)]))
crs = out_roads.crs #{'init': 'epsg:4326'}
grid_ID = [x for x in range (1,len(polygons)+1)]
grid = gpd.GeoDataFrame({'geometry':polygons, 'grid_id':grid_ID,
'row':grid_row, 'col':grid_col}, crs=crs)
grid.crs = crs
# exporting grid as shapefile
grid.to_file(filename = output_dir+'/grid_{0}.shp'.format(case_name), driver='ESRI Shapefile',crs_wkt=prj)
#creating the surrogate
surrogate = gpd.overlay(out_roads, grid, how='intersection').reset_index(drop=True)
surrogate['split_area'] = surrogate.area
        surrogate['weight_factor'] = surrogate.area / surrogate.total_area # total_area attribute comes from the roads dataframe
surrogate = surrogate.loc[:,['geometry', 'link_id', 'region_cd',
'link_length', 'total_area', 'split_area',
'grid_id','row', 'col', 'weight_factor']]
surrogate.to_file(filename = output_dir+'/road_grid_surrogate_{0}.shp'.format(case_name), driver='ESRI Shapefile',crs_wkt=prj)
out_roads = out_roads.drop(columns=['geometry'])
out_roads = out_roads.rename(columns={'geometry_BKP': 'geometry'}).set_geometry('geometry')
else:
print('*** ERROR ABORT ***: Shapefile "" ', shp_file, ' "" does not exist!')
sys.exit('CARS preProcessor can not read link Shapefile file')
run_time = ((time.time() - start_time))
print("--- %f seconds ---" % (run_time))
print("--- %f minutes ---" % (run_time/60))
print("--- %f Hours ---" % (run_time/3600))
return Roads_Grid_table(grid, surrogate, out_roads)
# =============================================================================
roads_RGS = roads_grid_surrogate_inf(input_dir,link_shape, link_shape_att[0],
link_shape_att[1],link_shape_att[2],
link_shape_att[3],link_shape_att[4],
link_shape_att[5],link_shape_att[6], link_shape_att[7], Unit_meters = True )
# =============================================================================
# Function to read link level shapefile
# =============================================================================
def processing_County_shape(input_dir, file_name, Region_CD, Region_name_attr,
Region_name_attr_SK):
start_time = time.time()
Region_Geocode = Region_CD
Region_name_attr = Region_name_attr
Region_name_attr_SK = Region_name_attr_SK
# Link_ID_attr = 'LINK_ID'
# RD_name_attr = 'ROAD_NAME'
# RD_type_attr = 'ROAD_RANK'
# Activity_data_attr = 'SHAPE_STLe'
# Speed_attr = 'MAX_SPD'
# Link_length = 'SHAPE_STLe'
# file_name = link_shape
cnty_file = '{0}{1}'.format(input_dir,file_name)
if os.path.exists(cnty_file) == True:
print ('')
        print ('Reading County Shapefile ...')
print (cnty_file)
cnty_shp = gpd.read_file(cnty_file)
cnty_shp = cnty_shp.rename(columns={Region_Geocode : 'region_cd'})
cnty_shp = cnty_shp.rename(columns={Region_name_attr : 'region_nm'})
cnty_shp = cnty_shp.rename(columns={Region_name_attr_SK : 'region_nm_SK'})
        out_cnty = cnty_shp.loc[:,['geometry','region_cd','region_nm',
                                   'region_nm_SK']]
out_cnty['region_cd'] = out_cnty['region_cd'].astype(int)
else:
print('*** ERROR ABORT ***: Shapefile "" ', cnty_file, ' "" does not exist!')
sys.exit('CARS preProcessor can not read county Shapefile file')
run_time = ((time.time() - start_time))
print("--- %f seconds ---" % (run_time))
print("--- %f minutes ---" % (run_time/60))
print("--- %f Hours ---" % (run_time/3600))
return out_cnty
# =============================================================================
county_df = processing_County_shape(input_dir, county_shape, 'EMD_CD', 'EMD_ENG_NM',
'EMD_KOR_NM')
# =============================================================================
# Function to read the emissions factor from South Korea* (*particular case)
# =============================================================================
def read_emissions_factor_SK(input_dir, EmisFactor_list, sep = ';'):
start_time = time.time()
input_dir = input_dir
ef_list = EmisFactor_list #ef_file #['gasoline.csv'] #
# sep = ';'
    final_EF = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# -------------------------------------------------------------------
# **DSA 2021 tutorial (TD) by <NAME> - report by <NAME>**
# ------------------------- -------------------------------------
# # One Last Attempt: BERT
# After using the pretrained models directly, I wanted to try genuinely fine-tuning a Deep Learning model.
#
# I then found an article on `medium` presenting such an adaptation:
# source : https://scottmduda.medium.com/fine-tuning-language-models-for-sentiment-analysis-91db72396549
#
# github : https://github.com/dontmindifiduda/financial_statement_sentiment_analysis/
# The idea is to fetch a pretrained model from HuggingFace. Three variants are tested here:
#
# - `BERT` : the reference bidirectional-encoding model originally published by Google
# - `DistilBERT` : the lightweight version of `BERT` with roughly comparable performance
# - `RoBERTa` : Facebook's variant of `BERT`, which drops the next-sentence-prediction objective and was trained on more data with longer training sequences
#
# In[1]:
import numpy as np
import pandas as pd
import os
import re
import time
import datetime
import string
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('ggplot')
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from collections import Counter, defaultdict
import transformers
from transformers import BertModel, BertTokenizer, DistilBertTokenizer, RobertaModel, RobertaTokenizer
from transformers import AutoConfig, AutoModel, AdamW, get_linear_schedule_with_warmup
import torch
from torch import nn, optim
from torch.utils.data import Dataset, random_split, DataLoader, RandomSampler, SequentialSampler
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, classification_report
import mlflow
import gc
# ## Loading the data
# In[2]:
# Import the data
#df
df_train=pd.read_parquet('/mnt/data/interim/df_train.gzip')
df_val=pd.read_parquet('/mnt/data/interim/df_val.gzip')
df_test=pd.read_parquet('/mnt/data/interim/df_test.gzip')
#X
X_train=pd.read_parquet('/mnt/data/interim/X_train.gzip')
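# A minimal sketch (not part of the original notebook) of how a pretrained checkpoint
# is fetched from HuggingFace and used to encode one sentence; the checkpoint name is
# the standard public 'bert-base-uncased' and the sentence is made up, so the cell is
# left commented out:
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# encoding = tokenizer.encode_plus(
#     "Quarterly revenue increased by 12%.",
#     max_length=128, padding='max_length', truncation=True,
#     return_attention_mask=True, return_tensors='pt')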
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
from sid.config import DTYPE_VIRUS_STRAIN
def prepare_virus_strain_factors(
virus_strains: Dict[str, List[str]], params: pd.DataFrame
) -> Dict[str, Union[List[str], np.ndarray]]:
"""Prepare the information on virus strains and factors for infectiousness.
This function recreates the dictionary to not change the original value in partialed
function and adds the factors.
The ``contagiousness_factor`` explains how contagious a virus strain is, in
comparison to the base strain. The ``immunity_resistance_factor`` explains how well
the immunity level guards from (re)infection, dependent on the strain. The infection
probability is multiplied with: (1 - (1 - immunity_resistance_factor) * immunity),
so that higher values reduce the effect of immunity.
"""
if len(virus_strains["names"]) == 1:
contagiousness_factor = np.ones(1)
immunity_resistance_factor = np.zeros(1)
else:
factors = np.array(
[
params.loc[
(
"virus_strain",
name,
["contagiousness_factor", "immunity_resistance_factor"],
),
"value",
]
for name in virus_strains["names"]
]
)
if (factors < 0).any():
raise ValueError("Factors of 'virus_strains' cannot be smaller than 0.")
contagiousness_factor, immunity_resistance_factor = factors.T
contagiousness_factor = contagiousness_factor / contagiousness_factor.max()
new_virus_strains = {
"names": virus_strains["names"],
"contagiousness_factor": contagiousness_factor,
"immunity_resistance_factor": immunity_resistance_factor,
}
return new_virus_strains
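# Worked illustration of the immunity scaling described in the docstring above
# (hypothetical numbers, not part of the original module): with an
# immunity_resistance_factor of 0.4 and an individual immunity level of 0.5, the
# infection probability is multiplied by 1 - (1 - 0.4) * 0.5 = 0.7, i.e. a higher
# resistance factor weakens the protection conferred by immunity.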
def combine_first_factorized_infections(
first: np.ndarray, second: np.ndarray
) -> np.ndarray:
"""Combine factorized infections where the first has precedence."""
combined = second.copy()
combined[first >= 0] = first[first >= 0]
return combined
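# Example (illustrative arrays): first = [-1, 0, 2] and second = [1, 1, -1]
# combine to [1, 0, 2] -- wherever `first` holds a valid strain code (>= 0) it wins,
# otherwise the value from `second` is kept.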
def categorize_factorized_infections(
factorized_infections: Union[pd.Series, np.ndarray], virus_strains: Dict[str, Any]
) -> pd.Series:
"""Convert factorized infections with virus strains to a categorical."""
return pd.Series(
pd.Categorical(
factorized_infections, categories=range(-1, len(virus_strains["names"]))
)
.rename_categories(["not_infected"] + virus_strains["names"])
.remove_categories("not_infected")
)
def factorize_initial_infections(
infections: pd.DataFrame, virus_strains: Dict[str, Any]
) -> pd.DataFrame:
"""Factorize multiple boolean or categorical infections."""
    all_columns_boolean = (infections.dtypes == bool).all()
only_one_virus = len(virus_strains["names"]) == 1
all_columns_categorical = (infections.dtypes == "category").all()
if (all_columns_boolean and only_one_virus) or all_columns_categorical:
        factorized_infections = pd.DataFrame(index=infections.index)
from flask import Blueprint, request, jsonify, make_response, url_for
from flask.views import MethodView
from io import StringIO
from marshmallow import ValidationError
import pandas as pd
from sfa_api import spec
from sfa_api.utils import storage
from sfa_api.schema import (ObservationSchema, ObservationLinksSchema,
ObservationValueSchema, ObservationPostSchema)
class AllObservationsView(MethodView):
def get(self, *args):
"""
---
summary: List observations.
description: List all observations that the user has access to.
tags:
- Observations
responses:
200:
description: A list of observations
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/ObservationMetadata'
401:
$ref: '#/components/responses/401-Unauthorized'
"""
observations = storage.list_observations()
return ObservationSchema(many=True).jsonify(observations)
def post(self, *args):
"""
---
summary: Create observation.
tags:
- Observations
description: Create a new Observation by posting metadata.
requestBody:
description: JSON respresentation of an observation.
required: True
content:
application/json:
schema:
$ref: '#/components/schemas/ObservationDefinition'
responses:
201:
description: Observation created successfully
content:
application/json:
schema:
$ref: '#/components/schemas/ObservationMetadata'
400:
$ref: '#/components/responses/400-BadRequest'
401:
$ref: '#/components/responses/401-Unauthorized'
"""
data = request.get_json()
try:
observation = ObservationPostSchema().loads(data)
except ValidationError as err:
return jsonify(err.messages), 400
else:
obs_id = storage.store_observation(observation)
response = make_response('Observation created.', 201)
response.headers['Location'] = url_for('observations.single',
obs_id=obs_id)
return response
class ObservationView(MethodView):
def get(self, obs_id, **kwargs):
"""
---
summary: Get Observation options.
description: List options available for Observation.
tags:
- Observations
responses:
200:
description: Observation options retrieved successfully.
content:
application/json:
schema:
$ref: '#/components/schemas/ObservationLinks'
400:
$ref: '#/components/responses/400-BadRequest'
401:
$ref: '#/components/responses/401-Unauthorized'
404:
$ref: '#/components/responses/404-NotFound'
"""
observation = storage.read_observation(obs_id)
if observation is None:
            return make_response('Observation not found.', 404)
return ObservationLinksSchema().jsonify(observation)
def delete(self, obs_id, *args):
"""
---
summary: Delete observation.
description: Delete an Observation, including its values and metadata.
tags:
- Observations
parameters:
- $ref: '#/components/parameters/obs_id'
responses:
200:
description: Observation deleted successfully.
401:
$ref: '#/components/responses/401-Unauthorized'
404:
$ref: '#/components/responses/404-NotFound'
"""
deletion_result = storage.delete_observation(obs_id)
return deletion_result
class ObservationValuesView(MethodView):
def get(self, obs_id, *args):
"""
---
summary: Get Observation data.
description: Get the timeseries values from the Observation entry.
tags:
- Observations
parameters:
- $ref: '#/components/parameters/obs_id'
responses:
200:
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/ObservationValue'
401:
$ref: '#/components/responses/401-Unauthorized'
404:
$ref: '#/components/responses/404-NotFound'
"""
errors = []
start = request.args.get('start', None)
end = request.args.get('end', None)
if start is not None:
try:
start = pd.Timestamp(start)
except ValueError:
errors.append('Invalid start date format')
if end is not None:
try:
                end = pd.Timestamp(end)
            except ValueError:
                errors.append('Invalid end date format')
import os
import numpy as np
from openpyxl import load_workbook
import pandas as pd
import pytest
from geochem_dataset.excel import Dataset
from geochem_dataset.excel.dataclasses import Document
from geochem_dataset.excel.exceptions import IntegrityError
from helpers.utils import xlref, xlrowref, xlcolref
TEST_FILE_NAME = 'DOCUMENT.xlsx'
TEST_SHEET_NAME = 'DOCUMENT'
TEST_COLUMNS = ('RECOMMENDED_CITATION',)
TEST_DATA = [
('A test citation',)
]
ERROR_MESSAGES = {
'missing_worksheet': 'Worksheet {worksheet} is missing from workbook {workbook}',
'missing_columns': 'Worksheet {workbook}::{worksheet} is missing columns: {column_names}',
'extra_columns': 'Worksheet {workbook}::{worksheet} has extra columns: {column_names}',
'too_few_rows': 'Worksheet {workbook}::{worksheet} has too few rows (min is {min_rows} and max is {max_rows})',
'too_many_rows': 'Worksheet {workbook}::{worksheet} has too many rows (min is {min_rows} and max is {max_rows})',
}
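# Example (illustrative use of the templates above):
#   ERROR_MESSAGES['missing_worksheet'].format(
#       worksheet=TEST_SHEET_NAME, workbook=TEST_FILE_NAME)
#   -> 'Worksheet DOCUMENT is missing from workbook DOCUMENT.xlsx'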
class TestDocuments:
def test_documents(self, dataset_path):
# Build expected
expected_documents = [Document(*args) for args in TEST_DATA]
# Assert
with Dataset(dataset_path) as dataset:
documents = list(dataset.documents)
assert documents == expected_documents
def test_documents_with_empty_file(self, dataset_path):
# Modify documents file
document_path = dataset_path / TEST_FILE_NAME
os.truncate(document_path, 0)
# Assert
with pytest.raises(ValueError) as excinfo:
with Dataset(dataset_path) as dataset:
pass
def test_documents_with_missing_sheet(self, dataset_path):
# Modify
documents_path = dataset_path / TEST_FILE_NAME
wb = load_workbook(documents_path)
ws = wb[TEST_SHEET_NAME]
ws.title = "Skittles"
wb.save(documents_path)
# Expected
expected_error_msg_kwargs = {
'workbook': TEST_FILE_NAME,
'worksheet': TEST_SHEET_NAME,
}
# Assert
with pytest.raises(IntegrityError) as excinfo:
with Dataset(dataset_path) as dataset:
pass
assert excinfo.value.args[0] == ERROR_MESSAGES['missing_worksheet'].format(**expected_error_msg_kwargs)
def test_documents_with_missing_columns(self, dataset_path):
document_path = dataset_path / TEST_FILE_NAME
with pd.ExcelWriter(document_path) as writer:
            df = pd.DataFrame()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import numpy as np
import pytest
import pyarrow as pa
from pyarrow.fs import LocalFileSystem, SubTreeFileSystem
from pyarrow.tests.parquet.common import (
parametrize_legacy_dataset, parametrize_legacy_dataset_not_supported)
from pyarrow.util import guid
from pyarrow.vendored.version import Version
try:
import pyarrow.parquet as pq
from pyarrow.tests.parquet.common import (_read_table, _test_dataframe,
_write_table)
except ImportError:
pq = None
try:
import pandas as pd
import pandas.testing as tm
from pyarrow.tests.parquet.common import (_roundtrip_pandas_dataframe,
alltypes_sample)
except ImportError:
pd = tm = None
@pytest.mark.pandas
def test_pandas_parquet_custom_metadata(tempdir):
df = alltypes_sample(size=10000)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
assert b'pandas' in arrow_table.schema.metadata
_write_table(arrow_table, filename, version='2.6', coerce_timestamps='ms')
metadata = pq.read_metadata(filename).metadata
assert b'pandas' in metadata
js = json.loads(metadata[b'pandas'].decode('utf8'))
assert js['index_columns'] == [{'kind': 'range',
'name': None,
'start': 0, 'stop': 10000,
'step': 1}]
@pytest.mark.pandas
def test_merging_parquet_tables_with_different_pandas_metadata(tempdir):
# ARROW-3728: Merging Parquet Files - Pandas Meta in Schema Mismatch
schema = pa.schema([
pa.field('int', pa.int16()),
pa.field('float', pa.float32()),
pa.field('string', pa.string())
])
df1 = pd.DataFrame({
'int': np.arange(3, dtype=np.uint8),
'float': np.arange(3, dtype=np.float32),
'string': ['ABBA', 'EDDA', 'ACDC']
})
df2 = pd.DataFrame({
'int': [4, 5],
'float': [1.1, None],
'string': [None, None]
})
table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)
table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)
assert not table1.schema.equals(table2.schema, check_metadata=True)
assert table1.schema.equals(table2.schema)
writer = pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema)
writer.write_table(table1)
writer.write_table(table2)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_column_multiindex(tempdir, use_legacy_dataset):
df = alltypes_sample(size=10)
df.columns = pd.MultiIndex.from_tuples(
list(zip(df.columns, df.columns[::-1])),
names=['level_1', 'level_2']
)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
assert arrow_table.schema.pandas_metadata is not None
_write_table(arrow_table, filename, version='2.6', coerce_timestamps='ms')
table_read = pq.read_pandas(
filename, use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(
tempdir, use_legacy_dataset
):
df = alltypes_sample(size=10000)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
js = arrow_table.schema.pandas_metadata
assert not js['index_columns']
# ARROW-2170
# While index_columns should be empty, columns needs to be filled still.
assert js['columns']
_write_table(arrow_table, filename, version='2.6', coerce_timestamps='ms')
table_read = pq.read_pandas(
filename, use_legacy_dataset=use_legacy_dataset)
js = table_read.schema.pandas_metadata
assert not js['index_columns']
read_metadata = table_read.schema.metadata
assert arrow_table.schema.metadata == read_metadata
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
# TODO(dataset) duplicate column selection actually gives duplicate columns now
@pytest.mark.pandas
@parametrize_legacy_dataset_not_supported
def test_pandas_column_selection(tempdir, use_legacy_dataset):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16)
})
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, filename)
table_read = _read_table(
filename, columns=['uint8'], use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df[['uint8']], df_read)
# ARROW-4267: Selection of duplicate columns still leads to these columns
# being read uniquely.
table_read = _read_table(
filename, columns=['uint8', 'uint8'],
use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df[['uint8']], df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_native_file_roundtrip(tempdir, use_legacy_dataset):
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version='2.6')
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = _read_table(
reader, use_legacy_dataset=use_legacy_dataset).to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_read_pandas_column_subset(tempdir, use_legacy_dataset):
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version='2.6')
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = pq.read_pandas(
reader, columns=['strings', 'uint8'],
use_legacy_dataset=use_legacy_dataset
).to_pandas()
tm.assert_frame_equal(df[['strings', 'uint8']], df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_empty_roundtrip(tempdir, use_legacy_dataset):
df = _test_dataframe(0)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version='2.6')
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = _read_table(
reader, use_legacy_dataset=use_legacy_dataset).to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_can_write_nested_data(tempdir):
data = {
"agg_col": [
{"page_type": 1},
{"record_type": 1},
{"non_consecutive_home": 0},
],
"uid_first": "1001"
}
df = pd.DataFrame(data=data)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
# This succeeds under V2
_write_table(arrow_table, imos)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_pyfile_roundtrip(tempdir, use_legacy_dataset):
filename = tempdir / 'pandas_pyfile_roundtrip.parquet'
size = 5
df = pd.DataFrame({
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'strings': ['foo', 'bar', None, 'baz', 'qux']
})
arrow_table = pa.Table.from_pandas(df)
with filename.open('wb') as f:
_write_table(arrow_table, f, version="1.0")
data = io.BytesIO(filename.read_bytes())
table_read = _read_table(data, use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_configuration_options(tempdir, use_legacy_dataset):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0
})
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
for use_dictionary in [True, False]:
_write_table(arrow_table, filename, version='2.6',
use_dictionary=use_dictionary)
table_read = _read_table(
filename, use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
for write_statistics in [True, False]:
_write_table(arrow_table, filename, version='2.6',
write_statistics=write_statistics)
table_read = _read_table(filename,
use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']:
if (compression != 'NONE' and
not pa.lib.Codec.is_available(compression)):
continue
_write_table(arrow_table, filename, version='2.6',
compression=compression)
table_read = _read_table(
filename, use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
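# Illustrative sketch (not part of the original suite): the writer options the
# loops above exercise can also be combined in a single call; the file name is
# hypothetical.
#
#     import pyarrow.parquet as pq
#     pq.write_table(arrow_table, 'example.parquet', version='2.6',
#                    use_dictionary=False, write_statistics=True,
#                    compression='ZSTD')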
@pytest.mark.pandas
def test_spark_flavor_preserves_pandas_metadata():
df = _test_dataframe(size=100)
df.index = np.arange(0, 10 * len(df), 10)
df.index.name = 'foo'
result = _roundtrip_pandas_dataframe(df, {'version': '2.0',
'flavor': 'spark'})
tm.assert_frame_equal(result, df)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_index_column_name_duplicate(tempdir, use_legacy_dataset):
data = {
'close': {
pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998,
pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998,
},
'time': {
pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp(
'2017-06-30 01:31:00'
),
pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp(
'2017-06-30 01:32:00'
),
}
}
path = str(tempdir / 'data.parquet')
dfx = pd.DataFrame(data).set_index('time', drop=False)
tdfx = pa.Table.from_pandas(dfx)
_write_table(tdfx, path)
arrow_table = _read_table(path, use_legacy_dataset=use_legacy_dataset)
result_df = arrow_table.to_pandas()
tm.assert_frame_equal(result_df, dfx)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_multiindex_duplicate_values(tempdir, use_legacy_dataset):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
filename = tempdir / 'dup_multi_index_levels.parquet'
_write_table(table, filename)
result_table = _read_table(filename, use_legacy_dataset=use_legacy_dataset)
assert table.equals(result_table)
result_df = result_table.to_pandas()
tm.assert_frame_equal(result_df, df)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_backwards_compatible_index_naming(datadir, use_legacy_dataset):
expected_string = b"""\
carat cut color clarity depth table price x y z
0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43
0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31
0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31
0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63
0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75
0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48
0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47
0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53
0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49
0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39"""
expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\s{2,}',
index_col=None, header=0, engine='python')
table = _read_table(
datadir / 'v0.7.1.parquet', use_legacy_dataset=use_legacy_dataset)
result = table.to_pandas()
tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_backwards_compatible_index_multi_level_named(
datadir, use_legacy_dataset
):
expected_string = b"""\
carat cut color clarity depth table price x y z
0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43
0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31
0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31
0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63
0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75
0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48
0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47
0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53
0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49
0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39"""
expected = pd.read_csv(
io.BytesIO(expected_string), sep=r'\s{2,}',
index_col=['cut', 'color', 'clarity'],
header=0, engine='python'
).sort_index()
table = _read_table(datadir / 'v0.7.1.all-named-index.parquet',
use_legacy_dataset=use_legacy_dataset)
result = table.to_pandas()
tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_backwards_compatible_index_multi_level_some_named(
datadir, use_legacy_dataset
):
expected_string = b"""\
carat cut color clarity depth table price x y z
0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43
0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31
0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31
0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63
0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75
0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48
0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47
0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53
0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49
0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39"""
expected = pd.read_csv(
io.BytesIO(expected_string),
sep=r'\s{2,}', index_col=['cut', 'color', 'clarity'],
header=0, engine='python'
).sort_index()
expected.index = expected.index.set_names(['cut', None, 'clarity'])
table = _read_table(datadir / 'v0.7.1.some-named-index.parquet',
use_legacy_dataset=use_legacy_dataset)
result = table.to_pandas()
tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_backwards_compatible_column_metadata_handling(
datadir, use_legacy_dataset
):
expected = pd.DataFrame(
{'a': [1, 2, 3], 'b': [.1, .2, .3],
'c': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
expected.index = pd.MultiIndex.from_arrays(
[['a', 'b', 'c'],
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')],
names=['index', None])
path = datadir / 'v0.7.1.column-metadata-handling.parquet'
table = _read_table(path, use_legacy_dataset=use_legacy_dataset)
result = table.to_pandas()
tm.assert_frame_equal(result, expected)
table = _read_table(
path, columns=['a'], use_legacy_dataset=use_legacy_dataset)
result = table.to_pandas()
tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True))
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_categorical_index_survives_roundtrip(use_legacy_dataset):
# ARROW-3652, addressed by ARROW-3246
df = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2'])
df['c1'] = df['c1'].astype('category')
df = df.set_index(['c1'])
table = pa.Table.from_pandas(df)
bos = pa.BufferOutputStream()
pq.write_table(table, bos)
ref_df = pq.read_pandas(
bos.getvalue(), use_legacy_dataset=use_legacy_dataset).to_pandas()
assert isinstance(ref_df.index, pd.CategoricalIndex)
assert ref_df.index.equals(df.index)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_categorical_order_survives_roundtrip(use_legacy_dataset):
# ARROW-6302
df = pd.DataFrame({"a": pd.Categorical(
["a", "b", "c", "a"], categories=["b", "c", "d"], ordered=True)})
table = pa.Table.from_pandas(df)
bos = pa.BufferOutputStream()
pq.write_table(table, bos)
contents = bos.getvalue()
result = pq.read_pandas(
contents, use_legacy_dataset=use_legacy_dataset).to_pandas()
tm.assert_frame_equal(result, df)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_categorical_na_type_row_groups(use_legacy_dataset):
# ARROW-5085
df = pd.DataFrame({"col": [None] * 100, "int": [1.0] * 100})
df_category = df.astype({"col": "category", "int": "category"})
table = pa.Table.from_pandas(df)
table_cat = pa.Table.from_pandas(df_category)
buf = pa.BufferOutputStream()
# it works
pq.write_table(table_cat, buf, version='2.6', chunk_size=10)
result = pq.read_table(
buf.getvalue(), use_legacy_dataset=use_legacy_dataset)
# Result is non-categorical
assert result[0].equals(table[0])
assert result[1].equals(table[1])
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_categorical_roundtrip(use_legacy_dataset):
# ARROW-5480, this was enabled by ARROW-3246
# Have one of the categories unobserved and include a null (-1)
codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32')
categories = ['foo', 'bar', 'baz']
df = pd.DataFrame({'x': pd.Categorical.from_codes(
codes, categories=categories)})
buf = pa.BufferOutputStream()
pq.write_table(pa.table(df), buf)
result = pq.read_table(
buf.getvalue(), use_legacy_dataset=use_legacy_dataset).to_pandas()
assert result.x.dtype == 'category'
assert (result.x.cat.categories == categories).all()
tm.assert_frame_equal(result, df)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_pandas_preserve_extensiondtypes(
tempdir, use_legacy_dataset
):
# ARROW-8251 - preserve pandas extension dtypes in roundtrip
if Version(pd.__version__) < Version("1.0.0"):
pytest.skip("__arrow_array__ added to pandas in 1.0.0")
df = pd.DataFrame({'part': 'a', "col": [1, 2, 3]})
df['col'] = df['col'].astype("Int64")
table = pa.table(df)
pq.write_to_dataset(
table, str(tempdir / "case1"), partition_cols=['part'],
use_legacy_dataset=use_legacy_dataset
)
result = pq.read_table(
str(tempdir / "case1"), use_legacy_dataset=use_legacy_dataset
).to_pandas()
tm.assert_frame_equal(result[["col"]], df[["col"]])
pq.write_to_dataset(
table, str(tempdir / "case2"), use_legacy_dataset=use_legacy_dataset
)
result = pq.read_table(
str(tempdir / "case2"), use_legacy_dataset=use_legacy_dataset
).to_pandas()
tm.assert_frame_equal(result[["col"]], df[["col"]])
pq.write_table(table, str(tempdir / "data.parquet"))
result = pq.read_table(
str(tempdir / "data.parquet"), use_legacy_dataset=use_legacy_dataset
).to_pandas()
| tm.assert_frame_equal(result[["col"]], df[["col"]]) | pandas.testing.assert_frame_equal |
import operator
from shutil import get_terminal_size
from typing import Dict, Hashable, List, Type, Union, cast
from warnings import warn
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, hashtable as htable
from pandas._typing import ArrayLike, Dtype, Ordered, Scalar
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
cache_readonly,
deprecate_kwarg,
doc,
)
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.cast import (
coerce_indexer_dtype,
maybe_cast_to_extension_array,
maybe_infer_to_datetimelike,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
from pandas.core.algorithms import _get_data_algo, factorize, take, take_1d, unique1d
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
from pandas.core.construction import array, extract_array, sanitize_array
from pandas.core.indexers import check_array_indexer, deprecate_ndim_indexing
from pandas.core.missing import interpolate_2d
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
from pandas.io.formats import console
def _cat_compare_op(op):
opname = f"__{op.__name__}__"
@unpack_zerodim_and_defer(opname)
def func(self, other):
if is_list_like(other) and len(other) != len(self):
# TODO: Could this fail if the categories are listlike objects?
raise ValueError("Lengths must match.")
if not self.ordered:
if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
raise TypeError(
"Unordered Categoricals can only compare equality or not"
)
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = "Categoricals can only be compared if 'categories' are the same."
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif self.ordered and not (self.categories == other.categories).all():
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError(
"Categoricals can only be compared if 'ordered' is the same"
)
if not self.ordered and not self.categories.equals(other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
f = getattr(self._codes, opname)
ret = f(other_codes)
mask = (self._codes == -1) | (other_codes == -1)
if mask.any():
# In other series, this leads to False, so do that here too
if opname == "__ne__":
ret[(self._codes == -1) & (other_codes == -1)] = True
else:
ret[mask] = False
return ret
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
ret = getattr(self._codes, opname)(i)
if opname not in {"__eq__", "__ge__", "__gt__"}:
# check for NaN needed if we are not equal or larger
mask = self._codes == -1
ret[mask] = False
return ret
else:
if opname == "__eq__":
return np.zeros(len(self), dtype=bool)
elif opname == "__ne__":
return np.ones(len(self), dtype=bool)
else:
raise TypeError(
f"Cannot compare a Categorical for op {opname} with a "
"scalar, which is not a category."
)
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if opname in ["__eq__", "__ne__"]:
return getattr(np.array(self), opname)(np.array(other))
raise TypeError(
f"Cannot compare a Categorical for op {opname} with "
f"type {type(other)}.\nIf you want to compare values, "
"use 'np.asarray(cat) <op> other'."
)
func.__name__ = opname
return func
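# Commented usage sketch of the comparison semantics produced by
# _cat_compare_op (assumes pandas is importable as pd); expected results are
# shown as comments.
#
#     cat = pd.Categorical(['a', 'b', 'a'])
#     cat == 'a'      # array([ True, False,  True])
#     cat < 'b'       # raises TypeError: ordering needs an ordered Categorical
#     oc = pd.Categorical(['a', 'b'], categories=['a', 'b'], ordered=True)
#     oc < 'b'        # array([ True, False])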
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except (KeyError, TypeError):
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
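# Commented sketch of how the helper above is used; `container` is typically
# the (private) codes array of a Categorical or CategoricalIndex.
#
#     cat = pd.Categorical(['a'], categories=['a', 'b'])
#     contains(cat, 'a', container=cat._codes)   # True: code 0 occurs
#     contains(cat, 'b', container=cat._codes)   # False: code 1 never used
#     contains(cat, 'z', container=cat._codes)   # False: 'z' not a category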
class Categorical(ExtensionArray, PandasObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : bool, default False
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
CategoricalDtype : Type for categorical data.
CategoricalIndex : An Index with an underlying ``Categorical``.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_
for more.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
"""
# For comparisons, so that numpy uses our implementation for the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# tolist is not actually deprecated, just suppressed in the __dir__
_deprecations = PandasObject._deprecations | frozenset(["tolist"])
_typ = "categorical"
def __init__(
self, values, categories=None, ordered=None, dtype=None, fastpath=False
):
dtype = CategoricalDtype._from_values_or_dtype(
values, categories, ordered, dtype
)
# At this point, dtype is always a CategoricalDtype, but
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step further below
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
# By convention, empty lists result in object dtype:
sanitize_dtype = np.dtype("O") if len(values) == 0 else None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError as err:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError(
"'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument."
) from err
except ValueError as err:
# FIXME
raise NotImplementedError(
"> 1 ndim Categorical are not supported at this time"
) from err
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values.dtype):
old_codes = (
values._values.codes if isinstance(values, ABCSeries) else values.codes
)
codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories
)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = -np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""
The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if self.dtype.categories is not None and len(self.dtype.categories) != len(
new_dtype.categories
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
self._dtype = new_dtype
@property
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self.dtype.ordered
@property
def dtype(self) -> CategoricalDtype:
"""
The :class:`~pandas.api.types.CategoricalDtype` for this instance.
"""
return self._dtype
@property
def _constructor(self) -> Type["Categorical"]:
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def _formatter(self, boxed=False):
# Defer to CategoricalFormatter's formatter.
return None
def copy(self) -> "Categorical":
"""
Copy constructor.
"""
return self._constructor(
values=self._codes.copy(), dtype=self.dtype, fastpath=True
)
def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
"""
if is_categorical_dtype(dtype):
dtype = cast(Union[str, CategoricalDtype], dtype)
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
if is_extension_array_dtype(dtype):
return array(self, dtype=dtype, copy=copy) # type: ignore # GH 28770
if is_integer_dtype(dtype) and self.isna().any():
raise ValueError("Cannot convert float NaN to integer")
return np.array(self, dtype=dtype, copy=copy)
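# Commented usage sketch for astype (assumes pandas/numpy are importable):
#
#     cat = pd.Categorical(['a', 'b', 'a'])
#     cat.astype(object)                                # object ndarray
#     cat.astype(pd.CategoricalDtype(['a', 'b', 'c']))  # recoded, values kept
#     pd.Categorical([1, 2, None]).astype('float64')    # NaN for missing codes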
@cache_readonly
def size(self) -> int:
"""
Return the number of elements in the Categorical.
"""
return self._codes.size
@cache_readonly
def itemsize(self) -> int:
"""
return the size of a single category
"""
return self.categories.itemsize
def tolist(self) -> List[Scalar]:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
to_list = tolist
@classmethod
def _from_inferred_categories(
cls, inferred_categories, inferred_codes, dtype, true_values=None
):
"""
Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (
isinstance(dtype, CategoricalDtype) and dtype.categories is not None
)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif dtype.categories.is_boolean():
if true_values is None:
true_values = ["True", "TRUE", "true"]
cats = cats.isin(true_values)
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
codes = recode_for_categories(inferred_codes, unsorted, categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
"""
Make a Categorical type from codes and categories or dtype.
This constructor is useful if you already have codes and
categories/dtype and so do not need the (computationally intensive)
factorization step, which is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like of int
An integer array, where each integer points to a category in
categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here, then they must be provided
in `dtype`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
.. versionadded:: 0.24.0
When `dtype` is provided, neither `categories` nor `ordered`
should be provided.
Returns
-------
Categorical
Examples
--------
>>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
>>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
[a, b, a, b]
Categories (2, object): [a < b]
"""
dtype = CategoricalDtype._from_values_or_dtype(
categories=categories, ordered=ordered, dtype=dtype
)
if dtype.categories is None:
msg = (
"The categories must be provided in 'categories' or "
"'dtype'. Both were None."
)
raise ValueError(msg)
if is_extension_array_dtype(codes) and is_integer_dtype(codes):
# Avoid the implicit conversion of Int to object
if isna(codes).any():
raise ValueError("codes cannot contain NA values")
codes = codes.to_numpy(dtype=np.int64)
else:
codes = np.asarray(codes)
if len(codes) and not is_integer_dtype(codes):
raise ValueError("codes need to be array-like integers")
if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and len(categories)-1")
return cls(codes, dtype=dtype, fastpath=True)
@property
def codes(self) -> np.ndarray:
"""
The category codes of this categorical.
Codes are an array of integers which are the positions of the actual
values in the categories array.
There is no setter, use the other categorical methods and the normal item
setter to change values in the categorical.
Returns
-------
ndarray[int]
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_categories(self, categories, fastpath=False):
"""
Sets new categories inplace
Parameters
----------
fastpath : bool, default False
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (
not fastpath
and self.dtype.categories is not None
and len(new_dtype.categories) != len(self.dtype.categories)
):
raise ValueError(
"new categories need to have the same number of "
"items than the old categories!"
)
self._dtype = new_dtype
def _set_dtype(self, dtype: CategoricalDtype) -> "Categorical":
"""
Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = recode_for_categories(self.codes, self.categories, dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Set the ordered attribute to the boolean value.
Parameters
----------
value : bool
Set whether this categorical is ordered (True) or not (False).
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to the value.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Set the Categorical to be ordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to True.
Returns
-------
Categorical
Ordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Set the Categorical to be unordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to False.
Returns
-------
Categorical
Unordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False, inplace=False):
"""
Set the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes, which do not consider an S1 string equal to a single-char
Python string.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : bool, default False
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : bool, default False
Whether or not to reorder the categories in-place or return a copy
of this categorical with reordered categories.
Returns
-------
Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If new_categories does not validate as categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if cat.dtype.categories is not None and len(new_dtype.categories) < len(
cat.dtype.categories
):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = recode_for_categories(
cat.codes, cat.categories, new_dtype.categories
)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
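# Commented usage sketch for set_categories:
#
#     c = pd.Categorical(['a', 'b', 'a'])
#     c.set_categories(['b', 'a', 'z'])          # adds unused category 'z'
#     c.set_categories(['a'])                    # 'b' values become NaN
#     c.set_categories(['x', 'y'], rename=True)  # positional rename: a->x, b->y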
def rename_categories(self, new_categories, inplace=False):
"""
Rename categories.
Parameters
----------
new_categories : list-like, dict-like or callable
New categories which will replace old categories.
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0.
inplace : bool, default False
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
See Also
--------
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item) for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
"""
Reorder categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : bool, default False
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
See Also
--------
rename_categories : Rename categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if set(self.dtype.categories) != set(new_categories):
raise ValueError(
"items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
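# Commented usage sketch for reorder_categories:
#
#     c = pd.Categorical(['a', 'b', 'c'])
#     c.reorder_categories(['c', 'b', 'a'], ordered=True)
#     # values stay [a, b, c]; categories become [c < b < a]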
def add_categories(self, new_categories, inplace=False):
"""
Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : bool, default False
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
raise ValueError(
f"new categories must not include old categories: {already_included}"
)
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
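# Commented usage sketch for add_categories:
#
#     c = pd.Categorical(['a', 'b'])
#     c.add_categories(['c'])   # categories become ['a', 'b', 'c'], 'c' unused
#     c.add_categories(['a'])   # raises ValueError: 'a' is already a category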
def remove_categories(self, removals, inplace=False):
"""
Remove the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : bool, default False
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
Raises
------
ValueError
If the removals are not contained in the categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(removals):
removals = [removals]
removal_set = set(removals)
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = {x for x in not_included if notna(x)}
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
raise ValueError(f"removals must all be in old categories: {not_included}")
return self.set_categories(
new_categories, ordered=self.ordered, rename=False, inplace=inplace
)
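# Commented usage sketch for remove_categories:
#
#     c = pd.Categorical(['a', 'b', 'a'])
#     c.remove_categories(['b'])   # values [a, NaN, a]; categories ['a']
#     c.remove_categories(['z'])   # raises ValueError: not in old categories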
def remove_unused_categories(self, inplace=False):
"""
Remove categories which are not used.
Parameters
----------
inplace : bool, default False
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(
new_categories, ordered=self.ordered
)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
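# Commented usage sketch for remove_unused_categories:
#
#     c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
#     c.remove_unused_categories()   # categories shrink to ['a', 'b']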
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(
self._codes.copy(), categories=new_categories, ordered=self.ordered
)
except ValueError:
# NA values are represented in self._codes with -1
# np.take causes NA values to take final element in new_categories
if np.any(self._codes == -1):
new_categories = new_categories.insert(len(new_categories), np.nan)
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op(operator.eq)
__ne__ = _cat_compare_op(operator.ne)
__lt__ = _cat_compare_op(operator.lt)
__gt__ = _cat_compare_op(operator.gt)
__le__ = _cat_compare_op(operator.le)
__ge__ = _cat_compare_op(operator.ge)
# for Series/ndarray like compat
@property
def shape(self):
"""
Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods, fill_value=None):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
fill_value : object, optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
fill_value = self._validate_fill_value(fill_value)
codes = shift(codes.copy(), periods, axis=0, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
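# Commented usage sketch for shift (fill_value must be an existing category):
#
#     c = pd.Categorical(['a', 'b', 'c'])
#     c.shift(1)                   # [NaN, a, b]
#     c.shift(1, fill_value='c')   # [c, a, b]
#     c.shift(1, fill_value='z')   # raises ValueError: not in categories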
def _validate_fill_value(self, fill_value):
"""
Convert a user-facing fill_value to a representation to use with our
underlying ndarray, raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : int
Raises
------
ValueError
"""
if isna(fill_value):
fill_value = -1
elif fill_value in self.categories:
fill_value = self.categories.get_loc(fill_value)
else:
raise ValueError(
f"'fill_value={fill_value}' is not present "
"in this Categorical's categories"
)
return fill_value
def __array__(self, dtype=None) -> np.ndarray:
"""
The numpy array interface.
Returns
-------
numpy.array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype.
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ get's all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# for all other cases, raise for now (similarly as what happens in
# Series.__array_prepare__)
raise TypeError(
f"Object with dtype {self.dtype} cannot perform "
f"the numpy op {ufunc.__name__}"
)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception("invalid pickle state")
if "_dtype" not in state:
state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"])
for k, v in state.items():
setattr(self, k, v)
@property
def T(self) -> "Categorical":
"""
Return the transpose, which for a one-dimensional Categorical is itself.
"""
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep)
@doc(_shared_docs["searchsorted"], klass="Categorical")
def searchsorted(self, value, side="left", sorter=None):
# searchsorted is very performance sensitive. By converting codes
# to same dtype as self.codes, we get much faster performance.
if is_scalar(value):
codes = self.categories.get_loc(value)
codes = self.codes.dtype.type(codes)
else:
locs = [self.categories.get_loc(x) for x in value]
codes = np.array(locs, dtype=self.codes.dtype)
return self.codes.searchsorted(codes, side=side, sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See Also
--------
isna : Top-level isna.
isnull : Alias of isna.
Categorical.notna : Boolean inverse of Categorical.isna.
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See Also
--------
notna : Top-level notna.
notnull : Alias of notna.
Categorical.isna : Boolean inverse of Categorical.notna.
"""
return ~self.isna()
notnull = notna
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Return a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = np.bincount(obs, minlength=ncat or 0)
else:
count = np.bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype, fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype="int64")
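# Commented usage sketch for value_counts (unused categories count as 0):
#
#     c = pd.Categorical(['a', 'a', None], categories=['a', 'b'])
#     c.value_counts()               # a -> 2, b -> 0
#     c.value_counts(dropna=False)   # a -> 2, b -> 0, NaN -> 1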
def _internal_get_values(self):
"""
Return the values.
For internal compatibility with pandas formatting.
Returns
-------
np.ndarray or Index
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods.
"""
# if we are a datetime and period index, return Index to keep metadata
if needs_i8_conversion(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
elif is_integer_dtype(self.categories) and -1 in self._codes:
return self.categories.astype("object").take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError(
f"Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n"
)
def _values_for_argsort(self):
return self._codes
def argsort(self, ascending=True, kind="quicksort", **kwargs):
"""
Return the indices that would sort the Categorical.
.. versionchanged:: 0.25.0
Changed to sort missing values at the end.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
**kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
numpy.array
See Also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
Missing values are placed at the end
>>> cat = pd.Categorical([2, None, 1])
>>> cat.argsort()
array([2, 0, 1])
"""
return super().argsort(ascending=ascending, kind=kind, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position="last"):
"""
Sort the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : bool, default False
Do operation in place.
ascending : bool, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2, 2, NaN, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2, 2, 5, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2, 2, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5, 2, 2]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if na_position not in ["last", "first"]:
raise ValueError(f"invalid na_position: {repr(na_position)}")
sorted_idx = nargsort(self, ascending=ascending, na_position=na_position)
if inplace:
self._codes = self._codes[sorted_idx]
else:
return self._constructor(
values=self._codes[sorted_idx], dtype=self.dtype, fastpath=True
)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy.array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype("float64")
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def view(self, dtype=None):
if dtype is not None:
raise NotImplementedError(dtype)
return self._constructor(values=self._codes, dtype=self.dtype, fastpath=True)
def to_dense(self):
"""
Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
warn(
"Categorical.to_dense is deprecated and will be removed in "
"a future version. Use np.asarray(cat) instead.",
FutureWarning,
stacklevel=2,
)
return np.asarray(self)
def fillna(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError(
"specifying a limit for fillna has not been implemented yet"
)
codes = self._codes
# pad / bfill
if method is not None:
# TODO: dispatch when self.categories is EA-dtype
values = np.asarray(self).reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None, value).astype(
self.categories.dtype
)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, (np.ndarray, Categorical, ABCSeries)):
# We get ndarray or Categorical if called via Series.fillna,
# where it will unwrap another aligned Series before getting here
mask = ~algorithms.isin(value, self.categories)
if not isna(value[mask]).all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(codes == -1)
codes = codes.copy()
codes[indexer] = values_codes[indexer]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError(
f"'value' parameter must be a scalar, dict "
f"or Series, but you passed a {type(value).__name__}"
)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
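# A small illustration of fillna with a scalar (schematic doctest; categories
# here are inferred from the data, so this is only a sketch):
# >>> cat = pd.Categorical(["a", np.nan, "b"])
# >>> cat.fillna("a")
# [a, a, b]
# Categories (2, object): [a, b]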
def take(self, indexer, allow_fill: bool = False, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of int
The indices in `self` to take. The meaning of negative values in
`indexer` depends on the value of `allow_fill`.
allow_fill : bool, default False
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values.
These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 1.0.0
Default value changed from ``True`` to ``False``.
fill_value : object
The value to use for `indices` that are missing (-1), when
``allow_fill=True``. This should be the category, i.e. a value
in ``self.categories``, not a code.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
See Also
--------
Series.take : Similar method for Series.
numpy.ndarray.take : Similar method for NumPy arrays.
Examples
--------
>>> cat = pd.Categorical(['a', 'a', 'b'])
>>> cat
[a, a, b]
Categories (2, object): [a, b]
Specify ``allow_fill=False`` to have negative indices mean indexing
from the right.
>>> cat.take([0, -1, -2], allow_fill=False)
[a, b, a]
Categories (2, object): [a, b]
With ``allow_fill=True``, indices equal to ``-1`` mean "missing"
values that should be filled with the `fill_value`, which is
``np.nan`` by default.
>>> cat.take([0, -1, -1], allow_fill=True)
[a, NaN, NaN]
Categories (2, object): [a, b]
The fill value can be specified.
>>> cat.take([0, -1, -1], allow_fill=True, fill_value='a')
[a, a, a]
Categories (2, object): [a, b]
Specifying a fill value that's not in ``self.categories``
will raise a ``TypeError``.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill:
# convert user-provided `fill_value` to codes
fill_value = self._validate_fill_value(fill_value)
codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill: bool = False, fill_value=None):
# GH#27745 deprecate alias that other EAs don't have
warn(
"Categorical.take_nd is deprecated, use Categorical.take instead",
FutureWarning,
stacklevel=2,
)
return self.take(indexer, allow_fill=allow_fill, fill_value=fill_value)
def __len__(self) -> int:
"""
The length of this Categorical.
"""
return len(self._codes)
def __iter__(self):
"""
Returns an Iterator over the values of this Categorical.
"""
return iter(self._internal_get_values().tolist())
def __contains__(self, key) -> bool:
"""
Returns True if `key` is in this Categorical.
"""
# if key is a NaN, check if any NaN is in self.
if is_scalar(key) and isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
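# Illustrative membership checks (a sketch): a NaN key reduces to "is any
# value missing?", any other key is looked up against the categories.
# >>> "a" in pd.Categorical(["a", "b"])
# True
# >>> np.nan in pd.Categorical(["a", None])
# True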
def _tidy_repr(self, max_vals=10, footer=True) -> str:
"""
a short repr displaying only max_vals and an optional (but default)
footer
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num) :]._get_repr(length=False, footer=False)
result = f"{head[:-1]}, ..., {tail[1:]}"
if footer:
result = f"{result}\n{self._repr_footer()}"
return str(result)
def _repr_categories(self):
"""
return the base repr for the categories
"""
max_categories = (
10
if get_option("display.max_categories") == 0
else get_option("display.max_categories")
)
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self) -> str:
"""
Returns a string representation of the footer.
"""
category_strs = self._repr_categories()
dtype = str(self.categories.dtype)
levheader = f"Categories ({len(self.categories)}, {dtype}): "
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self) -> str:
info = self._repr_categories_info()
return f"Length: {len(self)}\n{info}"
def _get_repr(self, length=True, na_rep="NaN", footer=True) -> str:
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(
self, length=length, na_rep=na_rep, footer=footer
)
result = formatter.to_string()
return str(result)
def __repr__(self) -> str:
"""
String representation.
"""
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = f"[], {msg}"
return result
def _maybe_coerce_indexer(self, indexer):
"""
return an indexer coerced to the codes dtype
"""
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == "i":
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
"""
Return an item.
"""
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
key = check_array_indexer(self, key)
result = self._codes[key]
if result.ndim > 1:
deprecate_ndim_indexing(result)
return result
return self._constructor(result, dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
"""
Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
value = extract_array(value, extract_numpy=True)
# require identical categories set
if isinstance(value, Categorical):
if not is_dtype_equal(self, value):
raise ValueError(
"Cannot set a Categorical with another, "
"without identical categories"
)
if not self.categories.equals(value.categories):
new_codes = recode_for_categories(
value.codes, value.categories, self.categories
)
value = Categorical.from_codes(new_codes, dtype=self.dtype)
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError(
"Cannot setitem on a Categorical with a new "
"category, set the categories first"
)
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# else: array of True/False in Series or Categorical
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
key = check_array_indexer(self, key)
self._codes[key] = lindexer
def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]:
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
>>> c = pd.Categorical(list('aabca'))
>>> c
[a, a, b, c, a]
Categories (3, object): [a, b, c]
>>> c.categories
Index(['a', 'b', 'c'], dtype='object')
>>> c.codes
array([0, 0, 1, 2, 0], dtype=int8)
>>> c._reverse_indexer()
{'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(
self.codes.astype("int64"), categories.size
)
counts = counts.cumsum()
_result = (r[start:end] for start, end in zip(counts, counts[1:]))
result = dict(zip(categories, _result))
return result
# reduction ops #
def _reduce(self, name, axis=0, **kwargs):
func = getattr(self, name, None)
if func is None:
raise TypeError(f"Categorical cannot perform the operation {name}")
return func(**kwargs)
@deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna")
def min(self, skipna=True):
"""
The minimum value of the object.
Only ordered `Categoricals` have a minimum!
.. versionchanged:: 1.0.0
Returns an NA value on empty arrays
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered("min")
if not len(self._codes):
return self.dtype.na_value
good = self._codes != -1
if not good.all():
if skipna and good.any():
pointer = self._codes[good].min()
else:
return np.nan
else:
pointer = self._codes.min()
return self.categories[pointer]
@deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna")
def max(self, skipna=True):
"""
The maximum value of the object.
Only ordered `Categoricals` have a maximum!
.. versionchanged:: 1.0.0
Returns an NA value on empty arrays
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered("max")
if not len(self._codes):
return self.dtype.na_value
good = self._codes != -1
if not good.all():
if skipna and good.any():
pointer = self._codes[good].max()
else:
return np.nan
else:
pointer = self._codes.max()
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
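# A small illustration of mode (schematic doctest):
# >>> pd.Categorical(["a", "a", "b"]).mode()
# [a]
# Categories (2, object): [a, b]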
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
See Also
--------
pandas.unique
CategoricalIndex.unique
Series.unique
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list("baabc")).unique()
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list("baabc"), categories=list("abc")).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(
... list("baabc"), categories=list("abc"), ordered=True
... ).unique()
[b, a, c]
Categories (3, object): [a < b < c]
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype("int64")
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(
original.categories.take(uniques), dtype=original.dtype
)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
bool
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = recode_for_categories(
other.codes, other.categories, self.categories
)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype,
i.e. same categories and same ordered attribute.
Parameters
----------
other : Categorical
Returns
-------
bool
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
"""
Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ["counts", "freqs"]
result.index.name = "categories"
return result
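# Illustrative output of describe (schematic; column spacing may differ):
# >>> pd.Categorical(["a", "a", "b"]).describe()
#             counts     freqs
# categories
# a                2  0.666667
# b                1  0.333333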
@Substitution(klass="Categorical")
@Appender(_extension_array_shared_docs["repeat"])
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import concat_categorical
return concat_categorical(to_concat)
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : Equivalent method on Series.
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
if not is_list_like(values):
values_type = type(values).__name__
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a [{values_type}]"
)
values = sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
def replace(self, to_replace, value, inplace: bool = False):
"""
Replaces all instances of one value with another
Parameters
----------
to_replace: object
The value to be replaced
value: object
The value to replace it with
inplace: bool
Whether the operation is done in-place
Returns
-------
None if inplace is True, otherwise the new Categorical after replacement
Examples
--------
>>> s = pd.Categorical([1, 2, 1, 3])
>>> s.replace(1, 3)
[3, 2, 3, 3]
Categories (2, int64): [2, 3]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
# build a dict of (to replace -> value) pairs
if is_list_like(to_replace):
# if to_replace is list-like and value is scalar
replace_dict = {replace_value: value for replace_value in to_replace}
else:
# if both to_replace and value are scalar
replace_dict = {to_replace: value}
# other cases, like if both to_replace and value are list-like or if
# to_replace is a dict, are handled separately in NDFrame
for replace_value, new_value in replace_dict.items():
if new_value == replace_value:
continue
if replace_value in cat.categories:
if | isna(new_value) | pandas.core.dtypes.missing.isna |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
""" Use extract_amplitudes_and_phase to extract amplitude and phase for a periodic frequency
with desired frequencies to extract given (in cycles per hour) by constitfreq
Can be used to predict tide """
#NOTE not working exactly: leaves a random global phase shift for some reason
#Better to use optimisetides.py
def extract_amplitudes_and_phase(data, constitfreq, num_constits = 7):
"""LEAST SQUARES EXTRACTION OF TIDAL CONSTITUENT AMPLITUDE AND PHASE FOR GIVEN OBSERVATIONS
data: 2D arraylike, column 1 is time (units of hour) and column 2 is water level
constitfreq: 1D arraylike of desired frequencies (in cycles per hour) ie the constituents
num_constits: the number of constituents to extract amplitude and phase data for
Returns: DataFrame """
if np.mod(len(data),2)==0: #remove last observation if even number of observations supplied
data=data[0:-1]
T = data[:,0] #measurement times (units of hour)
H = data[:,1] #water levels
freqs = constitfreq[0:num_constits+1].copy() #desired constituents (read from top of list down)
num_N = int(len(H)/2 - 0.5)
Nvals = np.arange(-num_N,num_N+1) # a cycle (-N, -N+1, -N+2 .... N-1, N) for use later
num_M = num_constits
t0 = T[num_N] #midpoint time observation of set
deltaT = T[1]-T[0]
A = np.zeros((2*num_N+1,2*num_M+1)) #big badboy matrix
for i in range(2*num_N+1): #construct matrix A
lx = [np.cos(2*np.pi*freqs[j]*Nvals[i]*deltaT) for j in range(1,len(freqs))]
ly = [np.sin(2*np.pi*freqs[j]*Nvals[i]*deltaT) for j in range(1,len(freqs))]
A[i,:] = [1] + lx + ly
# now we solve (A^T*A)Z = (A^T)H where Z = (x0,x1,...,xM,y1,y2,....,yM)
ATH = np.dot(A.T,H)
ATA = np.dot(A.T,A)
Z = np.linalg.solve(ATA,ATH) #the resulting Z = (x0,x1,...,xM,y1,y2,....,yM)
# where xj = Rjcos(phij - freqj*t0)
# yj = Rjsin(phij - freqj*t0)
# and Rj and phij are unknown (now known) amplitude and phase for each constituent
X = Z[1:num_M+1]
Y = Z[num_M+1::]
amplitudes = [Z[0]]+[np.sqrt(X[i]**2+Y[i]**2) for i in range(num_M)]
phases = [0]+[np.mod(np.arcsin(X[i]/amplitudes[i+1])+2*np.pi*freqs[i+1]*t0,2*np.pi) for i in range(num_M)]
return pd.DataFrame(np.c_[amplitudes,freqs,phases],columns = ['amplitudes','freqs','phases'])
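# A minimal synthetic check (illustrative values only): build an hourly record
# from a single M2-like constituent and recover its amplitude with the
# least-squares fit above. The helper name and the numbers are made up for this sketch.
def _example_extraction():
    t = np.arange(0.0, 720.0)                    # 30 days of hourly observations
    m2_freq = 1.0 / 12.4206                      # approx. M2 frequency in cycles per hour
    h = 1.5 * np.cos(2.0 * np.pi * m2_freq * t)  # 1.5 m amplitude, zero phase
    data = np.c_[t, h]
    return extract_amplitudes_and_phase(data, np.array([0.0, m2_freq]), num_constits=1)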
def import_tide_constituents(filename='tide_constituents.csv'):
""" reads a csv file with the tidal constituents frequency (cycle per hour) in a column names frequency """
constits = pd.read_csv(filename)
constits = constits['frequency'].values
constits = np.hstack((0,constits)) # add 0 as a constituent
return constits
def import_tide_csv_qldopendata(filename):
"""Imports file and converts date to a pd.datetime column
filename points to a csv file downloaded from https://www.msq.qld.gov.au/Tides/Open-data
returns a 2d array [time, water level] with time in epoch hours """
tides = pd.read_csv(filename,sep=' ',header=None,engine='python')
tides.columns=['input_datetime','swl']
tides['input_datetime']=tides['input_datetime'].apply(lambda x: str(x))
tides['input_datetime']=tides['input_datetime'].apply(lambda x: '0'+x if len(x)==11 else x)
tides['date']= | pd.to_datetime(tides['input_datetime'],format='%d%m%Y%H%M') | pandas.to_datetime |
import pandas as pd
import os
# In the CVDP scenario, generate clean train/test sets and noisy train/test sets for each project
if __name__ == "__main__":
list_projectName = ["shiro","maven","flume","mahout","calcite","pdfbox","iotdb","tika"];
# "snoring_labels"用于读取snoring时间节点,以生成对应时间的数据集
# 读取路径
path_common = "D:/workspace/DataFolder/labels_MASZZ/";
path_common_versionReleaseDate = path_common + "versionReleaseDate/";
# file name to read
read_fileName_versionReleaseDate = "releaseDate.csv";
# output path
path_saved_common = "D:/workspace/DataFolder/data_csv/dataset_new/";
# output file names, excluding the project name part
fileName_train_clean = "_train_clean.csv";
fileName_train_noise = "_train_noise.csv";
fileName_test_clean = "_test_clean.csv";
fileName_test_noise = "_test_noise.csv";
for i_projectName in list_projectName:
# get the version labels corresponding to the snoring time points
path_versionReleaseDate = path_common_versionReleaseDate + i_projectName + '/' + read_fileName_versionReleaseDate;
df_versionReleaseDate = pd.read_csv(path_versionReleaseDate);
#=== collect defect labels both before the cutoff time point (snoring/noisy dataset) and at the latest time point (i.e. the clean dataset) ===#
print(i_projectName);
index_cutoff_train = df_versionReleaseDate[df_versionReleaseDate['cutoff']=='train'].index[0];
index_cutoff_test = df_versionReleaseDate[df_versionReleaseDate['cutoff']=='test'].index[0];
df_targetVersions_train = df_versionReleaseDate[index_cutoff_train+1:]
df_targetVersions_train = df_targetVersions_train.reset_index(drop=True)
df_targetVersions_test = df_versionReleaseDate[index_cutoff_test+1:index_cutoff_train+1]
df_targetVersions_test = df_targetVersions_test.reset_index(drop=True)
# paths to read from
path_train_clean = "%s/groundtruth_labels/mappingBugsToVersions/%s/"%(path_common,i_projectName);
path_test_clean = "%s/groundtruth_labels/mappingBugsToVersions/%s/"%(path_common,i_projectName);
path_train_noise = "%s/snoringTrain_labels/mappingBugsToVersions/%s/"%(path_common,i_projectName);
path_test_noise = "%s/snoringTest_labels/mappingBugsToVersions/%s/"%(path_common,i_projectName);
# read the labels of the target versions and generate the training sets
df_train_clean_all = pd.DataFrame();
df_train_noise_all = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/python
#Configuration items
import numpy as np
import pandas as pd
######################
#Character Sheet Items
class Ab_Sco:
def __init__(self, Str, Dex, Con, Int, Wis, Cha):
self.Str = Str
self.Dex = Dex
self.Con = Con
self.Int = Int
self.Wis = Wis
self.Cha = Cha
Ability_Scores = Ab_Sco(0, 0, 0, 0, 0, 0)
Size = ""
Level = 1
Class = ""
Race = ""
class Background:
def __init__(self, trait, ideal, bond, flaw):
self.trait = trait
self.ideal = ideal
self.bond = bond
self.flaw = flaw
bg = Background("", "", "", "")
Alignment = ""
Exp = 0
Prof_Bonus = 0
#Format: [IsProficient, Score]
SaveThrow = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
#Format: [Name, IsProficient, Score]
Skills = [["Acrobatics", 0, 0],["Animal Handling", 0, 0],["Arcana", 0, 0],["Athletics", 0, 0],["Deception", 0, 0],["History", 0, 0],["Insight", 0, 0],["Intimidation", 0, 0],["Investigation", 0, 0],["Medicine", 0, 0],["Nature", 0, 0],["Perception", 0, 0],["Performance", 0, 0],["Persuasion", 0, 0],["Religion", 0, 0],["Sleight of Hand", 0, 0],["Stealth", 0, 0],["Survival", 0, 0]]
#Passive_Percept = 0 #Passive Perception is just 10 + perception mod --> can be calculated on the fly
AC = 0
Initiative = 0
Speed = 0
Max_HP = 0 #No need to calculate current/temp --> generating characters default to max hp
Hit_Die_Type = ""
Hit_Die_Num = 0
Equipment = pd.DataFrame(columns=['Name'])
class Currency:
def __init__(self, cp, sp, ep, gp, pp):
self.cp = cp
self.sp = sp
self.ep = ep
self.gp = gp
self.pp = pp
Money = Currency(0, 0, 0, 0, 0)
class Proficiencies:
def __init__(self, armor, weapon, language, tool):
self.armor = armor
self.weapon = weapon
self.language = language
self.tool = tool
Prof = Proficiencies([], [], [], [])
Features = pd.DataFrame(columns=['Name', 'Desc'])
Spells0 = pd.DataFrame(columns=['Prep', 'Name'])
Spells1 = pd.DataFrame(columns=['Prep', 'Name'])
Spells2 = pd.DataFrame(columns=['Prep', 'Name'])
Spells3 = pd.DataFrame(columns=['Prep', 'Name'])
Spells4 = pd.DataFrame(columns=['Prep', 'Name'])
Spells5 = pd.DataFrame(columns=['Prep', 'Name'])
Spells6 = pd.DataFrame(columns=['Prep', 'Name'])
Spells7 = pd.DataFrame(columns=['Prep', 'Name'])
Spells8 = pd.DataFrame(columns=['Prep', 'Name'])
Spells9 = | pd.DataFrame(columns=['Prep', 'Name']) | pandas.DataFrame |
import pathlib
import datetime
import time
import uuid
import pandas as pd
import numpy as np
import simpy
import dill as pickle
import openclsim.model
def save_logs(simulation, location, file_prefix):
# todo add code to LogSaver to allow adding a file_prefix to each file
site_logs = list(simulation.sites.values())
equipment_logs = list(simulation.equipment.values())
activity_logs = [
activity["activity_log"] for activity in simulation.activities.values()
]
logsaver = LogSaver(
site_logs,
equipment_logs,
activity_logs,
location=location,
file_prefix=file_prefix,
overwrite=True,
append_to_existing=False,
)
logsaver.save_all_logs()
class ToSave:
"""
Class that defines objects that have to be saved.
data_type is the object type: ship, site, crane, etc.
data: is the dictionary that is used to fill the data_type
"""
def __init__(self, data_type, data, *args, **kwargs):
# This is the case for activities
if data_type == openclsim.model.Activity:
self.data_type = "Activity"
self.data = {
"name": data["name"],
"id": data["id"],
"mover": data["mover"].name,
"loader": data["loader"].name,
"unloader": data["unloader"].name,
"origin": data["origin"].name,
"destination": data["destination"].name,
"stop_event": None, # data["stop_event"],
"start_event": None,
} # data["start_event"]
# This is the case for equipment and sites
elif type(data_type) == type:
self.data_type = []
for subclass in data_type.__mro__:
if (
subclass.__module__ == "openclsim.core"
and subclass.__name__ not in ["Identifiable", "Log", "SimpyObject"]
):
self.data_type.append(subclass.__name__)
self.data = data
self.data["env"] = None
class SimulationSave:
"""
SimulationSave allows save all obtained data.
Environment: The simpy environment
Activities: List element with 'ToSave' classes of all unique activities
Equipment: List element with 'ToSave' classes of all unique pieces of equipment
Sites: List element with 'ToSave' classes of all unique sites
"""
def __init__(self, environment, activities, equipment, sites, *args, **kwargs):
""" Initialization """
# Generate unique ID for the simulation
self.id = str(uuid.uuid1())
# Save the environment
# assert type(environment) == simpy.core.Environment
self.simulation_start = environment.now
# Save all properties
assert type(activities) == list
self.activities = activities
assert type(equipment) == list
self.equipment = equipment
assert type(sites) == list
self.sites = sites
# Save the initialization properties
self.init = self.init_properties
@property
def init_properties(self):
"""
Save all properties of the simulation
"""
return {
"ID": self.id,
"Simulation start": self.simulation_start,
"Activities": self.activities,
"Equipment": self.equipment,
"Sites": self.sites,
}
def save_ini_file(self, filename, location=""):
"""
For all items of the simulation, save the properties and generate an initialization file.
The file is written with pickle (.pkl) and can be read back to start a new simulation.
If location is "", the init will be saved in the current working directory.
"""
# assure location is a path
location = pathlib.Path(location)
file_name = location / (filename + ".pkl")
with open(file_name, "wb") as file:
pickle.dump(self.init, file)
class SimulationOpen:
"""
SimulationOpen allows defining simulations from .pkl files.
If location is "", the init will be saved in the current working directory.
"""
def __init__(self, file_name):
""" Initialization """
self.simulation = self.open_ini_file(file_name)
def open_ini_file(self, file_name):
"""
Open a previously saved initialization (.pkl) file and return the
stored simulation properties, so that a new simulation can be
started from them.
"""
with open(file_name, "rb") as file:
return pickle.load(file)
def extract_files(self):
environment = simpy.Environment(
initial_time=self.simulation["Simulation start"]
)
environment.epoch = time.mktime(
datetime.datetime.fromtimestamp(
self.simulation["Simulation start"]
).timetuple()
)
sites = []
equipment = []
for site in self.simulation["Sites"]:
site_object = openclsim.model.get_class_from_type_list(
"Site", site.data_type
)
site.data["env"] = environment
sites.append(site_object(**site.data))
for ship in self.simulation["Equipment"]:
ship_object = openclsim.model.get_class_from_type_list(
"Ship", ship.data_type
)
ship.data["env"] = environment
equipment.append(ship_object(**ship.data))
activities = []
for activity in self.simulation["Activities"]:
data = activity.data
mover = [i for i in equipment if i.name == data["mover"]][0]
loader = [i for i in equipment if i.name == data["loader"]][0]
unloader = [i for i in equipment if i.name == data["unloader"]][0]
origin = [i for i in sites if i.name == data["origin"]][0]
destination = [i for i in sites if i.name == data["destination"]][0]
activities.append(
openclsim.model.Activity(
env=environment, # The simpy environment defined in the first cel
name=data["name"], # We are moving soil
ID=data["id"], # The id
origin=origin, # We originate from the from_site
destination=destination, # And therefore travel to the to_site
loader=loader, # The benefit of a TSHD, all steps can be done
mover=mover, # The benefit of a TSHD, all steps can be done
unloader=unloader,
)
) # The benefit of a TSHD, all steps can be done
return sites, equipment, activities, environment
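# A sketch of the intended save/open round trip (file and folder names are illustrative):
# sim = SimulationSave(env, activities, equipment, sites)
# sim.save_ini_file("my_simulation", location="results")
# sites, equipment, activities, env = SimulationOpen("results/my_simulation.pkl").extract_files()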
class LogSaver:
"""
LogSaver allows saving all logs as .csv files.
Objects should be a list containing the activities, sites and equipment.
The ID could be the ID that is saved to the .pkl file, entering an ID is optional.
If location is "", the files will be saved in the current working directory.
"""
def __init__(
self,
sites,
equipment,
activities,
simulation_id="",
simulation_name="",
location="",
file_prefix="",
overwrite=False,
append_to_existing=True,
):
""" Initialization """
# Save all properties
assert type(activities) == list
self.activities = activities
assert type(equipment) == list
self.equipment = equipment
assert type(sites) == list
self.sites = sites
# Save simulation id and simulation name
self.id = simulation_id if simulation_id else str(uuid.uuid1())
self.name = simulation_name if simulation_name else self.id
# Define location to save files
self.location = location
if len(self.location) != 0 and self.location[-1] != "/":
self.location += "/"
self.location += file_prefix
# Finally save all items
self.overwrite = overwrite
self.append_to_existing = append_to_existing
def save_all_logs(self):
"""
Save all logs to a specified location.
If location is "", the logs will be saved in the current working directory.
A file is saved with unique events -- events.csv
A file is saved with unique location objects -- locations.csv
A file is saved with unique equipment objects -- equipment.csv
A file is saved with unique activity objects -- activities.csv
A file is saved with unique simulations -- simulations.csv
A file is saved with equipment logs -- equipment_log.csv
A file is saved with energy use -- energy_use.csv
A file is saved with dredging spill info -- dredging_spill.csv
A file is saved with simulation properties -- generic_results.csv
"""
# First get all unique properties
# Obtain information on simulations
simulation_dict = {"SimulationID": [], "SimulationName": []}
self.get_unique_properties("simulations", simulation_dict)
# Obtain information on activities
activity_dict = {
"ActivityID": [],
"ActivityName": [],
"EquipmentID": [],
"ActivityFunction": [],
}
self.get_unique_properties("activities", activity_dict)
# Obtain information on equipment
equipment_dict = {"EquipmentID": [], "EquipmentName": []}
self.get_unique_properties("equipment", equipment_dict)
# Obtain information on locations
location_dict = {
"LocationID": [],
"LocationName": [],
"Longitude": [],
"Latitude": [],
}
self.get_unique_properties("location", location_dict)
# Obtain information on events
event_dict = {"EventID": [], "EventName": []}
self.get_unique_properties("events", event_dict)
# Continue with obtaining the logs, energy use and dredging spill
self.get_equipment_log()
self.get_energy()
self.get_spill()
self.get_results()
# Save all as csv files
self.generic_results.to_csv(self.location + "generic_results.csv", index=False)
self.dredging_spill.to_csv(self.location + "dredging_spill.csv", index=False)
self.energy_use.to_csv(self.location + "energy_use.csv", index=False)
self.equipment_log.to_csv(self.location + "equipment_log.csv", index=False)
self.unique_events.to_csv(self.location + "events.csv", index=False)
self.unique_activities.to_csv(self.location + "activities.csv", index=False)
self.unique_equipment.to_csv(self.location + "equipment.csv", index=False)
self.unique_locations.to_csv(self.location + "locations.csv", index=False)
self.unique_simulations.to_csv(self.location + "simulations.csv", index=False)
def get_unique_properties(self, object_type, object_dict):
"""
Obtain unique properties for the given list
"""
if self.append_to_existing:
try:
unique_df = pd.read_csv(self.location + object_type + ".csv")
except FileNotFoundError:
unique_df = pd.DataFrame.from_dict(object_dict)
else:
unique_df = pd.DataFrame.from_dict(object_dict)
if object_type == "simulations":
self.unique_simulations = self.append_dataframe(
unique_df, self, "Simulation"
)
elif object_type == "activities":
for activity in self.activities:
unique_df = self.append_dataframe(unique_df, activity, "Activity")
self.unique_activities = unique_df
elif object_type == "equipment":
for piece in self.equipment:
unique_df = self.append_dataframe(unique_df, piece, "Equipment")
self.unique_equipment = unique_df
elif object_type == "events":
for piece in self.equipment:
unique_df = self.event_dataframe(unique_df, piece)
self.unique_events = unique_df
elif object_type == "location":
for site in self.sites:
unique_df = self.append_dataframe(unique_df, site, "Location")
self.unique_locations = unique_df
def append_dataframe(self, existing_df, object_id, object_type):
"""
Check if dataframe is already filled with information, if not append.
If it is filled with similar values, raise an error unless self.overwrite == True.
"""
if object_id.id not in list(existing_df[object_type + "ID"]):
if object_type != "Location" and object_type != "Activity":
existing_df = existing_df.append(
{
object_type + "ID": object_id.id,
object_type + "Name": object_id.name,
},
ignore_index=True,
)
elif object_type == "Activity":
# lookup the equipment
# TODO: clean this up, it's now not filled in for move activities
loader_id = ""
if hasattr(object_id, "loader"):
loader_id = object_id.loader.id
existing_df = existing_df.append(
{
object_type + "ID": object_id.id,
object_type + "Name": object_id.name,
"EquipmentID": loader_id,
"ActivityFunction": "Loader",
},
ignore_index=True,
)
mover_id = ""
if hasattr(object_id, "mover"):
mover_id = object_id.mover.id
existing_df = existing_df.append(
{
object_type + "ID": object_id.id,
object_type + "Name": object_id.name,
"EquipmentID": mover_id,
"ActivityFunction": "Mover",
},
ignore_index=True,
)
unloader_id = ""
if hasattr(object_id, "unloader"):
unloader_id = object_id.unloader.id
existing_df = existing_df.append(
{
object_type + "ID": object_id.id,
object_type + "Name": object_id.name,
"EquipmentID": unloader_id,
"ActivityFunction": "Unloader",
},
ignore_index=True,
)
elif object_type == "Location":
existing_df = existing_df.append(
{
object_type + "ID": object_id.id,
object_type + "Name": object_id.name,
"Longitude": object_id.geometry.x,
"Latitude": object_id.geometry.y,
},
ignore_index=True,
)
elif self.overwrite == True:
existing_df = existing_df[existing_df[object_type + "ID"] != object_id.id]
if object_type != "Location":
existing_df = existing_df.append(
{
object_type + "ID": object_id.id,
object_type + "Name": object_id.name,
},
ignore_index=True,
)
else:
existing_df = existing_df.append(
{
object_type + "ID": object_id.id,
object_type + "Name": object_id.name,
"Longitude": object_id.geometry.x,
"Latitude": object_id.geometry.y,
},
ignore_index=True,
)
else:
raise KeyError(
"Simulation ID or simulation name already exist. "
+ "If you wish to overwrite the existing data, set overwrite to True"
)
return existing_df
def event_dataframe(self, existing_df, piece):
"""
Check if dataframe is already filled with information, if not append.
If it is filled with similar values, raise an error unless self.overwrite == True.
"""
log = pd.DataFrame.from_dict(piece.log)
events = list(log["Message"].unique())
for event in events:
if "start" in event or "stop" in event:
event = event.replace(" start", "")
event = event.replace(" stop", "")
if event not in list(existing_df["EventName"]):
existing_df = existing_df.append(
{"EventID": str(uuid.uuid1()), "EventName": event},
ignore_index=True,
)
return existing_df
def get_equipment_log(self):
"""
Create a dataframe from all equipment logs
"""
object_dict = {
"SimulationID": [],
"ObjectID": [],
"EventID": [],
"ActivityID": [],
"LocationID": [],
"EventStart": [],
"EventStop": [],
}
try:
unique_df = pd.read_csv(self.location + "equipment_log.csv")
except FileNotFoundError:
unique_df = pd.DataFrame.from_dict(object_dict)
for piece in self.equipment:
object_log = pd.DataFrame.from_dict(piece.log)
for i, message in enumerate(object_log["Message"]):
for j, event in enumerate(self.unique_events["EventName"]):
if message == event + " start":
object_dict["SimulationID"].append(self.id)
object_dict["ObjectID"].append(piece.id)
object_dict["EventID"].append(self.unique_events["EventID"][j])
object_dict["ActivityID"].append(object_log["ActivityID"][i])
object_dict["EventStart"].append(object_log["Timestamp"][i])
x, y = object_log["Geometry"][i].x, object_log["Geometry"][i].y
for k, LocationID in enumerate(
self.unique_locations["LocationID"]
):
if (
x == self.unique_locations["Longitude"][k]
and y == self.unique_locations["Latitude"][k]
):
object_dict["LocationID"].append(LocationID)
elif message == event + " stop":
object_dict["EventStop"].append(object_log["Timestamp"][i])
# Create durations column
object_df = pd.DataFrame.from_dict(object_dict)
durations = object_df["EventStop"] - object_df["EventStart"]
durations_days = []
for event in durations:
durations_days.append(event.total_seconds() / 3600 / 24)
object_df["EventDuration"] = durations_days
# Check if combination of simulation ID and object ID already exists
if len(unique_df["SimulationID"]) == 0:
unique_df = object_df
elif not (unique_df["SimulationID"] == self.id).any():
unique_df = pd.concat([unique_df, object_df], ignore_index=True)
elif self.overwrite == True:
drop_rows = []
for i, row in enumerate(unique_df["SimulationID"] == self.id):
if row == True:
drop_rows.append(i)
unique_df = unique_df.drop(drop_rows, axis=0)
unique_df = pd.concat([unique_df, object_df], ignore_index=True)
else:
raise KeyError(
"Simulation ID or simulation name already exist. "
+ "If you wish to overwrite the existing data, set overwrite to True"
)
self.equipment_log = unique_df
def get_spill(self):
"""
Obtain a log of all dreding spill
"""
object_dict = {
"SimulationID": [],
"ObjectID": [],
"EventID": [],
"ActivityID": [],
"LocationID": [],
"SpillStart": [],
"SpillStop": [],
"SpillDuration": [],
"Spill": [],
}
try:
unique_df = pd.read_csv(self.location + "dredging_spill.csv")
except FileNotFoundError:
unique_df = pd.DataFrame.from_dict(object_dict)
for piece in self.equipment:
object_log = | pd.DataFrame.from_dict(piece.log) | pandas.DataFrame.from_dict |
"""
training.py
=================
Description:
Author:
Usage:
"""
import sklearn
import numpy
import os
import csv
import sys
import random
import ast
import SimpleITK as sitk
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import json
from .vesselness import compute_absolute_eigen_values
def get_list_of_features():
"""
Load the ordered list of feature names from maclearn/data_order.json.
:return:
"""
from ..workflow import get_local_file_location
_file = open(
get_local_file_location(os.path.join("maclearn", "data_order.json")), "rb"
)
features = json.load(_file)
_file.close()
return features
def remove_keys_from_array(array, keys):
"""
Remove the given keys from the array (list) and return it.
:param array:
:param keys:
:return:
"""
for key in keys:
array.remove(key)
return array
def mask_with_abc_image(image, abc_image):
"""
Mask an image with the brain mask derived from an ABC label image.
:param image:
:param abc_image:
:return:
"""
abc_mask = get_brainmask(abc_image)
masked_image = sitk.Mask(image, abc_mask)
return masked_image
def binary_close(image, amount=1):
"""
Erode and then dilate a binary image by the given amount.
:param image:
:param amount:
:return:
"""
image = sitk.BinaryErode(image, amount)
image = sitk.BinaryDilate(image, amount)
return image
def get_brainmask(abc_image):
"""
Build a binary brain mask from an ABC label image, excluding non-brain label codes.
:param abc_image:
:return:
"""
exclude_image = abc_image < 0
exclude_codes = [5, 11, 12, 30]
for code in exclude_codes:
exclude_image = exclude_image + (abc_image == code)
exclude_image = binary_close(exclude_image, 2)
brainmask = abc_image * (exclude_image == 0) > 0
return brainmask
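# Illustrative use (the file name is hypothetical):
# abc_image = sitk.ReadImage("subject01_abc_labels.nii.gz")
# brainmask = get_brainmask(abc_image)  # binary image: 1 inside the brain, 0 elsewhere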
def masked_image_array(image, mask):
"""
Mask an image and return its voxel values as a flat 1D array.
:param image:
:param mask:
:return:
"""
return image_array(sitk.Mask(image, mask))
def mask_array_with_image(array, mask_image):
"""
Zero out entries of a flat array wherever the mask image is zero.
:param array:
:param mask_image:
:return:
"""
mask_array = image_array(mask_image)
array[numpy.where(mask_array == 0)] = 0
return array
def mask_data_with_image(data, mask_image):
"""
Apply mask_array_with_image to every array in data.
:param data:
:param mask_image:
:return:
"""
for i, array in enumerate(data):
data[i] = mask_array_with_image(array, mask_image)
return data
def linear_array_from_image_file(image_file):
"""
Read an image file and return its voxel values as a flat 1D array.
:param image_file:
:return:
"""
image = sitk.ReadImage(image_file)
return image_array(image)
def image_array(image):
"""
Returns the 1D array of the numpy matrix
:param image:
:return:
"""
a = sitk.GetArrayFromImage(image)
a1D = a.reshape(a.size)
return a1D
def data_by_region(
data, wmtargets, wmlabelmap, wmlabels, gmtargets, gmlabelmap, gmlabels
):
"""
Takes in an label map image and devides the data and
targets into specified regions. Regoins are specified
by a label list.
:param data:
:param wmtargets:
:param wmlabelmap:
:param wmlabels:
:param gmtargets:
:param gmlabelmap:
:param gmlabels:
:return:
"""
columns = [data]
keys = ["Features", "WMRegions", "GMRegions", "Targets"]
wmregions = list()
for i, label in enumerate(wmlabels):
wmregions.append(pd.Series(wmlabelmap == label))
df_wm = pd.concat(wmregions, axis=1, keys=wmlabels)
gmregions = list()
for i, label in enumerate(gmlabels):
gmregions.append(pd.Series(gmlabelmap == label))
df_gm = | pd.concat(gmregions, axis=1, keys=gmlabels) | pandas.concat |
# Script to compare neighborhoods from two city locations
# import package
import os
import pandas as pd # library for data analysis
from bs4 import BeautifulSoup # library to parse web pages
import requests # library to handle requests
import csv
import folium # map rendering library
from sklearn.cluster import KMeans
import numpy as np
# Matplotlib and associated plotting modules
import matplotlib.cm as cm
import matplotlib.colors as colors
import json
import logging # logging module
import argparse
import matplotlib.pyplot as plt
# from bokeh.io import export_png, export_svgs
# from bokeh.models import ColumnDataSource, DataTable, TableColumn
# credentials are stored in a separate file and not committed
from credentials import CLIENT_ID, CLIENT_SECRET, VERSION, LIMIT
logger = logging.getLogger()
def return_most_common_venues(row, num_top_venues):
"""
Function to sort venues in descending order
:param row: Pandas row
:param num_top_venues: Number of rows to sort by
:return:
"""
row_categories = row.iloc[1:]
row_categories_sorted = row_categories.sort_values(ascending=False)
return row_categories_sorted.index.values[0:num_top_venues]
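# Sketch of the expected input/output (venue categories and numbers are made up):
# row = pd.Series({"Neighborhood": "Downtown", "Café": 0.40, "Park": 0.10, "Pizza Place": 0.30})
# return_most_common_venues(row, 2)
# -> array(['Café', 'Pizza Place'], dtype=object)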
# def save_df_as_image(df, path):
# """
# Save a DataFrame as a nice image
# :param df: The dataFrame
# :param path: Filename to save to
# :return:
# """
# source = ColumnDataSource(df)
# df_columns = [df.index.name]
# df_columns.extend(df.columns.values)
# columns_for_table = []
# for column in df_columns:
# columns_for_table.append(TableColumn(field=column, title=column))
#
# data_table = DataTable(source=source, columns=columns_for_table, height_policy="auto", width_policy="auto",
# index_position=None)
# export_png(data_table, filename=path)
class ProcessLocation:
"""
Class to process location and save a map of the resulting clusters
"""
def __init__(self, location, kclusters=10, num_top_venues=5):
"""
:param str location: location to get information about
:param int kclusters: Number of clusters
:param int num_top_venues: Number of venues to group
"""
self.location = location
self.coordinate_data = {}
self.kclusters = kclusters
self.num_top_venues = num_top_venues
self.rainbow = []
self.url = None
self.longitude = []
self.latitude = []
self.data_from_wikipedia = []
self.df = None
self.grouped_df = None
self.nearby_venues = None
self.neighborhoods_venues_sorted = None
self.map_clusters = None
self.read_local_coordinates_file()
self.set_rainbow()
def read_local_coordinates_file(self):
"""
read local coordinate geospatial CSV file and write data into a dictionary
:return dict: dictionary of postal codes and coordinates
"""
with open('Geospatial_Coordinates.csv') as in_file:
data = csv.DictReader(in_file)
for row in data:
self.coordinate_data[row['Postal Code']] = {'longitude': row['Longitude'],
'latitude': row['Latitude']}
return self.coordinate_data
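# Expected layout of Geospatial_Coordinates.csv (illustrative rows, not real data):
# Postal Code,Latitude,Longitude
# M1B,43.8067,-79.1944
# M1C,43.7845,-79.1605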
def set_rainbow(self):
"""
set the colour scheme for the clusters
:return:
"""
x = np.arange(self.kclusters)
ys = [i + x + (i * x) ** 2 for i in range(self.kclusters)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
self.rainbow = [colors.rgb2hex(i) for i in colors_array]
def get_coordinates(self, postal_code):
"""
get the longitude and latitude for a given postal code
:param str postal_code: postal code
:return: longitude and latitude or None, None is postal code is not present
"""
ret = self.coordinate_data.get(postal_code, {})
latitude = ret.get('latitude')
longitude = ret.get('longitude')
return longitude, latitude
def get_data_from_wikipedia(self):
"""
parse wikipedia page for a given URL - set by self.get_url method
:return:
"""
self.get_url()
if self.url:
req = requests.get(self.url)
soup = BeautifulSoup(req.content, 'html.parser')
# logging.info(soup.prettify())
table = soup.find('table', attrs={'class': 'wikitable sortable'})
table_body = table.find('tbody')
# logging.info(table_body)
# get the headers of the table and store in a list
table_headers = []
headers = table_body.find_all('th')
for header in headers:
header_value = header.get_text().strip()
table_headers.append(header_value)
# get the rows of the table
rows = table_body.find_all('tr')
for row in rows:
row_data = {}
cells = row.find_all('td')
for position, cell in enumerate(cells):
value = cell.get_text().strip()
key = table_headers[position]
# add the value to a dictionary
row_data[key] = value
# check that there is some data and that Borough is not unassigned
if row_data and row_data.get('Borough', '') != 'Not assigned':
if 'Neighbourhood' in row_data:
row_data['Neighborhood'] = row_data.pop('Neighbourhood')
self.data_from_wikipedia.append(row_data)
def load_data_into_dataframe(self):
"""
Loads data from wikipedia into a Pandas dataframe
:return:
"""
if self.data_from_wikipedia:
self.df = pd.DataFrame(self.data_from_wikipedia)
# rename the postal code heading
self.df.rename(columns={"Postal Code": "PostalCode"}, inplace=True)
def add_coordinates(self):
"""
Adds coordinates (longitude, latitude) to data from wikipedia
:return:
"""
self.longitude = []
self.latitude = []
for index, row in self.df.iterrows():
postal_code = row.get('PostalCode')
row_long, row_lat = self.get_coordinates(postal_code=postal_code)
self.longitude.append(float(row_long))
self.latitude.append(float(row_lat))
self.df['Latitude'] = self.latitude
self.df['Longitude'] = self.longitude
def get_nearby_venues(self, radius=500):
"""
Get nearby venues from Foursquare
:param int radius: radius to get nearby venues
:return:
"""
names = self.df['Neighborhood']
latitudes = self.df['Latitude']
longitudes = self.df['Longitude']
venues_list = []
for name, lat, lng in zip(names, latitudes, longitudes):
url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}§ion=food'.format(
CLIENT_ID,
CLIENT_SECRET,
VERSION,
lat,
lng,
radius,
LIMIT)
# logging.info(url)
results = requests.get(url).json()["response"]['groups'][0]['items']
venues_list.append([(
name,
lat,
lng,
v['venue']['name'],
v['venue']['location']['lat'],
v['venue']['location']['lng'],
v['venue']['categories'][0]['name']) for v in results])
self.nearby_venues = | pd.DataFrame([item for venue_list in venues_list for item in venue_list]) | pandas.DataFrame |
# coding: utf-8
# In[ ]:
__author__ = '<NAME>'
# get_ipython().magic('matplotlib notebook')
# from IPython.display import set_matplotlib_formats
# set_matplotlib_formats('png', 'pdf')
# from IPython.display import Image
# from IPython.display import Math
import os
import sys
import shutil
import gdal
import pickle
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import flopy as fp
import pandas as pd
import geopandas as gp
import scipy.stats as ss
import scipy.optimize as so
from scipy.interpolate import UnivariateSpline
# from ipywidgets import interact, Dropdown
# from IPython.display import display
# In[ ]:
homes = ['../Models']
fig_dir = '../Figures'
mfpth = '../executables/MODFLOW-NWT_1.0.9/bin/MODFLOW-NWT_64.exe'
mp_exe_name = '../executables/modpath.6_0/bin/mp6.exe'
mf_start_date_str = '01/01/1900'
mp_release_date_str = '01/01/2020'
num_surf_layers = 3
num_depth_groups = 5
por = 0.20
dir_list = []
mod_list = []
i = 0
for home in homes:
if os.path.exists(home):
for dirpath, dirnames, filenames in os.walk(home):
for f in filenames:
if os.path.splitext(f)[-1] == '.nam':
mod = os.path.splitext(f)[0]
mod_list.append(mod)
dir_list.append(dirpath)
i += 1
print(' {} models read'.format(i))
# ## Read and process tracer input file from TracerLPM
# In[ ]:
# read input tracers
tracer_input_raw = pd.read_excel('../data/tracer input/Copy of TracerLPM_V_1_0B.xlsm', skiprows=3, sheetname='StoredTracerData', header=0)
col_list = ['Tracer', 'CFC-12', 'CFC-11', 'CFC-13', 'SF6', '3H', 'NO3-N']
tr_list = ['CFC-12', 'CFC-11', 'CFC-13', 'SF6', '3H', 'NO3-N']
tracer_input_df = tracer_input_raw.loc[:, col_list].copy()
# delete garbage header rows
tracer_input_df = tracer_input_df.iloc[3:, :]
# delete blank rows
tracer_input_df.dropna(axis=0, how='any', inplace=True)
# make sure all the tracer data is numeric
for col in col_list:
tracer_input_df[col] = | pd.to_numeric(tracer_input_df[col]) | pandas.to_numeric |
import pandas as pd
import numpy as np
from scipy.special import boxcox1p
from scipy.stats import boxcox_normmax
from scipy.stats import skew
from sklearn.preprocessing import RobustScaler
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge, Lasso, ElasticNet
from mlxtend.regressor import StackingCVRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
# Load The Data
TRAIN_FILE_PATH = 'C:/Users/ahlaw/Downloads/train.csv'
TEST_FILE_PATH = 'C:/Users/ahlaw/Downloads/test.csv'
training_data = pd.read_csv(TRAIN_FILE_PATH)
test_data = | pd.read_csv(TEST_FILE_PATH) | pandas.read_csv |
from openpyxl import Workbook
from openpyxl.cell.cell import Cell
from openpyxl.styles import Alignment, Font, PatternFill, Border, Side
from openpyxl.utils.dataframe import dataframe_to_rows
from verifica_autores import em_lista_autores, trata_exceçoes
from grafico import Graficos, Graficos_Anais_Periodicos
import pandas as pd
import re
import os
from valores import ND
from PyscopusModified import ScopusModified
class ExcelFile(Workbook):
def __init__(self, relatorios, autores, todos, artppg, medias, indicadores, indicadores_geral, lista_egressos, lista_alunos, excecoes, per_a1_a4, per_a1_a4_ae, periodicos, anais, periodicos_metricas, anais_metricas):
super(ExcelFile, self).__init__()
self.relatorios = relatorios
self.autores = autores
self.todos = todos
self.artppg = artppg
self.medias = medias
self.indicadores = indicadores
self.indicadores_geral = indicadores_geral
self.lista_egressos = lista_egressos
self.lista_alunos = lista_alunos
self.excecoes = excecoes
self.per_a1_a4 = per_a1_a4
self.per_a1_a4_ae = per_a1_a4_ae
self.periodicos = periodicos
self.anais = anais
self.periodicos_metricas = periodicos_metricas
self.anais_metricas = anais_metricas
for pos, egresso in enumerate(self.lista_egressos):
self.lista_egressos[pos].name = trata_exceçoes(egresso.name.strip())
for pos, aluno in enumerate(self.lista_alunos):
self.lista_alunos[pos].name = trata_exceçoes(aluno.name.strip())
self.add_info()
# self.altera_autores()
self.aplica_estilo()
# self.converte_valores()
self.aplica_dimensoes()
self.aplica_cores()
self.aplica_filtros()
def styled_cells(self, data, ws, pinta=True):
for c in data:
c = Cell(ws, column="A", row=1, value=c)
if c.value != None and str(c.value) != "nan":
if c.value == "Porcentagem alunos/egressos":
c.value = "Porcentagem"
if data[0] in ["Periódicos", "A1-A4", "A1", "A2", "A3", "A4", "Irestrito"]:
c.font = Font(color='FFFAFA')
c.fill = PatternFill(fill_type='solid', start_color='00B050', end_color='00B050')
elif data[0] != "Outros" and data[0] != "Número médio de docentes" and pinta == True:
c.fill = PatternFill(fill_type='solid', start_color='FFFFCC', end_color='FFFFCC')
if c.value in ["Tipo/Qualis", "Quantidade", "Porcentagem", "Quantidade com alunos/egressos", "Índice", "Acumulado", "Média por docente", "Número médio de docentes", "Nome Periódico", "Qualis 2019/ISSN Impresso", "Qualis 2019/ISSN Online", "Métrica", "Qtd.", "Qtd. %"]:
c.font = Font(bold=True)
if c.value != "Número médio de docentes":
bd = Side(border_style="thin", color="000000")
c.border = Border(left=bd, top=bd, right=bd, bottom=bd)
c.alignment = Alignment(horizontal='center', vertical='center')
yield c
def add_info(self):
ws = self.active # Primeiro sheet
ws.title = 'Parâmetros'
ws.append(["Estrato", "Peso"])
estratos = ["A1", "A2", "A3", "A4", "B1", "B2", "B3", "B4", ]
pesos = [1.000, 0.875, 0.750, 0.625, 0.500, 0.200, 0.100, 0.050, ]
for pos, estrato in enumerate(estratos):
ws.append([estrato, pesos[pos]])
ws.append([None, None])
ws.append(self.styled_cells(["Número médio de docentes"], ws))
ws.append(["ND", ND])
ws = self.create_sheet("Autores")
for row in dataframe_to_rows(self.autores, index=False, header=True):
ws.append(row)
lista_autores = []
for pos, autor in enumerate(self.relatorios["Author"]):
if autor.split(" ")[0] not in lista_autores:
lista_autores.append(autor.split(" ")[0])
else:
encontrou = False
for autor2 in self.relatorios["Author"]:
if autor2.split(" ")[0] == autor.split(" ")[0] and encontrou == False:
encontrou = True
for pos, autor3 in enumerate(lista_autores):
if autor3 == autor2.split(" ")[0]:
lista_autores[pos] = f"{autor2.split(' ')[0]} {autor2.split(' ')[1]}"
lista_autores.append(f"{autor.split(' ')[0]} {autor.split(' ')[1]}")
indicadores_copy = []
for tabela in self.indicadores:
indicadores_copy.append(tabela.copy())
for pos, tabela in enumerate(indicadores_copy):
lista_nome = []
for i in range(len(tabela.index)):
lista_nome.append(lista_autores[pos])
tabela["Nome Autor"] = lista_nome
indicadores_copy[pos] = tabela
indicadores_copy = | pd.concat(indicadores_copy, ignore_index=True, sort=False) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'chengzhi'
"""
The tqsdk.ta module contains a collection of commonly used technical indicator calculation functions
"""
import numpy as np
import pandas as pd
import numba
from tqsdk import ta_func
def ATR(df, n):
"""平均真实波幅"""
new_df = | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import operator
import string
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core._compat import PANDAS_GE_110
from cudf.testing._utils import (
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
)
@pytest.fixture
def pd_str_cat():
categories = list("abc")
codes = [0, 0, 1, 0, 1, 2, 0, 1, 1, 2]
return pd.Categorical.from_codes(codes, categories=categories)
def test_categorical_basic():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
cudf_cat = cudf.Index(cat)
pdsr = pd.Series(cat, index=["p", "q", "r", "s", "t"])
sr = cudf.Series(cat, index=["p", "q", "r", "s", "t"])
assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False)
# Test attributes
assert_eq(pdsr.cat.categories, sr.cat.categories)
assert pdsr.cat.ordered == sr.cat.ordered
np.testing.assert_array_equal(
pdsr.cat.codes.values, sr.cat.codes.to_array()
)
string = str(sr)
expect_str = """
p a
q a
r b
s c
t a
"""
assert all(x == y for x, y in zip(string.split(), expect_str.split()))
assert_eq(cat.codes, cudf_cat.codes.to_array())
def test_categorical_integer():
if not PANDAS_GE_110:
pytest.xfail(reason="pandas >=1.1 required")
cat = pd.Categorical(["a", "_", "_", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
np.testing.assert_array_equal(
cat.codes, sr.cat.codes.astype(cat.codes.dtype).fillna(-1).to_array()
)
assert sr.null_count == 2
np.testing.assert_array_equal(
pdsr.cat.codes.values,
sr.cat.codes.astype(pdsr.cat.codes.dtype).fillna(-1).to_array(),
)
string = str(sr)
expect_str = """
0 a
1 <NA>
2 <NA>
3 c
4 a
dtype: category
Categories (3, object): ['a', 'b', 'c']
"""
assert string.split() == expect_str.split()
def test_categorical_compare_unordered():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
# test equal
out = sr == sr
assert out.dtype == np.bool_
assert type(out[0]) == np.bool_
assert np.all(out.to_array())
assert np.all(pdsr == pdsr)
# test inequality
out = sr != sr
assert not np.any(out.to_array())
assert not np.any(pdsr != pdsr)
assert not pdsr.cat.ordered
assert not sr.cat.ordered
# test using ordered operators
assert_exceptions_equal(
lfunc=operator.lt,
rfunc=operator.lt,
lfunc_args_and_kwargs=([pdsr, pdsr],),
rfunc_args_and_kwargs=([sr, sr],),
)
def test_categorical_compare_ordered():
cat1 = pd.Categorical(
["a", "a", "b", "c", "a"], categories=["a", "b", "c"], ordered=True
)
pdsr1 = pd.Series(cat1)
sr1 = cudf.Series(cat1)
cat2 = pd.Categorical(
["a", "b", "a", "c", "b"], categories=["a", "b", "c"], ordered=True
)
pdsr2 = pd.Series(cat2)
sr2 = cudf.Series(cat2)
# test equal
out = sr1 == sr1
assert out.dtype == np.bool_
assert type(out[0]) == np.bool_
assert np.all(out.to_array())
assert np.all(pdsr1 == pdsr1)
# test inequality
out = sr1 != sr1
assert not np.any(out.to_array())
assert not np.any(pdsr1 != pdsr1)
assert pdsr1.cat.ordered
assert sr1.cat.ordered
# test using ordered operators
np.testing.assert_array_equal(pdsr1 < pdsr2, (sr1 < sr2).to_array())
np.testing.assert_array_equal(pdsr1 > pdsr2, (sr1 > sr2).to_array())
def test_categorical_binary_add():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([pdsr, pdsr],),
rfunc_args_and_kwargs=([sr, sr],),
expected_error_message="Series of dtype `category` cannot perform "
"the operation: add",
)
def test_categorical_unary_ceil():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_exceptions_equal(
lfunc=getattr,
rfunc=sr.ceil,
lfunc_args_and_kwargs=([pdsr, "ceil"],),
check_exception_type=False,
expected_error_message="Series of dtype `category` cannot "
"perform the operation: ceil",
)
def test_categorical_element_indexing():
"""
Element indexing into a categorical column must give the underlying object,
not the numerical code index.
"""
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_eq(pdsr, sr)
assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False)
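# A concrete instance of the docstring's claim (an added illustration, not part
# of the original test): positional access such as pdsr[0] or sr[0] is expected
# to yield the category value "a" rather than the integer code 0.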
def test_categorical_masking():
"""
Test the common operation of getting all rows that match a certain
category.
"""
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
# check scalar comparison
expect_matches = pdsr == "a"
got_matches = sr == "a"
np.testing.assert_array_equal(
expect_matches.values, got_matches.to_array()
)
# mask series
expect_masked = pdsr[expect_matches]
got_masked = sr[got_matches]
assert len(expect_masked) == len(got_masked)
assert len(expect_masked) == got_masked.valid_count
assert_eq(got_masked, expect_masked)
def test_df_cat_set_index():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
got = df.set_index("a")
pddf = df.to_pandas(nullable_pd_dtype=False)
expect = pddf.set_index("a")
assert_eq(got, expect)
def test_df_cat_sort_index():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
got = df.set_index("a").sort_index()
expect = df.to_pandas(nullable_pd_dtype=False).set_index("a").sort_index()
assert_eq(got, expect)
def test_cat_series_binop_error():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
dfa = df["a"]
dfb = df["b"]
# lhs is a categorical
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([dfa, dfb],),
rfunc_args_and_kwargs=([dfa, dfb],),
check_exception_type=False,
expected_error_message="Series of dtype `category` cannot "
"perform the operation: add",
)
# if lhs is a numerical
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([dfb, dfa],),
rfunc_args_and_kwargs=([dfb, dfa],),
check_exception_type=False,
expected_error_message="'add' operator not supported",
)
@pytest.mark.parametrize("num_elements", [10, 100, 1000])
def test_categorical_unique(num_elements):
# create categorical series
np.random.seed(12)
pd_cat = pd.Categorical(
pd.Series(
np.random.choice(
list(string.ascii_letters + string.digits), num_elements
),
dtype="category",
)
)
# gdf
gdf = cudf.DataFrame()
gdf["a"] = cudf.Series.from_categorical(pd_cat)
gdf_unique_sorted = np.sort(gdf["a"].unique().to_pandas())
# pandas
pdf = pd.DataFrame()
pdf["a"] = pd_cat
pdf_unique_sorted = np.sort(pdf["a"].unique())
# verify
np.testing.assert_array_equal(pdf_unique_sorted, gdf_unique_sorted)
@pytest.mark.parametrize("nelem", [20, 50, 100])
def test_categorical_unique_count(nelem):
# create categorical series
np.random.seed(12)
pd_cat = pd.Categorical(
pd.Series(
np.random.choice(
list(string.ascii_letters + string.digits), nelem
),
dtype="category",
)
)
# gdf
gdf = cudf.DataFrame()
gdf["a"] = cudf.Series.from_categorical(pd_cat)
gdf_unique_count = gdf["a"].nunique()
# pandas
pdf = pd.DataFrame()
pdf["a"] = pd_cat
pdf_unique = pdf["a"].unique()
# verify
assert gdf_unique_count == len(pdf_unique)
def test_categorical_empty():
cat = pd.Categorical([])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
np.testing.assert_array_equal(cat.codes, sr.cat.codes.to_array())
# Test attributes
assert_eq(pdsr.cat.categories, sr.cat.categories)
assert pdsr.cat.ordered == sr.cat.ordered
np.testing.assert_array_equal(
pdsr.cat.codes.values, sr.cat.codes.to_array()
)
def test_categorical_set_categories():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
psr = pd.Series(cat)
sr = cudf.Series.from_categorical(cat)
# adding category
expect = psr.cat.set_categories(["a", "b", "c", "d"])
got = sr.cat.set_categories(["a", "b", "c", "d"])
assert_eq(expect, got)
# removing category
expect = psr.cat.set_categories(["a", "b"])
got = sr.cat.set_categories(["a", "b"])
assert_eq(expect, got)
def test_categorical_set_categories_preserves_order():
series = pd.Series([1, 0, 0, 0, 2]).astype("category")
# reassigning categories should preserve element ordering
assert_eq(
series.cat.set_categories([1, 2]),
cudf.Series(series).cat.set_categories([1, 2]),
)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_as_ordered(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy().set_ordered(False))
cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(False))
assert cd_sr.cat.ordered is False
assert cd_sr.cat.ordered == pd_sr.cat.ordered
pd_sr_1 = pd_sr.cat.as_ordered(inplace=inplace)
cd_sr_1 = cd_sr.cat.as_ordered(inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert cd_sr_1.cat.ordered is True
assert cd_sr_1.cat.ordered == pd_sr_1.cat.ordered
assert str(cd_sr_1) == str(pd_sr_1)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_as_unordered(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy().set_ordered(True))
cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(True))
assert cd_sr.cat.ordered is True
assert cd_sr.cat.ordered == pd_sr.cat.ordered
pd_sr_1 = pd_sr.cat.as_unordered(inplace=inplace)
cd_sr_1 = cd_sr.cat.as_unordered(inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert cd_sr_1.cat.ordered is False
assert cd_sr_1.cat.ordered == pd_sr_1.cat.ordered
assert str(cd_sr_1) == str(pd_sr_1)
@pytest.mark.parametrize("from_ordered", [True, False])
@pytest.mark.parametrize("to_ordered", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_reorder_categories(
pd_str_cat, from_ordered, to_ordered, inplace
):
pd_sr = pd.Series(pd_str_cat.copy().set_ordered(from_ordered))
cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(from_ordered))
assert_eq(pd_sr, cd_sr)
assert str(pd_sr) == str(cd_sr)
kwargs = dict(ordered=to_ordered, inplace=inplace)
pd_sr_1 = pd_sr.cat.reorder_categories(list("cba"), **kwargs)
cd_sr_1 = cd_sr.cat.reorder_categories(list("cba"), **kwargs)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert_eq(pd_sr_1, cd_sr_1)
assert str(cd_sr_1) == str(pd_sr_1)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_add_categories(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy())
cd_sr = cudf.Series(pd_str_cat.copy())
assert_eq(pd_sr, cd_sr)
assert str(pd_sr) == str(cd_sr)
pd_sr_1 = pd_sr.cat.add_categories(["d"], inplace=inplace)
cd_sr_1 = cd_sr.cat.add_categories(["d"], inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert "d" in pd_sr_1.cat.categories.to_list()
assert "d" in cd_sr_1.cat.categories.to_pandas().to_list()
assert_eq(pd_sr_1, cd_sr_1)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_remove_categories(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy())
cd_sr = cudf.Series(pd_str_cat.copy())
assert_eq(pd_sr, cd_sr)
assert str(pd_sr) == str(cd_sr)
pd_sr_1 = pd_sr.cat.remove_categories(["a"], inplace=inplace)
cd_sr_1 = cd_sr.cat.remove_categories(["a"], inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert "a" not in pd_sr_1.cat.categories.to_list()
assert "a" not in cd_sr_1.cat.categories.to_pandas().to_list()
assert_eq(pd_sr_1, cd_sr_1)
# test using ordered operators
assert_exceptions_equal(
lfunc=cd_sr.to_pandas().cat.remove_categories,
rfunc=cd_sr.cat.remove_categories,
lfunc_args_and_kwargs=([["a", "d"]], {"inplace": inplace}),
rfunc_args_and_kwargs=([["a", "d"]], {"inplace": inplace}),
expected_error_message="removals must all be in old categories",
)
def test_categorical_dataframe_slice_copy():
pdf = pd.DataFrame({"g": pd.Series(["a", "b", "z"], dtype="category")})
gdf = cudf.from_pandas(pdf)
exp = pdf[1:].copy()
gdf = gdf[1:].copy()
assert_eq(exp, gdf)
@pytest.mark.parametrize(
"data",
[
pd.Series([1, 2, 3, 89]),
pd.Series([1, 2, 3, 89, 3, 1, 89], dtype="category"),
pd.Series(["1", "2", "3", "4", "5"], dtype="category"),
pd.Series(["1.0", "2.5", "3.001", "9"], dtype="category"),
pd.Series(["1", "2", "3", None, "4", "5"], dtype="category"),
pd.Series(["1.0", "2.5", "3.001", None, "9"], dtype="category"),
pd.Series(["a", "b", "c", "c", "b", "a", "b", "b"]),
pd.Series(["aa", "b", "c", "c", "bb", "bb", "a", "b", "b"]),
pd.Series([1, 2, 3, 89, None, np.nan, np.NaN], dtype="float64"),
pd.Series([1, 2, 3, 89], dtype="float64"),
pd.Series([1, 2.5, 3.001, 89], dtype="float64"),
pd.Series([None, None, None]),
| pd.Series([], dtype="float64") | pandas.Series |
from unittest import TestCase
import sklearn_pmml_model
from sklearn_pmml_model.naive_bayes import PMMLGaussianNB
from sklearn.naive_bayes import GaussianNB
import pandas as pd
import numpy as np
from os import path
from io import StringIO
BASE_DIR = path.dirname(sklearn_pmml_model.__file__)
class TestNaiveBayes(TestCase):
def test_invalid_model(self):
with self.assertRaises(Exception) as cm:
PMMLGaussianNB(pmml=StringIO("""
<PMML xmlns="http://www.dmg.org/PMML-4_4" version="4.4">
<DataDictionary>
<DataField name="Class" optype="categorical" dataType="string">
<Value value="setosa"/>
<Value value="versicolor"/>
<Value value="virginica"/>
</DataField>
</DataDictionary>
<MiningSchema>
<MiningField name="Class" usageType="target"/>
</MiningSchema>
</PMML>
"""))
assert str(cm.exception) == 'PMML model does not contain NaiveBayesModel.'
def test_unsupported_distribution(self):
with self.assertRaises(Exception) as cm:
PMMLGaussianNB(pmml=StringIO("""
<PMML xmlns="http://www.dmg.org/PMML-4_3" version="4.3">
<DataDictionary>
<DataField name="Class" optype="categorical" dataType="string">
<Value value="setosa"/>
<Value value="versicolor"/>
<Value value="virginica"/>
</DataField>
<DataField name="a" optype="continuous" dataType="double"/>
</DataDictionary>
<NaiveBayesModel>
<MiningSchema>
<MiningField name="Class" usageType="target"/>
</MiningSchema>
<BayesInputs>
<BayesInput fieldName="a">
<TargetValueStats>
<TargetValueStat value="setosa">
<PoissonDistribution mean="2.80188679245283"/>
</TargetValueStat>
</TargetValueStats>
</BayesInput>
</BayesInputs>
</NaiveBayesModel>
</PMML>
"""))
assert str(cm.exception) == 'Distribution "PoissonDistribution" not implemented, or not supported by scikit-learn'
class TestGaussianNBIntegration(TestCase):
def setUp(self):
df = pd.read_csv(path.join(BASE_DIR, '../models/categorical-test.csv'))
Xte = df.iloc[:, 1:]
Xte = | pd.get_dummies(Xte, prefix_sep='') | pandas.get_dummies |
import os, os.path
import pandas as pd
from requests.auth import HTTPBasicAuth
from tqdm.auto import tqdm
import requests
import time
import datetime
from apikeys import key
def currency_pair_exists(currency_pair):
'''
Check if the currency pair exists
:param str currency_pair: Currency pair (ex btcusd)
'''
url = f"https://www.bitstamp.net/api/v2/ohlc/{currency_pair}/?step=60&limit=1"
headers = {"Accept": "application/json"}
auth = HTTPBasicAuth('apikey', key.apikey)
response = requests.get(url, headers=headers , auth=auth)
if response.text == "":
return False
try:
response.json()["data"]
except TypeError:
return False
return True
def get_data(currency_pair, end=None, start=None, step=60, limit=1000):
'''
Get Bitstamp historical OHLC data
:param str currency_pair: Currency pair (e.g. btcusd)
:param str end: Final date in "%d/%m/%Y %H %M %S" format; defaults to now
:param start: Optional start value, passed straight through to the API query string
:param int step: Step in seconds: 60, 180, 300, 900, 1800, 3600, 7200, 14400, 21600, 43200, 86400 or 259200
:param int limit: How many steps to return
'''
if end:
end = int(time.mktime(datetime.datetime.strptime(end, "%d/%m/%Y %H %M %S").timetuple()))
else:
end = int(datetime.datetime.now().timestamp())
url = f"https://www.bitstamp.net/api/v2/ohlc/{currency_pair}/?step={step}&limit={limit}&end={end}"
if start:
url = f"https://www.bitstamp.net/api/v2/ohlc/{currency_pair}/?step={step}&limit={limit}&start={start}"
headers = {"Accept": "application/json"}
auth = HTTPBasicAuth('apikey', key.apikey)
return requests.get(url, headers=headers , auth=auth)
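# Example call (illustrative only; the pair, date, and a valid key in
# apikeys.key.apikey are assumptions):
# resp = get_data("btcusd", end="01/01/2021 00 00 00", step=3600, limit=24)
# ohlc = pd.DataFrame(resp.json()["data"]["ohlc"]).astype(float)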
def check_availability(currency_pair):
'''
Return first and last available dates on dataset for currency_pair and dataset if available
:param str currency_pair: Currency pair (ex btcusd)
:raise ValueError: if currency_pair not in database
'''
path = f"database/{currency_pair}.pkl"
if not os.path.isfile(path):
raise ValueError("Currency pair not found in the database")
df = pd.read_pickle(path)
return df.index[0], df.index[-1], df
def populate_dataset(currency_pair, step=60, limit=1000, n_requests=100):
'''
Populate dataset for currency_pair
:param str currency_pair: Currency pair (ex btcusd)
:param int step: Seconds step, 60, 180, 300, 900, 1800, 3600, 7200, 14400, 21600, 43200, 86400, 259200
:param int limit: How many steps
:param int n_requests: How many requests, max 8000 per 10 minutes
'''
if not currency_pair_exists(currency_pair):
raise ValueError("This currency pair is not available to download.")
if not os.path.isdir('database'):
if os.path.isdir('../database'):
os.chdir("..")
else:
raise FileNotFoundError("Can't find database folder, you are in the wrong folder.")
try:
start, _, old_df = check_availability(currency_pair)
except ValueError:
print("Currency pair not found in the database, creating new dataset...")
start = datetime.datetime.strptime("15/02/2021", "%d/%m/%Y")
old_df = pd.DataFrame([])
datas = [get_data(
currency_pair=currency_pair,
step=step,
limit=limit,
end=(start - datetime.timedelta(seconds=step*limit)*i).strftime("%d/%m/%Y %H %M %S"))
for i in tqdm(range(n_requests))]
df = pd.concat([pd.DataFrame(data.json()["data"]["ohlc"]) for data in reversed(datas)]).astype(float)
df.timestamp = df.timestamp.astype(int)
df.index = | pd.to_datetime(df.timestamp, unit='s') | pandas.to_datetime |
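# Illustrative usage of populate_dataset (values are arbitrary; a configured API
# key, network access, and the database/ folder are assumed):
# populate_dataset("btcusd", step=60, limit=1000, n_requests=10)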
import pytest
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map
from pandas.errors import OutOfBoundsDatetime
from pandas import Period, Timestamp, offsets
class TestFreqConversion:
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize("freq", ["A", "Q", "M", "W", "B", "D"])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period("0001-01-01", freq=freq)
tup1 = (per.year, per.hour, per.day)
prev = per - 1
assert prev.ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period("0001-01-01", "D") + 6
per2 = Period("0001-01-01", "D") - 6
week1 = per1.asfreq("W")
week2 = per2.asfreq("W")
assert week1 != week2
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
def test_to_timestamp_out_of_bounds(self):
# GH#19643, used to incorrectly give Timestamp in 1754
per = Period("0001-01-01", freq="B")
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq="A", year=2007)
result1 = val.asfreq("5t")
result2 = val.asfreq("t")
expected = Period("2007-12-31 23:59", freq="t")
assert result1.ordinal == expected.ordinal
assert result1.freqstr == "5T"
assert result2.ordinal == expected.ordinal
assert result2.freqstr == "T"
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq="A", year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
ival_A_to_M_start = Period(freq="M", year=2007, month=1)
ival_A_to_M_end = Period(freq="M", year=2007, month=12)
ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_A_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_A_to_T_end = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_A_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_A_to_S_end = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1)
assert ival_A.asfreq("Q", "S") == ival_A_to_Q_start
assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end
assert ival_A.asfreq("M", "s") == ival_A_to_M_start
assert ival_A.asfreq("M", "E") == ival_A_to_M_end
assert ival_A.asfreq("W", "S") == ival_A_to_W_start
assert ival_A.asfreq("W", "E") == ival_A_to_W_end
assert ival_A.asfreq("B", "S") == ival_A_to_B_start
assert ival_A.asfreq("B", "E") == ival_A_to_B_end
assert ival_A.asfreq("D", "S") == ival_A_to_D_start
assert ival_A.asfreq("D", "E") == ival_A_to_D_end
assert ival_A.asfreq("H", "S") == ival_A_to_H_start
assert ival_A.asfreq("H", "E") == ival_A_to_H_end
assert ival_A.asfreq("min", "S") == ival_A_to_T_start
assert ival_A.asfreq("min", "E") == ival_A_to_T_end
assert ival_A.asfreq("T", "S") == ival_A_to_T_start
assert ival_A.asfreq("T", "E") == ival_A_to_T_end
assert ival_A.asfreq("S", "S") == ival_A_to_S_start
assert ival_A.asfreq("S", "E") == ival_A_to_S_end
assert ival_AJAN.asfreq("D", "S") == ival_AJAN_to_D_start
assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end
assert ival_AJUN.asfreq("D", "S") == ival_AJUN_to_D_start
assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end
assert ival_ANOV.asfreq("D", "S") == ival_ANOV_to_D_start
assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
assert ival_A.asfreq("A") == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq="Q", year=2007, quarter=1)
ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq="A", year=2007)
ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_Q_to_T_end = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_Q_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_Q_to_S_end = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
assert ival_Q.asfreq("A") == ival_Q_to_A
assert ival_Q_end_of_year.asfreq("A") == ival_Q_to_A
assert ival_Q.asfreq("M", "S") == ival_Q_to_M_start
assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
assert ival_Q.asfreq("W", "S") == ival_Q_to_W_start
assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end
assert ival_Q.asfreq("B", "S") == ival_Q_to_B_start
assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end
assert ival_Q.asfreq("D", "S") == ival_Q_to_D_start
assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end
assert ival_Q.asfreq("H", "S") == ival_Q_to_H_start
assert ival_Q.asfreq("H", "E") == ival_Q_to_H_end
assert ival_Q.asfreq("Min", "S") == ival_Q_to_T_start
assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end
assert ival_Q.asfreq("S", "S") == ival_Q_to_S_start
assert ival_Q.asfreq("S", "E") == ival_Q_to_S_end
assert ival_QEJAN.asfreq("D", "S") == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq("D", "S") == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end
assert ival_Q.asfreq("Q") == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq="M", year=2007, month=1)
ival_M_end_of_year = Period(freq="M", year=2007, month=12)
ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
ival_M_to_A = Period(freq="A", year=2007)
ival_M_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_M_to_H_start = | Period(freq="H", year=2007, month=1, day=1, hour=0) | pandas.Period |
# util.py
from __future__ import print_function
from collections import Mapping, OrderedDict
import datetime
import itertools
import random
import warnings
import pandas as pd
np = pd.np
from scipy import integrate
from matplotlib import pyplot as plt
import seaborn
from scipy.optimize import minimize
from scipy.signal import correlate
from titlecase import titlecase
from pug.nlp.util import listify, fuzzy_get, make_timestamp
def dropna(x):
"""Delete all NaNs and and infinities in a sequence of real values
Returns:
list: Array of all values in x that are between -inf and +inf, exclusive
"""
return [x_i for x_i in listify(x) if float('-inf') < x_i < float('inf')]
def rms(x):
""""Root Mean Square"
Arguments:
x (seq of float): A sequence of numerical values
Returns:
The square root of the average of the squares of the values
math.sqrt(sum(x_i**2 for x_i in x) / len(x))
or
return (np.array(x) ** 2).mean() ** 0.5
>>> rms([0, 2, 4, 4])
3.0
"""
try:
return (np.array(x) ** 2).mean() ** 0.5
except:
x = np.array(dropna(x))
invN = 1.0 / len(x)
return (sum(invN * (x_i ** 2) for x_i in x)) ** .5
def rmse(target, prediction, relative=False, percent=False):
"""Root Mean Square Error
This seems like a simple formula that you'd never need to create a function for.
But my mistakes on coding challenges have convinced me that I do need it,
as a reminder of important tweaks, if nothing else.
>>> rmse([0, 1, 4, 3], [2, 1, 0, -1])
3.0
>>> rmse([0, 1, 4, 3], [2, 1, 0, -1], relative=True) # doctest: +ELLIPSIS
1.2247...
>>> rmse([0, 1, 4, 3], [2, 1, 0, -1], percent=True) # doctest: +ELLIPSIS
122.47...
"""
relative = relative or percent
prediction = pd.np.array(prediction)
target = np.array(target)
err = prediction - target
if relative:
denom = target
# Avoid ZeroDivisionError: divide by prediction rather than target where target==0
denom[denom == 0] = prediction[denom == 0]
# If the prediction and target are both 0, then the error is 0 and should be included in the RMSE
# Otherwise, the np.isinf() below would remove all these zero-error predictions from the array.
denom[(denom == 0) & (target == 0)] = 1
err = (err / denom)
err = err[(~ np.isnan(err)) & (~ np.isinf(err))]
return 100 * rms(err) if percent else rms(err)
def blended_rolling_apply(series, window=2, fun=pd.np.mean):
new_series = pd.Series(np.fromiter((fun(series[:i + 1]) for i in range(window - 1)),
type(series.values[0])), index=series.index[:window - 1]).append(
pd.rolling_apply(series.copy(), window, fun)[window - 1:])
assert len(series) == len(new_series), (
"blended_rolling_apply should always return a series of the same length!\n"
" len(series) = {0} != {1} = len(new_series".format(len(series), len(new_series)))
assert not any(np.isnan(val) or val is None for val in new_series)
return new_series
def rolling_latch(series, period=31, decay=1.0):
# FIXME: implement a recursive exponential decay filter rather than the non-recursive derating done here
return blended_rolling_apply(series, period, lambda val: decay * | pd.np.max(val) | pandas.np.max |
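# A sketch of the recursive filter the FIXME above asks for (latch onto new
# maxima, then let the held value decay geometrically between peaks); this is an
# assumption about the intended semantics, not this package's implementation.
def recursive_latch_sketch(series, decay=0.99):
    held = float(series.iloc[0])
    out = []
    for val in series:
        # Re-latch on new highs, otherwise let the latched peak decay.
        held = max(float(val), decay * held)
        out.append(held)
    return pd.Series(out, index=series.index)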
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
data_temp = (data['Close'] - data['Low']) -(data['High'] - data['Close'])\
/(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
This one is similar to alpha14.
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
temp_delay = Delay(temp,3)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])
alpha = SMA(temp2,12,1)
alpha.columns = ['alpha22']
return alpha
@timer
def alpha23(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['Close','close_std','close_delay']
data['temp'] = data['close_std']
data['temp'][data['Close'] <= data['close_delay']] = 0
temp = pd.DataFrame(data['temp'])
sma1 = SMA(temp,20,1)
sma2 = SMA(pd.DataFrame(data['close_std']),20,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'])
alpha.columns = ['alpha23']
return alpha
@timer
def alpha24(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis=1 ,join = 'inner' )
temp = data['Close'] - data['close_delay']
temp = pd.DataFrame(temp)
alpha = SMA(temp,5,1)
alpha.columns = ['alpha24']
return alpha
@timer
def alpha25(self):
close = self.close
close_delta = Delta(close,7)
ret = self.ret
r1 = Rank(close_delta)
r3 = Rank(Sum(ret,250))
volume = self.volume
volume_mean = Mean(pd.DataFrame(volume['Vol']),20)
volume_mean.columns = ['volume_mean']
data = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
temp0 = pd.DataFrame(data['Vol']/data['volume_mean'])
temp = DecayLinear(temp0,9)
r2 = Rank(temp)
rank = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = pd.DataFrame(-1 * rank['r1'] * (1 - rank['r2']) * rank['r3'])
alpha.columns = ['alpha25']
return alpha
@timer
def alpha26(self):
close = self.close
vwap = self.vwap
close_mean7 = Mean(close,7)
close_mean7.columns = ['close_mean7']
close_delay5 = Delay(close,5)
close_delay5.columns = ['close_delay5']
data = pd.concat([vwap,close_delay5],axis = 1,join = 'inner')
corr = Corr(data,230)
corr.columns = ['corr']
data_temp = pd.concat([corr,close_mean7,close],axis = 1,join = 'inner')
alpha = data_temp['close_mean7'] - data_temp['Close'] + data_temp['corr']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha26']
return alpha
@timer
def alpha27(self):
"""
incomplete
"""
close = self.close
close_delay3 = Delay(close,3)
close_delay6 = Delay(close,6)
data = pd.concat([close,close_delay3,close_delay6],axis = 1,join = 'inner')
data.columns = ['close','close_delay3','close_delay6']
temp1 = pd.DataFrame((data['close'] - data['close_delay3'])/data['close_delay3'] * 100)
temp2 = pd.DataFrame((data['close'] - data['close_delay6'])/data['close_delay6'] * 100)
data_temp = pd.concat([temp1,temp2],axis = 1,join = 'inner')
data_temp.columns = ['temp1','temp2']
temp = pd.DataFrame(data_temp['temp1'] + data_temp['temp2'])
alpha = DecayLinear(temp,12)
alpha.columns = ['alpha27']
return alpha
@timer
def alpha28(self):
close = self.close
low = self.low
high = self.high
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['Close','low_min','high_max']
temp1 = pd.DataFrame((data['Close'] - data['low_min']) /(data['high_max'] - data['low_min']))
sma1 = SMA(temp1,3,1)
sma2 = SMA(sma1,3,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] * 2 - sma['sma2'] * 3)
alpha.columns = ['alpha28']
return alpha
@timer
def alpha29(self):
close = self.close
volume = self.volume
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay'] * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha29']
return alpha
@timer
def alpha30(self):
"""Incomplete: the regression on the Fama factors (see get_fama) is missing, so this method currently returns None."""
close = self.close
close_delay = Delay(close,1)
@timer
def alpha31(self):
close = self.close
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha31']
return alpha
@timer
def alpha32(self):
volume = self.volume
high = self.high
r1 = Rank(volume)
r2 = Rank(high)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,3)
r = Rank(corr)
alpha = -1 * Sum(r,3)
alpha.columns = ['alpha32']
return alpha
@timer
def alpha33(self):
low = self.low
volume = self.volume
ret = self.ret
low_min = TsMin(low,5)
low_min_delay = Delay(low_min,5)
data1 = pd.concat([low_min,low_min_delay],axis = 1,join = 'inner')
data1.columns = ['low_min','low_min_delay']
ret_sum240 = Sum(ret,240)
ret_sum20 = Sum(ret,20)
ret_temp = pd.concat([ret_sum240,ret_sum20],axis = 1, join = 'inner')
ret_temp.columns = ['ret240','ret20']
temp1 = pd.DataFrame(data1['low_min_delay'] - data1['low_min'])
temp2 = pd.DataFrame((ret_temp['ret240'] - ret_temp['ret20'])/220)
r_temp2 = Rank(temp2)
r_volume = TsRank(volume,5)
temp = pd.concat([temp1,r_temp2,r_volume],axis = 1,join = 'inner')
temp.columns = ['temp1','r_temp2','r_volume']
alpha = temp['temp1'] * temp['r_temp2'] * temp['r_volume']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha33']
return alpha
@timer
def alpha34(self):
close = self.close
close_mean = Mean(close,12)
close_mean.columns = ['close_mean']
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
alpha = pd.DataFrame(data['close_mean']/data['Close'])
alpha.columns = ['alpha34']
return alpha
@timer
def alpha35(self):
volume = self.volume
Open = self.open
open_delay = Delay(Open,1)
open_delay.columns = ['open_delay']
open_linear = DecayLinear(Open,17)
open_linear.columns = ['open_linear']
open_delay_temp = DecayLinear(open_delay,15)
r1 = Rank(open_delay_temp)
data = pd.concat([Open,open_linear],axis = 1,join = 'inner')
Open_temp = data['Open'] * 0.65 + 0.35 * data['open_linear']
rank = pd.concat([volume,Open_temp],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,7)
r2 = Rank(-1 * corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = Cross_min(pd.DataFrame(r['r1']),pd.DataFrame(r['r2']))
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha35']
return alpha
@timer
def alpha36(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,6)
temp = Sum(corr,2)
alpha = Rank(temp)
alpha.columns = ['alpha36']
return alpha
@timer
def alpha37(self):
Open = self.open
ret = self.ret
open_sum = Sum(Open,5)
ret_sum = Sum(ret,5)
data = pd.concat([open_sum,ret_sum],axis = 1,join = 'inner')
data.columns = ['open_sum','ret_sum']
temp = data['open_sum'] * data['ret_sum']
temp_delay = Delay(temp,10)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
alpha = -1 * Rank(pd.DataFrame(data_temp['temp'] - data_temp['temp_delay']))
alpha.columns = ['alpha37']
return alpha
@timer
def alpha38(self):
high = self.high
high_mean = Mean(high,20)
high_delta = Delta(high,2)
data = pd.concat([high,high_mean,high_delta],axis = 1,join = 'inner')
data.columns = ['high','high_mean','high_delta']
data['alpha'] = -1 * data['high_delta']
data['alpha'][data['high_mean'] >= data['high']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha38']
return alpha
@timer
def alpha39(self):
close = self.close
Open = self.open
vwap = self.vwap
volume = self.volume
close_delta2 = Delta(close,2)
close_delta2_decay = DecayLinear(close_delta2,8)
r1 = Rank(close_delta2_decay)
price_temp = pd.concat([vwap,Open],axis = 1,join = 'inner')
price = pd.DataFrame(price_temp['Vwap'] * 0.3 + price_temp['Open'] * 0.7)
volume_mean = Mean(volume,180)
volume_mean_sum = Sum(volume_mean,37)
rank = pd.concat([price,volume_mean_sum],axis = 1,join = 'inner')
corr = Corr(rank,14)
corr_decay = DecayLinear(corr,12)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns = ['alpha39']
return alpha
@timer
def alpha40(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
data = pd.concat([close,volume,close_delay],axis = 1, join = 'inner')
data.columns = ['close','volume','close_delay']
data['temp1'] = data['volume']
data['temp2'] = data['volume']
data['temp1'][data['close'] <= data['close_delay']] = 0
data['temp2'][data['close'] > data['close_delay']] = 0
s1 = Sum(pd.DataFrame(data['temp1']),26)
s2 = Sum(pd.DataFrame(data['temp2']),26)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha40']
return alpha
@timer
def alpha41(self):
vwap = self.vwap
vwap_delta = Delta(vwap,3)
vwap_delta_max = TsMax(vwap_delta,5)
alpha = -1 * Rank(vwap_delta_max)
alpha.columns = ['alpha41']
return alpha
@timer
def alpha42(self):
high = self.high
volume = self.volume
high_std = STD(high,10)
r1 = Rank(high_std)
data = pd.concat([high,volume],axis = 1,join = 'inner')
corr = Corr(data,10)
r = pd.concat([r1,corr],axis = 1,join = 'inner')
r.columns = ['r1','corr']
alpha = pd.DataFrame(-1 * r['r1'] * r['corr'])
alpha.columns = ['alpha42']
return alpha
@timer
def alpha43(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,6)
alpha.columns = ['alpha43']
return alpha
@timer
def alpha44(self):
volume = self.volume
vwap = self.vwap
low = self.low
volume_mean = Mean(volume,10)
rank = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(rank,7)
corr_decay = DecayLinear(corr,6)
r1 = TsRank(corr_decay,4)
vwap_delta = Delta(vwap,3)
vwap_delta_decay = DecayLinear(vwap_delta,10)
r2 = TsRank(vwap_delta_decay,15)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha44']
return alpha
@timer
def alpha45(self):
volume = self.volume
vwap = self.vwap
close = self.close
Open = self.open
price = pd.concat([close,Open],axis = 1,join = 'inner')
price['price'] = price['Close'] * 0.6 + price['Open'] * 0.4
price_delta = Delta(pd.DataFrame(price['price']),1)
r1 = Rank(price_delta)
volume_mean = Mean(volume,150)
data = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,15)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha45']
return alpha
@timer
def alpha46(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close,close_mean3,close_mean6,close_mean12,close_mean24],axis = 1,join = 'inner')
data.columns = ['c','c3','c6','c12','c24']
alpha = (data['c3'] + data['c6'] + data['c12'] + data['c24'])/(4 * data['c'])
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha46']
return alpha
@timer
def alpha47(self):
close = self.close
low = self.low
high = self.high
high_max = TsMax(high,6)
low_min = TsMin(low,6)
data = pd.concat([high_max,low_min,close],axis = 1,join = 'inner')
data.columns = ['high_max','low_min','close']
temp = pd.DataFrame((data['high_max'] - data['close'])/(data['high_max'] - \
data['low_min']) * 100)
alpha = SMA(temp,9,1)
alpha.columns = ['alpha47']
return alpha
@timer
def alpha48(self):
close = self.close
volume = self.volume
temp1 = Delta(close,1)
temp1_delay1 = Delay(temp1,1)
temp1_delay2 = Delay(temp1,2)
data = pd.concat([temp1,temp1_delay1,temp1_delay2],axis = 1,join = 'inner')
data.columns = ['temp1','temp1_delay1','temp1_delay2']
temp2 = pd.DataFrame(np.sign(data['temp1']) + np.sign(data['temp1_delay1']) \
+ np.sign(data['temp1_delay2']))
volume_sum5 = Sum(volume,5)
volume_sum20 = Sum(volume,20)
data_temp = pd.concat([temp2,volume_sum5,volume_sum20],axis = 1,join = 'inner')
data_temp.columns = ['temp2','volume_sum5','volume_sum20']
temp3 = pd.DataFrame(data_temp['temp2'] * data_temp['volume_sum5']/\
data_temp['volume_sum20'])
alpha = -1 * Rank(temp3)
alpha.columns = ['alpha48']
return alpha
@timer
def alpha49(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 0
price['temp'][price['sum'] < price['sum_delay']] = 1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha49']
return alpha
@timer
def alpha50(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = -1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha50']
return alpha
@timer
def alpha51(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = 0
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha51']
return alpha
@timer
def alpha52(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
data['sum_delay'] = Delay(pd.DataFrame((data['High'] + data['Low'] + data['Close'])/3),1)
temp1 = pd.DataFrame(data['High'] - data['sum_delay'])
temp1.columns = ['high_diff']
temp2 = pd.DataFrame(data['sum_delay'] - data['Low'])
temp2.columns = ['low_diff']
temp1['max'] = temp1['high_diff']
temp1['max'][temp1['high_diff'] < 0 ] = 0
temp2['max'] = temp2['low_diff']
temp2['max'][temp2['low_diff'] < 0 ] = 0
temp1_sum = Sum(pd.DataFrame(temp1['max']),26)
temp2_sum = Sum(pd.DataFrame(temp2['max']),26)
alpha_temp = pd.concat([temp1_sum,temp2_sum],axis = 1,join = 'inner')
alpha_temp.columns = ['s1','s2']
alpha = pd.DataFrame(alpha_temp['s1']/alpha_temp['s2'] * 100)
alpha.columns = ['alpha52']
return alpha
@timer
def alpha53(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,12)
alpha = count/12.0 * 100
alpha.columns = ['alpha53']
return alpha
@timer
def alpha54(self):
Open = self.open
close = self.close
data = pd.concat([Open,close], axis = 1, join = 'inner')
data.columns = ['close','open']
temp = pd.DataFrame(data['close'] - data['open'])
temp_abs = pd.DataFrame(np.abs(temp))
df = pd.concat([temp,temp_abs], axis = 1, join= 'inner')
df.columns = ['temp','abs']
std = STD(pd.DataFrame(df['temp'] + df['abs']),10)
corr = Corr(data,10)
data1 = pd.concat([corr,std],axis = 1, join = 'inner')
data1.columns = ['corr','std']
alpha = Rank(pd.DataFrame(data1['corr'] + data1['std'])) * -1
alpha.columns = ['alpha54']
return alpha
@timer
def alpha55(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
data1['judge4'][data1['abs2'] > data1['abs1']] = 1 # second branch needs abs2 to exceed both abs3 (judge3 above) and abs1 (judge4 here); the original line overwrote judge3 and left judge4 always zero
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
tep = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha = Sum(tep,20)
alpha.columns = ['alpha55']
return alpha
@timer
def alpha56(self):
low = self.low
high = self.high
volume = self.volume
Open = self.open
open_min = TsMin(Open,12)
data1 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data1.columns = ['open','open_min']
r1 = Rank(pd.DataFrame(data1['open'] - data1['open_min']))
volume_mean = Mean(volume,40)
volume_mean_sum= Sum(volume_mean,19)
data2 = pd.concat([high,low],axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
rank = pd.concat([temp,volume_mean_sum],axis = 1 , join = 'inner')
rank.columns = ['temp','volume_mean_sum']
corr = Corr(rank,13)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] >= r['r2']] = 1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha56']
return alpha
@timer
def alpha57(self):
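# Stochastic %K over a 9-day window, smoothed with SMA(.,3,1).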
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = pd.DataFrame((data['close'] - data['low_min'])/(data['high_max'] \
- data['low_min']) * 100)
alpha = SMA(temp,3,1)
alpha.columns = ['alpha57']
return alpha
@timer
def alpha58(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,20)
alpha = count/20.0 * 100
alpha.columns = ['alpha58']
return alpha
@timer
def alpha59(self):
low = self.low
high = self.high
close = self.close
close_delay = Delay(close,1)
max_temp = pd.concat([high,close_delay],axis = 1,join = 'inner')
min_temp = pd.concat([low,close_delay],axis = 1,join = 'inner')
max_temp1 = pd.DataFrame(np.max(max_temp,axis = 1))
min_temp1 = pd.DataFrame(np.min(min_temp,axis = 1))
data = pd.concat([close,close_delay,max_temp1,min_temp1],axis = 1,join = 'inner')
data.columns = ['close','close_delay','max','min']
data['max'][data['close'] > data['close_delay']] = 0
data['min'][data['close'] <= data['close_delay']] = 0
alpha = pd.DataFrame(data['max'] + data['min'])
alpha.columns = ['alpha59']
return alpha
@timer
def alpha60(self):
low = self.low
high = self.high
close = self.close
volume = self.volume
data = pd.concat([low,high,close,volume],axis = 1,join = 'inner')
temp = pd.DataFrame((2 * data['Close'] - data['Low'] - data['High'])/(data['Low'] + \
data['High']) * data['Vol'])
alpha = Sum(temp,20)
alpha.columns = ['alpha60']
return alpha
@timer
def alpha61(self):
low = self.low
volume = self.volume
vwap = self.vwap
vwap_delta = Delta(vwap,1)
vwap_delta_decay = DecayLinear(vwap_delta,12)
r1 = Rank(vwap_delta_decay)
volume_mean = Mean(volume,80)
data = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,8)
corr_decay = DecayLinear(corr,17)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) * -1)
alpha.columns = ['alpha61']
return alpha
@timer
def alpha62(self):
high = self.high
volume = self.volume
volume_r = Rank(volume)
data = pd.concat([high,volume_r],axis = 1,join = 'inner')
alpha = -1 * Corr(data,5)
alpha.columns = ['alpha62']
return alpha
@timer
def alpha63(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),6,1)
sma2 = SMA(pd.DataFrame(data['abs']),6,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha63']
return alpha
@timer
def alpha64(self):
vwap = self.vwap
volume = self.volume
close = self.close
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data1 = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr1 = Corr(data1,4)
corr1_decay = DecayLinear(corr1,4)
r1 = Rank(corr1_decay)
close_mean = Mean(close,60)
close_r = Rank(close)
close_mean_r = Rank(close_mean)
data2 = pd.concat([close_r,close_mean_r],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_max = TsMax(corr2,13)
corr2_max_decay = DecayLinear(corr2_max,14)
r2 = Rank(corr2_max_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) *-1)
alpha.columns = ['alpha64']
return alpha
@timer
def alpha65(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = pd.DataFrame(data['close_mean']/data['close'])
alpha.columns = ['alpha65']
return alpha
@timer
def alpha66(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha66']
return alpha
@timer
def alpha67(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),24,1)
sma2 = SMA(pd.DataFrame(data['abs']),24,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha67']
return alpha
@timer
def alpha68(self):
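# SMA(15,2) of: the 1-day change in the high/low midpoint, scaled by (high - low)/volume.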
high = self.high
volume = self.volume
low = self.low
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['sum']= (data['High'] + data['Low'])/2
data['sum_delta'] = Delta(pd.DataFrame(data['sum']),1)
temp = data['sum_delta'] * (data['High'] - data['Low'])/data['Vol']
alpha = SMA(pd.DataFrame(temp),15,2)
alpha.columns = ['alpha68']
return alpha
@timer
def alpha69(self):
high = self.high
low = self.low
Open = self.open
dtm = DTM(Open,high)
dbm = DBM(Open,low)
dtm_sum = Sum(dtm,20)
dbm_sum = Sum(dbm,20)
data = pd.concat([dtm_sum,dbm_sum],axis = 1, join = 'inner')
data.columns = ['dtm','dbm']
data['temp1'] = (data['dtm'] - data['dbm'])/data['dtm']
data['temp2'] = (data['dtm'] - data['dbm'])/data['dbm']
data['temp1'][data['dtm'] <= data['dbm']] = 0
data['temp2'][data['dtm'] >= data['dbm']] = 0
alpha = pd.DataFrame(data['temp1'] + data['temp2'])
alpha.columns = ['alpha69']
return alpha
@timer
def alpha70(self):
amount = self.amt
alpha= STD(amount,6)
alpha.columns = ['alpha70']
return alpha
@timer
def alpha71(self):
close = self.close
close_mean = Mean(close,24)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha71']
return alpha
@timer
def alpha72(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),15,1)
alpha.columns = ['alpha72']
return alpha
@timer
def alpha73(self):
vwap = self.vwap
volume = self.volume
close = self.close
data1 = pd.concat([close,volume],axis = 1,join = 'inner')
corr1 = Corr(data1,10)
corr1_decay = DecayLinear(DecayLinear(corr1,16),4)
r1 = TsRank(corr1_decay,5)
volume_mean = Mean(volume,30)
data2 = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1,join ='inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns= ['alpha73']
return alpha
@timer
def alpha74(self):
vwap = self.vwap
volume = self.volume
low = self.low
volume_mean = Mean(volume,40)
volume_mean_sum = Sum(volume_mean,20)
data1 = pd.concat([low,vwap],axis = 1,join = 'inner')
data_sum = Sum(pd.DataFrame(data1['Low'] * 0.35 + data1['Vwap'] * 0.65),20)
data = pd.concat([volume_mean_sum,data_sum],axis = 1,join = 'inner')
corr = Corr(data,7)
r1 = Rank(corr)
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data_temp = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr2 = Corr(data_temp,6)
r2 = Rank(corr2)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha74']
return alpha
@timer
def alpha75(self):
close = self.close
Open = self.open
close_index = self.close_index
open_index = self.open_index
data1 = pd.concat([close,Open], axis = 1, join = 'inner')
data1.columns = ['close','open']
data1['temp'] = 1
data1['temp'][data1['close'] <= data1['open']] = 0
data2 = pd.concat([close_index,open_index], axis = 1, join = 'inner')
data2.columns = ['close','open']
data2['tep'] = 1
data2['tep'][data2['close'] > data2['open']] = 0
temp = data1['temp'].unstack()
tep = data2['tep'].unstack()
tep1 = repmat(tep,1,np.size(temp,1))
data3 = temp * tep1
temp_result = data3.rolling(50,min_periods = 50).sum()
tep_result = tep.rolling(50,min_periods = 50).sum()
tep2_result = np.matlib.repmat(tep_result,1,np.size(temp,1))
result = temp_result/tep2_result
alpha = pd.DataFrame(result.stack())
alpha.columns = ['alpha75']
return alpha
@timer
def alpha76(self):
volume = self.volume
close = self.close
close_delay = Delay(close,1)
data = pd.concat([volume,close,close_delay],axis = 1,join = 'inner')
data.columns = ['volume','close','close_delay']
temp = pd.DataFrame(np.abs((data['close']/data['close_delay'] -1 )/data['volume']))
temp_std = STD(temp,20)
temp_mean = Mean(temp,20)
data_temp = pd.concat([temp_std,temp_mean],axis = 1,join = 'inner')
data_temp.columns = ['std','mean']
alpha = pd.DataFrame(data_temp['std']/data_temp['mean'])
alpha.columns = ['alpha76']
return alpha
@timer
def alpha77(self):
vwap = self.vwap
volume = self.volume
low = self.low
high = self.high
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
temp = pd.DataFrame((data['High'] + data['Low'])/2 - data['Vwap'])
temp_decay = DecayLinear(temp,20)
r1 = Rank(temp_decay)
temp1 = pd.DataFrame((data['High'] + data['Low'])/2)
volume_mean = Mean(volume,40)
data2 = pd.concat([temp1,volume_mean],axis = 1,join = 'inner')
corr = Corr(data2,3)
corr_decay = DecayLinear(corr,6)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.min(r,axis = 1))
alpha.columns = ['alpha77']
return alpha
@timer
def alpha78(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
temp = pd.DataFrame((data['Low'] + data['High'] + data['Close'])/3)
temp.columns = ['temp']
temp_mean = Mean(temp,12)
temp_mean.columns = ['temp_mean']
temp2 = pd.concat([temp,temp_mean],axis = 1,join = 'inner')
tmp = pd.DataFrame(temp2['temp'] - temp2['temp_mean'])
data1 = pd.concat([close,temp_mean],axis = 1,join = 'inner')
temp_abs = pd.DataFrame(np.abs(data1['Close'] - data1['temp_mean']))
temp_abs_mean = Mean(temp_abs,12)
df = pd.concat([tmp,temp_abs_mean],axis = 1,join = 'inner')
df.columns = ['df1','df2']
alpha = pd.DataFrame(df['df1']/(df['df2'] * 0.015))
alpha.columns = ['alpha78']
return alpha
@timer
def alpha79(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),12,1)
sma2 = SMA(pd.DataFrame(data['abs']),12,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha79']
return alpha
@timer
def alpha80(self):
volume = self.volume
volume_delay = Delay(volume,5)
volume_delay.columns = ['volume_delay']
data = pd.concat([volume,volume_delay],axis = 1,join = 'inner')
alpha = (data['Vol'] - data['volume_delay'])/data['volume_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha80']
return alpha
@timer
def alpha81(self):
volume = self.volume
alpha = SMA(volume,21,2)
alpha.columns = ['alpha81']
return alpha
@timer
def alpha82(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),20,1)
alpha.columns = ['alpha82']
return alpha
@timer
def alpha83(self):
high = self.high
volume = self.volume
high_r = Rank(high)
volume_r = Rank(volume)
data = pd.concat([high_r,volume_r],axis = 1,join = 'inner')
corr = Corr(data,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha83']
return alpha
@timer
def alpha84(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,20)
alpha.columns = ['alpha84']
return alpha
@timer
def alpha85(self):
close = self.close
volume = self.volume
volume_mean = Mean(volume,20)
close_delta = Delta(close,7)
data1 = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
data1.columns = ['volume','volume_mean']
temp1 = pd.DataFrame(data1['volume']/data1['volume_mean'])
r1 = TsRank(temp1,20)
r2 = TsRank(-1 * close_delta,8)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha85']
return alpha
@timer
def alpha86(self):
close = self.close
close_delay20 = Delay(close,20)
close_delay10 = Delay(close,10) # 10-day delay, matching the variable name and the (delay20 - delay10) spread below
data = pd.concat([close,close_delay20,close_delay10],axis = 1,join = 'inner')
data.columns = ['close','close_delay20','close_delay10']
temp = pd.DataFrame((data['close_delay20'] - data['close_delay10'])/10 - \
(data['close_delay10'] - data['close'])/10)
close_delta = Delta(close,1) * -1
data_temp = pd.concat([close_delta,temp],axis = 1,join = 'inner')
data_temp.columns = ['close_delta','temp']
data_temp['close_delta'][data_temp['temp'] > 0.25]= -1
data_temp['close_delta'][data_temp['temp'] < 0]= 1
alpha = pd.DataFrame(data_temp['close_delta'])
alpha.columns = ['alpha86']
return alpha
@timer
def alpha87(self):
vwap = self.vwap
high = self.high
low = self.low
Open = self.open
vwap_delta = Delta(vwap,4)
vwap_delta_decay = DecayLinear(vwap_delta,7)
r1 = Rank(vwap_delta_decay)
data = pd.concat([low,high,vwap,Open], axis = 1, join = 'inner')
temp = pd.DataFrame((data['Low'] * 0.1 + data['High'] * 0.9 - data['Vwap'])/\
(data['Open'] - 0.5 * (data['Low'] + data['High'])))
temp_decay = DecayLinear(temp,11)
r2 = TsRank(temp_decay,7)
r = pd.concat([r1,r2], axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(-1 * (r['r1'] + r['r2']))
alpha.columns = ['alpha87']
return alpha
@timer
def alpha88(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delta']
alpha = (data['close'] - data['close_delta'])/data['close_delta'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha88']
return alpha
@timer
def alpha89(self):
close = self.close
sma1 = SMA(close,13,2)
sma2 = SMA(close,27,2)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3],axis = 1, join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(2 *(data['temp'] - data['sma']))
alpha.columns = ['alpha89']
return alpha
@timer
def alpha90(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2], axis = 1, join = 'inner')
corr = Corr(rank,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha90']
return alpha
@timer
def alpha91(self):
close = self.close
volume = self.volume
low = self.low
close_max = TsMax(close,5)
data1 = pd.concat([close,close_max], axis = 1,join = 'inner')
data1.columns = ['close','close_max']
r1 = Rank(pd.DataFrame(data1['close'] - data1['close_max']))
volume_mean = Mean(volume,40)
data2 = pd.concat([volume_mean,low], axis = 1, join = 'inner')
corr = Corr(data2,5)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha91']
return alpha
@timer
def alpha92(self):
volume = self.volume
vwap = self.vwap
close = self.close
data = pd.concat([close,vwap],axis = 1, join = 'inner')
data['price'] = data['Close'] * 0.35 + data['Vwap'] * 0.65
price_delta = Delta(pd.DataFrame(data['price']),2)
price_delta_decay = DecayLinear(price_delta,3)
r1 = Rank(price_delta_decay)
volume_mean = Mean(volume,180)
rank = pd.concat([volume_mean,close],axis = 1,join = 'inner')
corr = Corr(rank,13)
temp = pd.DataFrame(np.abs(corr))
temp_decay = DecayLinear(temp,5)
r2 = TsRank(temp_decay,15)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
alpha = pd.DataFrame(-1 * np.max(r, axis = 1))
alpha.columns = ['alpha92']
return alpha
@timer
def alpha93(self):
low = self.low
Open = self.open
open_delay = Delay(Open,1)
data = pd.concat([low,Open,open_delay],axis = 1,join = 'inner')
data.columns = ['low','open','open_delay']
temp1 = pd.DataFrame(data['open'] - data['low'])
temp2 = pd.DataFrame(data['open'] - data['open_delay'])
data_temp = pd.concat([temp1,temp2],axis = 1 ,join = 'inner')
temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
temp_max.columns = ['max']
data2 = pd.concat([data,temp_max],axis = 1,join = 'inner')
data2['temp'] = data2['max']
data2['temp'][data2['open'] >= data2['open_delay']] = 0
alpha = Sum(pd.DataFrame(data2['temp']),20)
alpha.columns = ['alpha93']
return alpha
@timer
def alpha94(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,30)
alpha.columns = ['alpha94']
return alpha
@timer
def alpha95(self):
amt = self.amt
alpha = STD(amt,20)
alpha.columns = ['alpha95']
return alpha
@timer
def alpha96(self):
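# Double-smoothed stochastic: SMA(SMA(9-day %K,3,1),3,1).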
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = ( data['close'] - data['low_min'])/(data['high_max'] - data['low_min']) * 100
alpha_temp = SMA(pd.DataFrame(temp),3,1)
alpha = SMA(alpha_temp,3,1)
alpha.columns = ['alpha96']
return alpha
@timer
def alpha97(self):
volume = self.volume
alpha = STD(volume,10)
alpha.columns = ['alpha97']
return alpha
@timer
def alpha98(self):
close = self.close
close_mean = Mean(close,100)
close_mean_delta = Delta(close_mean,100)
close_delay = Delay(close,100)
data = pd.concat([close_mean_delta,close_delay],axis = 1,join = 'inner')
data.columns = ['delta','delay']
temp = pd.DataFrame(data['delta']/ data['delay'])
close_delta = Delta(close,3)
close_min = TsMin(close,100)
data_temp = pd.concat([close,close_delta,close_min,temp],axis = 1,join = 'inner')
data_temp.columns = ['close','close_delta','close_min','temp']
data_temp['diff'] = (data_temp['close'] - data_temp['close_min']) * -1
data_temp['diff'][data_temp['temp'] < 0.05] = 0
data_temp['close_delta'] = data_temp['close_delta'] * -1
data_temp['close_delta'][data_temp['temp'] >= 0.05]= 0
alpha = pd.DataFrame(data_temp['close_delta'] + data_temp['diff'])
alpha.columns = ['alpha98']
return alpha
@timer
def alpha99(self):
close = self.close
volume = self.volume
r1 = Rank(close)
r2 = Rank(volume)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
cov = Cov(r,5)
alpha = -1 * Rank(cov)
alpha.columns = ['alpha99']
return alpha
@timer
def alpha100(self):
volume = self.volume
alpha = STD(volume,20)
alpha.columns = ['alpha100']
return alpha
@timer
def alpha101(self):
close = self.close
volume = self.volume
high = self.high
vwap = self.vwap
volume_mean = Mean(volume,30)
volume_mean_sum = Sum(volume_mean,37)
data1 = pd.concat([close,volume_mean_sum], axis = 1, join = 'inner')
corr1 = Corr(data1,15)
r1 = Rank(corr1)
data2 = pd.concat([high,vwap],axis = 1, join = 'inner')
temp = pd.DataFrame(data2['High'] * 0.1 + data2['Vwap'] * 0.9)
temp_r = Rank(temp)
volume_r = Rank(volume)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,11)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] < r['r2']] = -1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha101']
return alpha
@timer
def alpha102(self):
volume = self.volume
temp = Delta(volume,1)
temp.columns = ['temp']
temp['max'] = temp['temp']
temp['max'][temp['temp'] < 0 ] = 0
temp['abs'] = np.abs(temp['temp'])
sma1 = SMA(pd.DataFrame(temp['max']),6,1)
sma2 = SMA(pd.DataFrame(temp['abs']),6,1)
sma = pd.concat([sma1,sma2], axis = 1 ,join ='inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/ sma['sma2'] * 100)
alpha.columns = ['alpha102']
return alpha
@timer
def alpha103(self):
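# Recency of the 20-day low on a 0-100 scale (100 means the low printed today).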
low = self.low
lowday = Lowday(low,20)
alpha = (20 - lowday)/20.0 * 100
alpha.columns = ['alpha103']
return alpha
@timer
def alpha104(self):
close = self.close
volume = self.volume
high = self.high
data = pd.concat([high,volume], axis = 1, join = 'inner')
corr = Corr(data,5)
corr_delta = Delta(corr,5)
close_std = STD(close,20)
r1 = Rank(close_std)
temp = pd.concat([corr_delta,r1], axis = 1, join = 'inner')
temp.columns = ['delta','r']
alpha = pd.DataFrame(-1 * temp['delta'] * temp['r'])
alpha.columns = ['alpha104']
return alpha
@timer
def alpha105(self):
volume = self.volume
Open = self.open
volume_r = Rank(volume)
open_r = Rank(Open)
rank = pd.concat([volume_r,open_r],axis = 1, join = 'inner')
alpha = -1 * Corr(rank,10)
alpha.columns = ['alpha105']
return alpha
@timer
def alpha106(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
alpha = pd.DataFrame(data['close'] - data['close_delay'])
alpha.columns = ['alpha106']
return alpha
@timer
def alpha107(self):
Open = self.open
high = self.high
close = self.close
low = self.low
high_delay = Delay(high,1)
close_delay = Delay(close,1)
low_delay = Delay(low,1)
data = pd.concat([high_delay,close_delay,low_delay,Open], axis = 1, join = 'inner')
data.columns = ['high_delay','close_delay','low_delay','open']
r1 = Rank(pd.DataFrame(data['open'] - data['high_delay']))
r2 = Rank(pd.DataFrame(data['open'] - data['close_delay']))
r3 = Rank(pd.DataFrame(data['open'] - data['low_delay']))
alpha = -1 * r1 * r2 * r3
alpha.columns = ['alpha107']
return alpha
@timer
def alpha108(self):
high = self.high
volume = self.volume
vwap = self.vwap
high_min = TsMin(high,2)
data1 = pd.concat([high,high_min], axis = 1, join = 'inner')
data1.columns = ['high','high_min']
r1 = Rank(pd.DataFrame(data1['high'] - data1['high_min']))
volume_mean = Mean(volume,120)
rank = pd.concat([vwap,volume_mean],axis = 1, join = 'inner')
corr = Corr(rank,6)
r2 = Rank(corr)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha108']
return alpha
@timer
def alpha109(self):
high = self.high
low = self.low
data = pd.concat([high,low],axis = 1, join = 'inner')
temp = SMA(pd.DataFrame(data['High'] - data['Low']),10,2)
sma = SMA(temp,10,2)
sma_temp = pd.concat([temp,sma],axis = 1, join = 'inner')
sma_temp.columns = ['temp','sma']
alpha = pd.DataFrame(sma_temp['temp']/sma_temp['sma'])
alpha.columns = ['alpha109']
return alpha
@timer
def alpha110(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([high,low,close_delay], axis = 1, join = 'inner')
data['max1'] = data['High'] - data['close_delay']
data['max2'] = data['close_delay'] - data['Low']
data['max1'][data['max1'] < 0] = 0
data['max2'][data['max2'] < 0] = 0
s1 = Sum(pd.DataFrame(data['max1']),20)
s2 = Sum(pd.DataFrame(data['max2']),20)
s = pd.concat([s1,s2], axis = 1 , join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'])
alpha.columns = ['alpha110']
return alpha
@timer
def alpha111(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
temp = pd.DataFrame(data['Vol'] * (2 * data['Close'] - data['Low'] - data['High'])\
/(data['High'] - data['Low']))
sma1 = SMA(temp,11,2)
sma2 = SMA(temp,4,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] - sma['sma2'])
alpha.columns = ['alpha111']
return alpha
@timer
def alpha112(self):
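# Number of the last 12 sessions on which the close failed to rise versus the prior close.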
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close, close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = 1
data['temp'][data['close'] > data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha112']
return alpha
@timer
def alpha113(self):
close = self.close
volume = self.volume
close_delay = Delay(close,5)
close_delay_mean = Mean(close_delay,20)
data1 = pd.concat([close,volume],axis = 1, join = 'inner')
corr = Corr(data1,2)
r1 = Rank(close_delay_mean)
data2 = pd.concat([r1,corr], axis = 1, join = 'inner')
data2.columns = ['r1','corr']
r1 = pd.DataFrame(data2['r1'] * data2['corr'])
close_sum5 = Sum(close,5)
close_sum20 = Sum(close,20)
data3 = pd.concat([close_sum5,close_sum20],axis = 1, join = 'inner')
corr2 = Corr(data3,2)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha113']
return alpha
@timer
def alpha114(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
vwap = self.vwap
close_mean = Mean(close,5)
data = pd.concat([high,low,close_mean], axis = 1, join = 'inner')
data.columns = ['high','low','close_mean']
temp = pd.DataFrame((data['high'] - data['low'])/data['close_mean'])
temp_delay = Delay(temp,2)
r1 = TsRank(temp_delay,5)
temp1 = pd.concat([temp,vwap,close], axis = 1, join = 'inner')
temp1.columns = ['temp','vwap','close']
tep = pd.DataFrame(temp1['temp']/(temp1['vwap'] - temp1['close']))
r2 = TsRank(volume,5)
data2 = pd.concat([r2,tep], axis = 1, join = 'inner')
data2.columns = ['r2','tep']
tep1 = pd.DataFrame(data2['r2']/data2['tep'])
r3 = TsRank(tep1,5)
r = pd.concat([r1,r3],axis = 1, join = 'inner')
r.columns = ['r1','r3']
alpha = pd.DataFrame(r['r1'] + r['r3'])
alpha.columns = ['alpha114']
return alpha
@timer
def alpha115(self):
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,30)
price = pd.concat([high,low], axis = 1, join = 'inner')
price.columns = ['high','low']
price_temp = price['high'] * 0.9 + price['low'] * 0.1
data = pd.concat([price_temp,volume_mean],axis = 1, join = 'inner')
corr = Corr(data,10)
r1 = Rank(corr)
data2 = pd.concat([high,low], axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
temp_r = TsRank(temp,4)
volume_r = TsRank(volume,10)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,7)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha115']
return alpha
@timer
def alpha116(self):
close = self.close
alpha = RegResi(0,close,None,20)
alpha.columns = ['alpha116']
return alpha
@timer
def alpha117(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
ret = self.ret
r1 = TsRank(volume,32)
data1 = pd.concat([close,high,low],axis = 1, join = 'inner')
r2 = TsRank(pd.DataFrame(data1['Close'] + data1['High'] - data1['Low']),16)
r3 = TsRank(ret,32)
r = pd.concat([r1,r2,r3], axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(r['r1'] * (1 - r['r2']) * (1 - r['r3']))
alpha.columns = ['alpha117']
return alpha
@timer
def alpha118(self):
high = self.high
low = self.low
Open = self.open
data = pd.concat([high,low,Open], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame(data['High'] - data['Open']),20)
s2 = Sum(pd.DataFrame(data['Open'] - data['Low']),20)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha118']
return alpha
@timer
def alpha119(self):
Open = self.open
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,5)
volume_mean_sum = Sum(volume_mean,26)
data1 = pd.concat([vwap,volume_mean_sum],axis = 1, join = 'inner')
corr1 = Corr(data1,5)
corr1_decay = DecayLinear(corr1,7)
r1 = Rank(corr1_decay)
open_r = Rank(Open)
volume_mean2 = Mean(volume,15)
volume_mean2_r = Rank(volume_mean2)
data2 = pd.concat([open_r, volume_mean2_r], axis = 1, join = 'inner')
corr2 = Corr(data2,21)
corr2_min = TsMin(corr2,9)
corr2_min_r = TsRank(corr2_min,7)
corr_min_r_decay = DecayLinear(corr2_min_r,8)
r2 = Rank(corr_min_r_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha119']
return alpha
@timer
def alpha120(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close], axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vwap'] - data['Close']))
r2 = Rank(pd.DataFrame(data['Vwap'] + data['Close']))
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha120']
return alpha
@timer
def alpha121(self):
vwap = self.vwap
volume = self.volume
vwap_r = TsRank(vwap,20)
volume_mean = Mean(volume,60)
volume_mean_r = TsRank(volume_mean,2)
data = pd.concat([vwap_r,volume_mean_r], axis = 1, join = 'inner')
corr= Corr(data,18)
temp = TsRank(corr,3)
vwap_min = TsMin(vwap,12)
data2 = pd.concat([vwap,vwap_min],axis = 1, join = 'inner')
data2.columns = ['vwap','vwap_min']
rank = Rank(pd.DataFrame(data2['vwap'] - data2['vwap_min']))
data3 = pd.concat([rank,temp],axis = 1, join = 'inner')
data3.columns = ['rank','temp']
alpha = pd.DataFrame(np.power(data3['rank'],data3['temp']) * -1)
alpha.columns = ['alpha121']
return alpha
@timer
def alpha122(self):
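# Heavily smoothed log(close) (stacked SMA(13,2) passes) divided by its own 1-day delayed value.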
close = self.close
close_ln = pd.DataFrame(np.log(close))
temp1 = SMA(close_ln,13,2)
sma1 = SMA(temp1,13,2)
sma2 = SMA(sma1,13,2)
sma3 = SMA(sma2,13,2)
sma3_delay = Delay(sma3,1)
data = pd.concat([sma3,sma3_delay],axis = 1, join = 'inner')
data.columns = ['sma','sma_delay']
alpha = pd.DataFrame(data['sma']/data['sma_delay'])
alpha.columns = ['alpha122']
return alpha
@timer
def alpha123(self):
volume = self.volume
high = self.high
low = self.low
data1 = pd.concat([high,low], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame((data1['High'] + data1['Low'])/2),20)
volume_mean = Mean(volume,60)
s2 = Sum(volume_mean,20)
data2 = pd.concat([s1,s2], axis = 1, join = 'inner')
corr1 = Corr(data2,9)
data3 = pd.concat([low,volume], axis = 1, join = 'inner')
corr2 = Corr(data3,6)
corr1_r = Rank(corr1)
corr2_r = Rank(corr2)
data = pd.concat([corr1_r,corr2_r], axis = 1, join = 'inner')
data.columns = ['r1','r2']
data['alpha'] = -1
data['alpha'][data['r1'] >= data['r2']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha123']
return alpha
@timer
def alpha124(self):
close = self.close
vwap = self.vwap
close_max = TsMax(close,30)
close_max_r = Rank(close_max)
close_max_r_decay = DecayLinear(close_max_r,2)
close_max_r_decay.columns = ['decay']
data = pd.concat([close,vwap,close_max_r_decay], axis = 1, join ='inner')
alpha = pd.DataFrame((data['Close'] - data['Vwap'])/data['decay'])
alpha.columns = ['alpha124']
return alpha
@timer
def alpha125(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,80)
data1 = pd.concat([vwap,volume_mean], axis = 1, join = 'inner')
corr1 = Corr(data1,17)
data2 = pd.concat([close,vwap], axis = 1, join = 'inner')
temp2 = pd.DataFrame(0.5*(data2['Close'] + data2['Vwap']))
temp2_delta = Delta(temp2,3)
corr1_decay = DecayLinear(corr1,20)
r1 = Rank(corr1_decay)
temp2_delta_decay = DecayLinear(temp2_delta,16)
r2 = Rank(temp2_delta_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha125']
return alpha
@timer
def alpha126(self):
close = self.close
high = self.high
low = self.low
data = pd.concat([close,high,low], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] + data['High'] + data['Low'])/3)
alpha.columns = ['alpha126']
return alpha
@timer
def alpha127(self):
close = self.close
close_max = TsMax(close,12)
data = pd.concat([close,close_max], axis = 1, join = 'inner')
data.columns = ['close','close_max']
alpha = pd.DataFrame((data['close'] - data['close_max'])/data['close_max'])
alpha.columns = ['alpha127']
return alpha
@timer
def alpha128(self):
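# Money-flow-index-style oscillator on the typical price (close + high + low)/3 with a 14-day window.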
close = self.close
high = self.high
low = self.low
volume = self.volume
data = pd.concat([close,high,low,volume], axis = 1, join = 'inner')
data['temp1'] = (data['Close'] + data['Low'] + data['High'])/3
data['temp2'] = data['temp1'] * data['Vol']
data['temp3'] = data['temp1'] * data['Vol']
temp_delay = Delay(pd.DataFrame(data['temp1']),1)
temp_delay.columns = ['temp_decay']
data = pd.concat([data,temp_delay], axis = 1, join = 'inner')
data['temp2'][data['temp1'] < data['temp_decay']] = 0
data['temp3'][data['temp1'] > data['temp_decay']] = 0
s1 = Sum(pd.DataFrame(data['temp2']),14)
s2 = Sum(pd.DataFrame(data['temp3']),14)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(100 - 100/(1+ s['s1']/s['s2']))
alpha.columns = ['alpha128']
return alpha
@timer
def alpha129(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['abs'] = np.abs(data['close'] - data['close_delay'])
data['temp'] = data['abs']
data['temp'][data['close'] < data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha129']
return alpha
@timer
def alpha130(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,40)
data1 = pd.concat([high,low],axis = 1, join = 'inner')
temp1 = pd.DataFrame((data1['High'] + data1['Low'])/2)
rank1 = pd.concat([temp1,volume_mean], axis = 1, join = 'inner')
corr = Corr(rank1,9)
close_r = Rank(close)
volume_r = Rank(volume)
data2 = pd.concat([close_r,volume_r],axis = 1, join = 'inner')
corr2 = Corr(data2,7)
corr_decay = DecayLinear(corr,10)
r1 = Rank(corr_decay)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha130']
return alpha
@timer
def alpha131(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,50)
data1 = pd.concat([close,volume_mean], axis = 1, join = 'inner')
corr = Corr(data1,18)
vwap_delta = Delta(vwap,1)
temp2 = TsRank(corr,18)
data2 = pd.concat([vwap_delta,temp2],axis = 1, join = 'inner')
data2.columns = ['vwap_delta','temp2']
temp3 = np.power(data2['vwap_delta'],data2['temp2'])
alpha = Rank(pd.DataFrame(temp3))
alpha.columns = ['alpha131']
return alpha
@timer
def alpha132(self):
amt = self.amt
alpha = Mean(amt,20)
alpha.columns = ['alpha132']
return alpha
@timer
def alpha133(self):
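# Aroon-style spread: recency of the 20-day high minus recency of the 20-day low, each on a 0-100 scale.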
low = self.low
high = self.high
highday = Highday(high,20)
lowday = Lowday(low,20)
data = pd.concat([highday,lowday],axis = 1, join = 'inner')
data.columns = ['highday','lowday']
alpha = (20 - data['highday'])/20.0 * 100 - (20 - data['lowday'])/20.0 * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha133']
return alpha
@timer
def alpha134(self):
close = self.close
volume = self.volume
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,volume,close_delay], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] - data['close_delay'])/data['close_delay'])
alpha.columns = ['alpha134']
return alpha
@timer
def alpha135(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1 , join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_delay = Delay(temp,1)
alpha = SMA(temp_delay,20,1)
alpha.columns = ['alpha135']
return alpha
@timer
def alpha136(self):
volume = self.volume
Open = self.open
ret = self.ret
ret_delta = Delta(ret,3)
ret_delta_r = Rank(ret_delta)
data = pd.concat([Open,volume],axis = 1, join = 'inner')
corr = Corr(data,10)
data_temp = pd.concat([ret_delta_r,corr],axis = 1, join = 'inner')
data_temp.columns = ['ret_delta','corr']
alpha = pd.DataFrame(-1 * data_temp['ret_delta'] * data_temp['corr'])
alpha.columns = ['alpha136']
return alpha
@timer
def alpha137(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
data1['judge4'][data1['abs2'] > data1['abs1']] = 1 # second-branch test: |low - prev close| must also exceed |high - prev close|
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
alpha = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha.columns = ['alpha137']
return alpha
@timer
def alpha138(self):
vwap = self.vwap
volume = self.volume
low = self.low
data1 = pd.concat([low,vwap], axis = 1, join = 'inner')
temp1 = pd.DataFrame(data1['Low'] * 0.7 + data1['Vwap'] * 0.3)
temp1_delta = Delta(temp1,3)
temp1_delta_decay = DecayLinear(temp1_delta,20)
r1 = Rank(temp1_delta_decay)
low_r = TsRank(low,8)
volume_mean = Mean(volume,60)
volume_mean_r = TsRank(volume_mean,17)
data2 = pd.concat([low_r,volume_mean_r],axis = 1, join = 'inner')
corr = Corr(data2,5)
corr_r = TsRank(corr,19)
corr_r_decay = DecayLinear(corr_r,16)
r2 = TsRank(corr_r_decay,7)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha138']
return alpha
@timer
def alpha139(self):
Open = self.open
volume = self.volume
data = pd.concat([Open,volume], axis = 1, join = 'inner')
alpha = -1 * Corr(data,10)
alpha.columns = ['alpha139']
return alpha
@timer
def alpha140(self):
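# Elementwise minimum of a ranked decay-weighted spread of price ranks (open + low vs. high + close)
# and a ts-ranked decay-weighted close/volume correlation.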
Open = self.open
volume = self.volume
high = self.high
low = self.low
close = self.close
open_r = Rank(Open)
low_r = Rank(low)
high_r = Rank(high)
close_r = Rank(close)
data1 = pd.concat([open_r,low_r,high_r,close_r],axis = 1, join = 'inner')
data1.columns = ['open_r','low_r','high_r','close_r']
temp = pd.DataFrame(data1['open_r'] + data1['low_r'] - \
(data1['high_r'] + data1['close_r']))
close_r_temp = TsRank(close,8)
volume_mean = Mean(volume,70)
volume_mean_r = TsRank(volume_mean,20)
data2 = pd.concat([close_r_temp,volume_mean_r], axis = 1, join = 'inner')
corr = Corr(data2,8)
temp_decay = DecayLinear(temp,8)
r1 = Rank(temp_decay)
corr_decay = DecayLinear(corr,7)
r2 = TsRank(corr_decay,3)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
alpha = pd.DataFrame(np.min(r,axis = 1))
alpha.columns = ['alpha140']
return alpha
@timer
def alpha141(self):
volume = self.volume
high = self.high
volume_mean = Mean(volume,15)
high_r = Rank(high)
volume_mean_r = Rank(volume_mean)
data = pd.concat([high_r,volume_mean_r], axis = 1, join = 'inner')
corr = Corr(data,9)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha141']
return alpha
@timer
def alpha142(self):
close = self.close
volume = self.volume
close_r = TsRank(close,10)
r1 = Rank(close_r)
close_delta = Delta(close,1)
close_delta_delta = Delta(close_delta,1)
r2 = Rank(close_delta_delta)
volume_mean = Mean(volume,20)
data = pd.concat([volume,volume_mean], axis = 1, join = 'inner')
data.columns = ['v','v_m']
temp = pd.DataFrame(data['v']/data['v_m'])
temp_r = TsRank(temp,5)
r3 = Rank(temp_r)
r = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(- 1* r['r1'] * r['r2'] * r['r3'])
alpha.columns= ['alpha142']
return alpha
@timer
def alpha143(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame((data['close'] - data['close_delay'])/data['close_delay'])
temp.columns= ['temp']
data_temp = pd.concat([data,temp],axis = 1, join = 'inner')
data_temp['temp'][data_temp['close'] <= data_temp['close_delay']] = 1
temp_unstack = data_temp['temp'].unstack()
temp_unstack.iloc[0,:] = 1
df = np.cumprod(temp_unstack,axis = 0)
alpha = pd.DataFrame(df.stack())
alpha.columns = ['alpha143']
return alpha
@timer
def alpha144(self):
close = self.close
amt = self.amt
close_delay = Delay(close,1)
data = pd.concat([close,close_delay,amt], axis = 1, join = 'inner')
data.columns = ['close','close_delay','amt']
data['temp'] = np.abs(data['close']/data['close_delay'] - 1)/data['amt']
data['sign'] = 1
data['sign'][data['close'] >= data['close_delay']] = 0
tep1 = Sum(pd.DataFrame(data['sign'] * data['temp']),20)
tep2 = Count(0,pd.DataFrame(data['close_delay']),pd.DataFrame(data['close']),20)
data2 = pd.concat([tep1,tep2], axis = 1, join = 'inner')
data2.columns = ['tep1','tep2']
alpha = pd.DataFrame(data2['tep1']/data2['tep2'])
alpha.columns = ['alpha144']
return alpha
@timer
def alpha145(self):
volume = self.volume
volume_mean9 = Mean(volume,9)
volume_mean26 = Mean(volume,26)
volume_mean12 = Mean(volume,12)
data = pd.concat([volume_mean9,volume_mean26,volume_mean12], axis = 1, join = 'inner')
data.columns = ['m9','m26','m12']
alpha = pd.DataFrame((data['m9'] - data['m26'])/data['m12'] * 100)
alpha.columns = ['alpha145']
return alpha
@timer
def alpha146(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame((data['close'] -data['close_delay'])/data['close_delay'])
sma1 = SMA(temp,61,2)
data2 = pd.concat([temp,sma1], axis = 1, join = 'inner')
data2.columns = ['temp1','sma1']
data2['temp2'] = data2['temp1'] - data2['sma1']
temp2_mean = Mean(pd.DataFrame(data2['temp2']),20)
sma2 = SMA(pd.DataFrame(data2['temp1'] - data2['temp2']),61,2)
data_temp = pd.concat([temp2_mean,pd.DataFrame(data2['temp2']),sma2], axis = 1 , join = 'inner')
data_temp.columns = ['temp2_mean','temp2','sma2']
alpha = data_temp['temp2_mean'] * data_temp['temp2'] / data_temp['sma2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha146']
return alpha
@timer
def alpha147(self):
close = self.close
close_mean = Mean(close,12)
alpha = RegBeta(0,close_mean,None,12)
alpha.columns = ['alpha147']
return alpha
@timer
def alpha148(self):
Open = self.open
volume = self.volume
volume_mean = Mean(volume,60)
volume_mean_s = Sum(volume_mean,9)
data = pd.concat([Open,volume_mean_s],axis = 1, join = 'inner')
corr = Corr(data,6)
r1 = Rank(corr)
open_min = TsMin(Open,14)
data2 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data2.columns = ['open','open_min']
r2 = Rank(pd.DataFrame(data2['open'] - data2['open_min']))
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = -1
r['alpha'][r['r1'] > r['r2']] = 0
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha148']
return alpha
@timer
def alpha149(self):
close = self.close
close_index = self.close_index
close_delay = Delay(close,1)
close_index_delay = Delay(close_index,1)
data_index = pd.concat([close_index,close_index_delay], axis = 1, join = 'inner')
data_index.columns = ['close','close_delay']
data_index['delta'] = data_index['close']/data_index['close_delay'] - 1
data_index['judge'] = 1
data_index['judge'][data_index['close'] >= data_index['close_delay']] = 0
data_index['delta'][data_index['judge'] == 0] = np.nan
# index_delta_unstack = index_delta_unstack.dropna()
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['delta'] = data['close'] / data['close_delay'] - 1
df1 = pd.DataFrame(data['delta'])
df2 = pd.DataFrame(data_index['delta'])
alpha = RegBeta(1,df1,df2,252)
alpha.columns = ['alpha149']
return alpha
@timer
def alpha150(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
alpha = (data['Close'] + data['High'] + data['Low'])/3 * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha150']
return alpha
@timer
def alpha151(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close'] - data['close_delay'])
alpha = SMA(temp,20,1)
alpha.columns = ['alpha151']
return alpha
@timer
def alpha152(self):
close = self.close
close_delay = Delay(close,9)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_delay = Delay(temp,1)
sma1 = SMA(temp_delay,9,1)
sma1_delay = Delay(sma1,1)
sma1_delay_mean1 = Mean(sma1_delay,12)
sma1_delay_mean2 = Mean(sma1_delay,26)
data_temp = pd.concat([sma1_delay_mean1,sma1_delay_mean2],axis = 1, join = 'inner')
data_temp.columns = ['m1','m2']
alpha = SMA(pd.DataFrame(data_temp['m1'] - data_temp['m2']),9,1)
alpha.columns = ['alpha152']
return alpha
@timer
def alpha153(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close_mean3, close_mean6, close_mean12, close_mean24], axis = 1 ,join ='inner')
alpha = pd.DataFrame(np.mean(data, axis = 1))
alpha.columns = ['alpha153']
return alpha
@timer
def alpha154(self):
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,180)
data = pd.concat([vwap,volume_mean], axis = 1, join = 'inner')
corr = Corr(data,18)
vwap_min = TsMin(vwap,16)
data1 = pd.concat([vwap,vwap_min],axis = 1, join = 'inner')
data1.columns = ['vwap','vwap_min']
temp = pd.DataFrame(data1['vwap'] - data1['vwap_min'])
data_temp = pd.concat([corr,temp], axis = 1, join = 'inner')
data_temp.columns = ['corr','temp']
data_temp['alpha'] = 1
data_temp['alpha'][data_temp['corr'] >= data_temp['temp']] = 0
alpha = pd.DataFrame(data_temp['alpha'])
alpha.columns = ['alpha154']
return alpha
@timer
def alpha155(self):
volume = self.volume
sma1 = SMA(volume,13,2)
sma2 = SMA(volume,26,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3], axis = 1 ,join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(data['temp'] - data['sma'])
alpha.columns = ['alpha155']
return alpha
@timer
def alpha156(self):
vwap = self.vwap
Open = self.open
low = self.low
vwap_delta = Delta(vwap,5)
vwap_delta_decay = DecayLinear(vwap_delta,3)
r1 = Rank(vwap_delta_decay)
data1 = pd.concat([Open,low],axis = 1, join = 'inner')
temp = -1 * Delta(pd.DataFrame(data1['Open'] * 0.15 + data1['Low'] * 0.85),2)
temp_decay = DecayLinear(temp,3)
r2 = Rank(temp_decay)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(- 1 *np.max(r, axis = 1))
alpha.columns = ['alpha156']
return alpha
@timer
def alpha157(self):
close = self.close
ret = self.ret
close_delta = Delta(close,5)
close_delta_r = Rank(Rank(close_delta) * -1)
r1 = TsMin(close_delta_r,2)
ret_delay = Delay(-1 * ret,6)
r2 = TsRank(ret_delay,5)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
temp = pd.DataFrame(r['r1'] + r['r2'])
alpha = TsMin(temp,5)
alpha.columns = ['alpha157']
return alpha
@timer
def alpha158(self):
high = self.high
low = self.low
close = self.close
temp = SMA(close,15,2)
temp.columns = ['temp']
data = pd.concat([high,low,close,temp],axis = 1 , join = 'inner')
alpha =(data['High'] + data['Low'] - 2 * data['temp'] )/data['Close']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha158']
return alpha
@timer
def alpha159(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
data1 = pd.concat([low,close_delay],axis = 1, join = 'inner')
data2 = pd.concat([high, close_delay], axis = 1, join = 'inner')
temp1 = pd.DataFrame(np.min(data1,axis = 1))
temp2= pd.DataFrame(np.max(data2,axis = 1))
temp = pd.concat([temp1,temp2], axis = 1 ,join = 'inner')
temp.columns = ['temp1','temp2']
temp1_sum6 = Sum(temp1,6)
temp1_sum12 = Sum(temp1,12)
temp1_sum24 = Sum(temp1,24)
tep = pd.DataFrame(temp['temp2'] - temp['temp1'])
s6 = Sum(tep,6)
s12 = Sum(tep,12)
s24 = Sum(tep,24)
data3 = pd.concat([temp1_sum6,temp1_sum12,temp1_sum24,s6,s12,s24], axis = 1 ,join = 'inner')
data3.columns = ['ts6','ts12','ts24','s6','s12','s24']
temp3 = pd.DataFrame(data3['ts6']/data3['s6'] * 12 * 24 + data3['ts12']/data3['s12'] * 6 * 24 \
+ data3['ts24']/data3['s24'] * 6 * 24)
alpha = temp3 / (6*12 + 6*24 + 12*24) * 100
alpha.columns = ['alpha159']
return alpha
@timer
def alpha160(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_std','close_delay']
data['close_std'][data['close'] >= data['close_delay']] = 0
alpha = SMA(pd.DataFrame(data['close_std']),20,1)
alpha.columns = ['alpha160']
return alpha
@timer
def alpha161(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data1 = pd.concat([high,low],axis = 1 , join = 'inner')
diff = pd.DataFrame(data1['High'] - data1['Low'])
data2 = pd.concat([close_delay,high], axis = 1, join ='inner')
abs1 = pd.DataFrame(np.abs(data2['close_delay'] - data2['High']))
data3 = pd.concat([diff,abs1], axis = 1, join = 'inner')
temp1 = pd.DataFrame(np.max(data3,axis = 1))
data4 = pd.concat([close_delay,low],axis = 1, join = 'inner')
temp2 = pd.DataFrame(np.abs(data4['close_delay'] -data4['Low']))
data = pd.concat([temp1,temp2],axis =1 , join = 'inner')
data.columns = ['temp1','temp2']
temp = pd.DataFrame(np.max(data, axis = 1))
alpha = Mean(temp,12)
alpha.columns = ['alpha161']
return alpha
@timer
def alpha162(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['max']= data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
temp1 = SMA(pd.DataFrame(data['max']),12,1)
temp2 = SMA(pd.DataFrame(data['abs']),12,1)
data1 = pd.concat([temp1,temp2], axis = 1, join = 'inner')
data1.columns = ['temp1','temp2']
tep = pd.DataFrame(data1['temp1']/data1['temp2'])
temp3 = TsMin(tep,12)
temp4 = TsMax(tep,12)
data_temp = pd.concat([tep,temp3,temp4], axis = 1, join = 'inner')
data_temp.columns = ['tep','temp3','temp4']
alpha = (data_temp['tep'] - data_temp['temp3']/data_temp['temp4']) * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha162']
return alpha
@timer
def alpha163(self):
low = self.low
high = self.high
volume = self.volume
ret = self.ret
vwap = self.vwap
volume_mean = Mean(volume,20)
data = pd.concat([high,low,vwap,ret,volume_mean],axis = 1, join = 'inner')
data.columns = ['high','low','vwap','ret','volume_mean']
temp = pd.DataFrame(-1 *data['ret'] * data['volume_mean'] *data['vwap'] * \
(data['high'] - data['low']))
alpha = Rank(temp)
alpha.columns = ['alpha163']
return alpha
@timer
def alpha164(self):
close = self.close
high = self.high
low = self.low
close_delay = Delay(close,1)
data = pd.concat([close,high,low,close_delay],axis = 1, join = 'inner')
data.columns = ['close','high','low','close_delay']
data['temp'] = 1/(data['close'] - data['close_delay'])
data_min = TsMin(pd.DataFrame(data['temp']),12)
data_min.columns = ['min']
data2 = pd.concat([data,data_min],axis = 1, join = 'inner')
data2['tep'] = (data2['temp'] - data2['min'])/(data2['high'] - data2['low'])
data2['tep'][data['close'] <= data['close_delay']] = 0
alpha = SMA(pd.DataFrame(data2['tep']) * 100,13,2)
alpha.columns = ['alpha164']
return alpha
@timer
def alpha165(self):
close = self.close
close_mean = Mean(close,48)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame(data['close'] - data['close_mean'])
temp_sum = Sum(temp,48)
temp_sum_min = TsMin(temp_sum,48)
temp_sum_max = TsMax(temp_sum,48)
close_std = STD(close,48)
data_temp = pd.concat([temp_sum_min,temp_sum_max,close_std], axis = 1, join = 'inner')
data_temp.columns = ['min','max','std']
alpha = (data_temp['max'] - data_temp['min'])/data_temp['std']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha165']
return alpha
@timer
def alpha166(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_mean = Mean(temp,20)
data1 = pd.concat([temp,temp_mean], axis = 1, join = 'inner')
data1.columns = ['temp','temp_mean']
temp2 = Sum(pd.DataFrame(data1['temp'] - data1['temp_mean']),20) * 20 * 19
temp3 = Sum(temp,20) * 19 * 18
data2 = pd.concat([temp2,temp3], axis = 1, join = 'inner')
data2.columns = ['temp2','temp3']
alpha = np.power(data2['temp2'],1.5)/np.power(data2['temp3'],1.5)
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha166']
return alpha
@timer
def alpha167(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = data['close'] - data['close_delay']
data['temp'][data['close'] <= data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha167']
return alpha
@timer
def alpha168(self):
volume = self.volume
volume_mean = Mean(volume,20)
data = pd.concat([volume,volume_mean], axis = 1, join = 'inner')
data.columns = ['volume','volume_mean']
alpha = pd.DataFrame(data['volume']/data['volume_mean'] * -1)
alpha.columns = ['alpha168']
return alpha
@timer
def alpha169(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp1 = pd.DataFrame(data['close'] - data['close_delay'])
sma = SMA(temp1,9,1)
temp2 = Delay(sma,1)
temp2_mean12 = Mean(temp2,12)
temp2_mean26 = Mean(temp2,26)
data2 = pd.concat([temp2_mean12,temp2_mean26], axis = 1, join ='inner')
data2.columns = ['mean1','mean2']
alpha = SMA(pd.DataFrame(data2['mean1'] - data2['mean2']),10,1)
alpha.columns = ['alpha169']
return alpha
@timer
def alpha170(self):
close = self.close
high = self.high
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,20)
data1 = pd.concat([high,close,volume,volume_mean], axis = 1, join = 'inner')
data1.columns =['high','close','volume','volume_mean']
temp1 = pd.DataFrame(data1['high']/data1['close'] * data1['volume']/data1['volume_mean'])
r1 = Rank(temp1)
high_mean = Mean(high,5)
vwap_delay = Delay(vwap,5)
data2 = pd.concat([high,close,high_mean], axis = 1, join = 'inner')
data2.columns = ['high','close','high_mean']
temp2 = pd.DataFrame((data2['high'] - data2['close'])/data2['high_mean'])
temp2_r = Rank(temp2)
data3 = pd.concat([vwap,vwap_delay], axis = 1, join = 'inner')
data3.columns = ['vwap','vwap_delay']
temp3 = pd.DataFrame(data3['vwap'] - data3['vwap_delay'])
temp3_r = Rank(temp3)
rank = pd.concat([temp2_r,temp3_r], axis = 1, join = 'inner')
rank.columns = ['r1','r2']
r2 = pd.DataFrame(rank['r1'] - rank['r2'])
data_temp = pd.concat([r1,r2],axis = 1, join = 'inner')
data_temp.columns = ['r1','r2']
alpha = pd.DataFrame(data_temp['r1'] * data_temp['r2'])
alpha.columns = ['alpha170']
return alpha
@timer
def alpha171(self):
high = self.high
close = self.close
low = self.low
Open = self.open
data = pd.concat([high,close,low,Open],axis = 1, join = 'inner')
data.columns = ['high','close','low','open']
alpha = -1 * (data['low'] - data['close']) * np.power(data['open'],5)/\
((data['close'] - data['high']) * np.power(data['close'],5))
alpha.columns = ['alpha171']
return alpha
@timer
def alpha172(self):
high = self.high
low = self.low
hd = HD(high)
ld = LD(low)
data = pd.concat([hd,ld],axis = 1, join = 'inner')
data.columns = ['hd','ld']
data['temp'] = 0
data['temp'][((data['hd'] > data['ld'])& (data['hd'] > 0)) | \
((data['ld'] > data['hd'])& (data['ld'] > 0))] = 1
alpha = Mean( | pd.DataFrame(data['temp']) | pandas.DataFrame |
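# The alpha-factor code above relies on helpers (Delay, Mean, TsMin, TsMax, STD,
# Sum, SMA, Rank, HD, LD) that are not defined in this excerpt. Below is a minimal
# sketch of plausible pandas implementations for three of them, purely as an
# assumption about their behaviour; the original library's signatures may differ.
import pandas as pd

def Delay(df, n):
    # value observed n rows earlier
    return df.shift(n)

def Mean(df, n):
    # rolling mean over the last n rows
    return df.rolling(n).mean()

def SMA(df, n, m):
    # recursive smoothing y_t = (m*x_t + (n-m)*y_{t-1}) / n, i.e. an
    # exponentially weighted mean with alpha = m/n
    return df.ewm(alpha=m/n, adjust=False).mean()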
import pandas as pd
import sys
import numpy as np
import pymysql
from xlrd import open_workbook
import xlrd
# To store the old data temporarily
#df2 =df.copy(deep=True)
# Data frame to store the old data
File_path = r"C:/Users/karthikais/Documents/Application_monitoring/App_monitoring.xlsx"
old= | pd.read_excel(File_path, sheetname='Last_Update_status') | pandas.read_excel |
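# Minimal sketch of how the old snapshot loaded above might be compared with a
# freshly pulled sheet; the 'Current_status' sheet name and the outer-merge
# comparison are assumptions, not part of the original script.
new = pd.read_excel(File_path, sheetname='Current_status')  # hypothetical sheet
diff = old.merge(new, how='outer', indicator=True)
diff = diff[diff['_merge'] != 'both']  # rows present in only one of the two sheets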
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as it cannot be sorted (on PY3 or older
# numpy versions)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparison take the categories order in
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
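# Illustrative sketch (standalone, not part of the test class): with ordered=True
# comparisons follow the declared category order rather than lexical order, so
# under the ordering c < b < a only "a" is greater than "b".
import numpy as np
import pandas as pd
c = pd.Categorical(list("abc"), categories=list("cba"), ordered=True)
assert (np.asarray(c > "b") == np.array([True, False, False])).all()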
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect Categorical, as it doesn't care
# about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" categories included in "new" -> values in the dropped
# categories become np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertTrue(c.min(), 4)
self.assertTrue(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
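# Illustrative sketch (standalone): in the pandas version targeted by these tests,
# add_categories/remove_categories/rename_categories return a new Categorical by
# default and only mutate the original when inplace=True is passed.
import pandas as pd
cat = pd.Categorical(["a", "b", "a"])
bigger = cat.add_categories(["c"])           # new object with categories a, b, c
assert list(cat.categories) == ["a", "b"]    # original unchanged
cat.add_categories(["c"], inplace=True)      # now the original is modified
assert list(cat.categories) == ["a", "b", "c"]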
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
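# Illustrative sketch (standalone): .codes hands back a read-only array, so values
# must be changed through the Categorical itself.
import pandas as pd
c = pd.Categorical(["a", "b", "a"])
try:
    c.codes[0] = 1        # raises: the codes array is not writeable
except ValueError:
    pass
c[0] = "b"                # supported way to change a value
assert c.codes[0] == 1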
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
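# Illustrative sketch (standalone): min/max respect the declared category order,
# so with the ordering d < c < b < a the minimum of ["a", "d"] is "d".
import pandas as pd
cat = pd.Categorical(["a", "d"], categories=["d", "c", "b", "a"], ordered=True)
assert cat.min() == "d" and cat.max() == "a"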
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5]
) # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
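# Illustrative sketch (standalone): searchsorted on an ordered Categorical mirrors
# Series.searchsorted and returns positional insertion points.
import numpy as np
import pandas as pd
c = pd.Categorical(["apple", "bread", "cheese", "milk"], ordered=True)
pos = c.searchsorted(["bread", "milk"])
assert list(np.asarray(pos)) == [1, 3]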
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
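# Illustrative sketch (standalone) of the Series/DataFrame integration exercised by
# the block-level tests below: a Categorical stored in a Series reports dtype
# 'category' and exposes categories and codes through the .cat accessor.
import pandas as pd
s = pd.Series(pd.Categorical(["a", "b", "b", "a"]))
assert s.dtype == "category"
assert list(s.cat.categories) == ["a", "b"]
assert list(s.cat.codes) == [0, 1, 1, 0]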
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
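# the .cat accessor exposes categories/ordered and supports set_categories
# and remove_unused_categories, both inplace and returning a new Series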
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' and '.remove_unused_categories()'
# to the categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# Calling set_categories() directly on the Series (instead of via the
# .cat accessor) is a likely mistake, so check that it raises an error:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# describe() for a categorical should give the same output as for string
# data (count, unique, top, freq), both for a Series and within a frame
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
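# Series repr of categorical data: dtype line, Categories footer and
# truncation under display.max_rows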
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
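# The repr tests below pin the exact string output of Categorical, Series,
# CategoricalIndex and DataFrame for int, datetime (naive and tz-aware),
# period and timedelta data, both unordered and ordered.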
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test all lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure .isnull() and .info() work on a frame with a categorical column
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
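# mode() on an ordered categorical Series returns the modal value(s) as a
# categorical with the same categories (empty when all values are unique)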
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
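# value_counts() reports zero counts for unused categories and honours the
# sort flag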
def test_value_counts(self):
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
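# grouping on categorical keys yields a CategoricalIndex (or MultiIndex)
# covering all categories, with NaN aggregates for unobserved groups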
def test_groupby(self):
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
expected = DataFrame({'values': Series(
[1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
], index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
], names=['A', 'B', 'C']))}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0],
index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
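# pivot_table over two categorical keys builds the full category product as
# the MultiIndex, with NaN for unobserved combinations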
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']],
names=['A', 'B']),
name='values')
tm.assert_series_equal(result, expected)
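# count() skips NaN entries in a categorical Series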
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
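# sorting categorical Series and DataFrames follows the categories order and
# respects ascending= and na_position=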
def test_sort(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c)
# GH 9816: Categorical.order() is deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Categorical columns must be carried along when sorting a DataFrame and
# keep their 'category' dtype
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
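# positional and label-based slicing of categorical Series and of frames
# holding pd.cut results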
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed_cat = cat[::-1]
exp = np.array([4, 3, 2, 1])
self.assert_numpy_array_equal(reversed_cat.__array__(), exp)
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = pd.Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2, 0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.loc["j", "cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
# res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.ix["j", 0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2, 0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j", "cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.get_value("j", "cats")
self.assertEqual(res_val, exp_val)
# .iloc with an int, a slice, or a sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
# GH 7918
cats = Categorical(
["a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(
['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.ix["h":"j", 0:1]
expected = DataFrame({'cats': Series(
Categorical(
['a', 'b', 'b'], categories=['a', 'b', 'c']), index=['h', 'i',
'j'])})
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = pd.Categorical(
["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = pd.DataFrame(
{"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = pd.DataFrame(
{"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = pd.DataFrame(
{"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = pd.DataFrame(
{"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2, 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2, :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.loc["j":"k", "cats"] = ["c", "c"]
# ix
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.ix["j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.ix[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.ix["j", 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.ix["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.ix["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.ix["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.ix["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.ix["j":"k", 0] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iat[2, 0] = "c"
self.assertRaises(ValueError, f)
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.at["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# fancy indexing
catsf = pd.Categorical(
["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"])
idxf = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = pd.DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
        tm.assert_frame_equal(df, exp_fancy)
# set_value
df = orig.copy()
df.set_value("j", "cats", "b")
tm.assert_frame_equal(df, exp_single_cats_value)
def f():
df = orig.copy()
df.set_value("j", "cats", "c")
self.assertRaises(ValueError, f)
        # Assigning a Categorical to parts of an int/... column uses the values
        # of the Categorical
df = pd.DataFrame({"a": [1, 1, 1, 1, 1],
"b": ["a", "a", "a", "a", "a"]})
exp = pd.DataFrame({"a": [1, "b", "b", 1, 1],
"b": ["a", "a", "b", "b", "a"]})
df.loc[1:2, "a"] = pd.Categorical(["b", "b"], categories=["a", "b"])
df.loc[2:3, "b"] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp)
# Series
orig = Series(pd.Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(pd.Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(
pd.Categorical(["b", "a"],
categories=["a", "b"]), index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
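        # A compact restatement of the rule the assignments above exercise
        # (illustrative sketch, not part of the fixtures): values already in
        # the categories, and NaN, can be assigned; anything else raises.
        #
        #   s = Series(Categorical(["a", "b"], categories=["a", "b"]))
        #   s[0] = "b"       # ok, "b" is a known category
        #   s[0] = np.nan    # ok, missing values are always allowed
        #   s[0] = "c"       # raises ValueError: "c" is not in the categories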
def test_comparisons(self):
tests_data = [(list("abc"), list("cba"), list("bbb")),
([1, 2, 3], [3, 2, 1], [2, 2, 2])]
for data, reverse, base in tests_data:
cat_rev = pd.Series(pd.Categorical(data, categories=reverse,
ordered=True))
cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse,
ordered=True))
cat = pd.Series(pd.Categorical(data, ordered=True))
cat_base = pd.Series(pd.Categorical(
base, categories=cat.cat.categories, ordered=True))
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
def f():
cat > "b"
self.assertRaises(TypeError, f)
cat = Series(Categorical(list("abc"), ordered=False))
def f():
cat > "b"
self.assertRaises(TypeError, f)
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
self.assertRaises(TypeError, lambda: cat < "d")
self.assertRaises(TypeError, lambda: cat > "d")
self.assertRaises(TypeError, lambda: "d" < cat)
self.assertRaises(TypeError, lambda: "d" > cat)
self.assert_series_equal(cat == "d", Series([False, False, False]))
self.assert_series_equal(cat != "d", Series([True, True, True]))
# And test NaN handling...
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
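        # Illustrative summary of the comparison semantics checked above:
        # ordered categoricals compare by category position, not lexically.
        #
        #   c = Series(Categorical(list("ab"), categories=list("ba"),
        #                          ordered=True))
        #   (c > "b").tolist()   # -> [True, False]; "a" ranks above "b" here
        #
        # Unordered categoricals only support == and !=.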
def test_cat_equality(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'), dtype="category")
b = Series(list('abc'), dtype="object")
c = Series(['a', 'b', 'cc'], dtype="object")
d = Series(list('acb'), dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
self.assertFalse((a == 'a').all())
self.assertTrue(((a != 'a') == ~(a == 'a')).all())
self.assertFalse(('a' == a).all())
self.assertTrue((a == 'a')[0])
self.assertTrue(('a' == a)[0])
self.assertFalse(('a' != a)[0])
# vs list-like
self.assertTrue((a == a).all())
self.assertFalse((a != a).all())
self.assertTrue((a == list(a)).all())
self.assertTrue((a == b).all())
self.assertTrue((b == a).all())
self.assertTrue(((~(a == b)) == (a != b)).all())
self.assertTrue(((~(b == a)) == (b != a)).all())
self.assertFalse((a == c).all())
self.assertFalse((c == a).all())
self.assertFalse((a == d).all())
self.assertFalse((d == a).all())
# vs a cat-like
self.assertTrue((a == e).all())
self.assertTrue((e == a).all())
self.assertFalse((a == f).all())
self.assertFalse((f == a).all())
self.assertTrue(((~(a == e) == (a != e)).all()))
self.assertTrue(((~(e == a) == (e != a)).all()))
self.assertTrue(((~(a == f) == (a != f)).all()))
self.assertTrue(((~(f == a) == (f != a)).all()))
# non-equality is not comparable
self.assertRaises(TypeError, lambda: a < b)
self.assertRaises(TypeError, lambda: b < a)
self.assertRaises(TypeError, lambda: a > b)
self.assertRaises(TypeError, lambda: b > a)
def test_concat(self):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = pd.DataFrame({"cats": cat2,
"vals": vals2}, index=pd.Index([0, 1, 0, 1]))
res = pd.concat([df, df])
tm.assert_frame_equal(exp, res)
# Concat should raise if the two categoricals do not have the same
# categories
cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_wrong_categories = pd.DataFrame({"cats": cat3, "vals": vals3})
def f():
pd.concat([df, df_wrong_categories])
self.assertRaises(ValueError, f)
# GH 7864
        # make sure ordering is preserved
df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"])
df['grade'].cat.set_categories(['e', 'a', 'b'])
df1 = df[0:3]
df2 = df[3:]
self.assert_numpy_array_equal(df['grade'].cat.categories,
df1['grade'].cat.categories)
self.assert_numpy_array_equal(df['grade'].cat.categories,
df2['grade'].cat.categories)
dfx = pd.concat([df1, df2])
dfx['grade'].cat.categories
self.assert_numpy_array_equal(df['grade'].cat.categories,
dfx['grade'].cat.categories)
def test_concat_preserve(self):
# GH 8641
# series concat not preserving category dtype
s = Series(list('abc'), dtype='category')
s2 = Series(list('abd'), dtype='category')
def f():
pd.concat([s, s2])
self.assertRaises(ValueError, f)
result = pd.concat([s, s], ignore_index=True)
expected = Series(list('abcabc')).astype('category')
tm.assert_series_equal(result, expected)
result = pd.concat([s, s])
expected = Series(
list('abcabc'), index=[0, 1, 2, 0, 1, 2]).astype('category')
tm.assert_series_equal(result, expected)
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list('cab'))})
result = pd.concat([df2, df2])
expected = DataFrame({'A': pd.concat([a, a]),
'B': pd.concat([b, b]).astype(
'category', categories=list('cab'))})
tm.assert_frame_equal(result, expected)
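        # Recap of what the assertions above pin down (illustrative):
        # concat keeps the category dtype only when all inputs share the same
        # categories.
        #
        #   pd.concat([Series(list('abc'), dtype='category')] * 2).dtype
        #   # -> category
        #
        # With mismatched categories this pandas version raises ValueError
        # (newer releases fall back to object dtype instead).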
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list(
'cab'))}).set_index('B')
result = pd.concat([df2, df2])
expected = DataFrame({'A': pd.concat([a, a]),
'B': pd.concat([b, b]).astype(
'category', categories=list(
'cab'))}).set_index('B')
tm.assert_frame_equal(result, expected)
        # wrong categories
df3 = DataFrame({'A': a,
'B': b.astype('category', categories=list(
'abc'))}).set_index('B')
self.assertRaises(TypeError, lambda: pd.concat([df2, df3]))
def test_append(self):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = pd.DataFrame({"cats": cat2,
"vals": vals2}, index=pd.Index([0, 1, 0, 1]))
res = df.append(df)
tm.assert_frame_equal(exp, res)
# Concat should raise if the two categoricals do not have the same
# categories
cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_wrong_categories = pd.DataFrame({"cats": cat3, "vals": vals3})
def f():
df.append(df_wrong_categories)
self.assertRaises(ValueError, f)
def test_merge(self):
# GH 9426
right = DataFrame({'c': {0: 'a',
1: 'b',
2: 'c',
3: 'd',
4: 'e'},
'd': {0: 'null',
1: 'null',
2: 'null',
3: 'null',
4: 'null'}})
left = DataFrame({'a': {0: 'f',
1: 'f',
2: 'f',
3: 'f',
4: 'f'},
'b': {0: 'g',
1: 'g',
2: 'g',
3: 'g',
4: 'g'}})
df = pd.merge(left, right, how='left', left_on='b', right_on='c')
# object-object
expected = df.copy()
# object-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
result = pd.merge(left, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-object
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
def test_repeat(self):
# GH10183
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
exp = pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"])
res = cat.repeat(2)
self.assert_categorical_equal(res, exp)
def test_na_actions(self):
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
vals = ["a", "b", np.nan, "d"]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical([1, 2, 3, 3], categories=[1, 2, 3])
vals2 = ["a", "b", "b", "d"]
df_exp_fill = pd.DataFrame({"cats": cat2, "vals": vals2})
        cat3 = pd.Categorical([1, 2, 3], categories=[1, 2, 3])  # api: pandas.Categorical
import pickle
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scipy.stats as scp_stats
import pandas as pd
import matplotlib
matplotlib.rcParams.update({'font.size': 15})
def box_plot_data(tot_df, label, units, type_order, type_color, y_lim_top, out_fig_name):
# Drop NaN elements.
tmp_df = tot_df[tot_df[label].notnull()]
# Arrange data into a list of numpy arrays.
type_data = []
for type_key in type_order:
type_data.append(tmp_df[tmp_df['type']==type_key][label].values)
fig, ax = plt.subplots(figsize = (7, 5))
box = ax.boxplot(type_data, patch_artist=True, sym='c.') # notch=True
for patch, color in zip(box['boxes'], [type_color[type_key] for type_key in type_order]):
patch.set_facecolor(color)
for i, type_key in enumerate(type_order):
ax.errorbar([i+1], [type_data[i].mean()], yerr=[type_data[i].std() / np.sqrt(1.0 * type_data[i].size)], marker='o', ms=8, color='k', linewidth=2, capsize=5, markeredgewidth=2, ecolor='k', elinewidth=2)
ind = np.where(type_data[i] > y_lim_top)[0]
ax.annotate(u'$\u2191$'+'\n%d/%d' % (ind.size, type_data[i].size), xy=(i+1.2, 1.0*y_lim_top), fontsize=12)
ax.set_ylim((0.0, y_lim_top))
ax.set_xticks(range(1, len(type_order)+1))
ax.set_xticklabels(type_order)
if (units == ''):
ax.set_ylabel('%s' % (label))
else:
ax.set_ylabel('%s (%s)' % (label, units))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(size=10)
plt.savefig(out_fig_name, format='eps')
plt.show()
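# Illustrative usage sketch for box_plot_data with toy data (hypothetical
# values; defined as a function and never called, so it does not affect the
# analysis below).
def _demo_box_plot_data():
    demo_df = pd.DataFrame({
        'type': ['Scnn1a', 'Scnn1a', 'Rorb', 'Rorb'],
        'CV_ori': [0.2, 0.4, 0.3, 0.5],
    })
    box_plot_data(demo_df, 'CV_ori', '', ['Scnn1a', 'Rorb'],
                  {'Scnn1a': 'darkorange', 'Rorb': 'red'},
                  y_lim_top=1.0, out_fig_name='demo_box_plot.eps')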
cell_db_path = '/allen/aibs/mat/antona/network/14-simulations/9-network/analysis/'
# Decide which systems we are doing analysis for.
sys_dict = {}
# sys_dict['ll1'] = { 'cells_file': cell_db_path + '../build/ll1.csv', 'f_out': cell_db_path + 'Ori/ll1_rates.npy', 'f_out_pref': cell_db_path + 'Ori/ll1_pref_stat.csv'}
# sys_dict['ll2'] = { 'cells_file': cell_db_path + '../build/ll2.csv', 'f_out': cell_db_path + 'Ori/ll2_rates.npy', 'f_out_pref': cell_db_path + 'Ori/ll2_pref_stat.csv'}
# sys_dict['ll3'] = { 'cells_file': cell_db_path + '../build/ll3.csv', 'f_out': cell_db_path + 'Ori/ll3_rates.npy', 'f_out_pref': cell_db_path + 'Ori/ll3_pref_stat.csv'}
#sys_dict['rl1'] = { 'cells_file': '../build/rl1.csv', 'f_out': 'Ori/rl1_rates.npy', 'f_out_pref': 'Ori/rl1_pref_stat.csv'}
#sys_dict['rl2'] = { 'cells_file': '../build/rl2.csv', 'f_out': 'Ori/rl2_rates.npy', 'f_out_pref': 'Ori/rl2_pref_stat.csv'}
#sys_dict['rl3'] = { 'cells_file': '../build/rl3.csv', 'f_out': 'Ori/rl3_rates.npy', 'f_out_pref': 'Ori/rl3_pref_stat.csv'}
#sys_dict['lr1'] = { 'cells_file': '../build/lr1.csv', 'f_out': 'Ori/lr1_rates.npy', 'f_out_pref': 'Ori/lr1_pref_stat.csv'}
#sys_dict['lr2'] = { 'cells_file': '../build/lr2.csv', 'f_out': 'Ori/lr2_rates.npy', 'f_out_pref': 'Ori/lr2_pref_stat.csv'}
#sys_dict['lr3'] = { 'cells_file': '../build/lr3.csv', 'f_out': 'Ori/lr3_rates.npy', 'f_out_pref': 'Ori/lr3_pref_stat.csv'}
#sys_dict['rr1'] = { 'cells_file': '../build/rr1.csv', 'f_out': 'Ori/rr1_rates.npy', 'f_out_pref': 'Ori/rr1_pref_stat.csv'}
#sys_dict['rr2'] = { 'cells_file': '../build/rr2.csv', 'f_out': 'Ori/rr2_rates.npy', 'f_out_pref': 'Ori/rr2_pref_stat.csv'}
#sys_dict['rr3'] = { 'cells_file': '../build/rr3.csv', 'f_out': 'Ori/rr3_rates.npy', 'f_out_pref': 'Ori/rr3_pref_stat.csv'}
#sys_dict['ll2_TF4Hz'] = { 'cells_file': '../build/ll2.csv', 'f_out': 'Ori/ll2_rates_4Hz.npy', 'f_out_pref': 'Ori/ll2_pref_stat_4Hz.csv' }
# sys_dict['ll1_LIF'] = { 'cells_file': cell_db_path + '../build/ll1.csv', 'f_out': '../analysis_intFire1/analysis_ll/Ori/ll1_rates.npy', 'f_out_pref': '../analysis_intFire1/analysis_ll/Ori/ll1_pref_stat.csv'}
# sys_dict['ll2_LIF'] = { 'cells_file': cell_db_path + '../build/ll2.csv', 'f_out': '../analysis_intFire1/analysis_ll/Ori/ll2_rates.npy', 'f_out_pref': '../analysis_intFire1/analysis_ll/Ori/ll2_pref_stat.csv'}
# sys_dict['ll3_LIF'] = { 'cells_file': cell_db_path + '../build/ll3.csv', 'f_out': '../analysis_intFire1/analysis_ll/Ori/ll3_rates.npy', 'f_out_pref': '../analysis_intFire1/analysis_ll/Ori/ll3_pref_stat.csv'}
#sys_dict['rl1_LIF'] = { 'cells_file': '../build/rl1.csv', 'f_out': 'Ori_LIF/rl1_rates.npy', 'f_out_pref': 'Ori_LIF/rl1_pref_stat.csv'}
#sys_dict['rl2_LIF'] = { 'cells_file': '../build/rl2.csv', 'f_out': 'Ori_LIF/rl2_rates.npy', 'f_out_pref': 'Ori_LIF/rl2_pref_stat.csv'}
#sys_dict['rl3_LIF'] = { 'cells_file': '../build/rl3.csv', 'f_out': 'Ori_LIF/rl3_rates.npy', 'f_out_pref': 'Ori_LIF/rl3_pref_stat.csv'}
#sys_dict['lr1_LIF'] = { 'cells_file': '../build/lr1.csv', 'f_out': 'Ori_LIF/lr1_rates.npy', 'f_out_pref': 'Ori_LIF/lr1_pref_stat.csv'}
#sys_dict['lr2_LIF'] = { 'cells_file': '../build/lr2.csv', 'f_out': 'Ori_LIF/lr2_rates.npy', 'f_out_pref': 'Ori_LIF/lr2_pref_stat.csv'}
#sys_dict['lr3_LIF'] = { 'cells_file': '../build/lr3.csv', 'f_out': 'Ori_LIF/lr3_rates.npy', 'f_out_pref': 'Ori_LIF/lr3_pref_stat.csv'}
#sys_dict['rr1_LIF'] = { 'cells_file': '../build/rr1.csv', 'f_out': 'Ori_LIF/rr1_rates.npy', 'f_out_pref': 'Ori_LIF/rr1_pref_stat.csv'}
#sys_dict['rr2_LIF'] = { 'cells_file': '../build/rr2.csv', 'f_out': 'Ori_LIF/rr2_rates.npy', 'f_out_pref': 'Ori_LIF/rr2_pref_stat.csv'}
#sys_dict['rr3_LIF'] = { 'cells_file': '../build/rr3.csv', 'f_out': 'Ori_LIF/rr3_rates.npy', 'f_out_pref': 'Ori_LIF/rr3_pref_stat.csv'}
sys_dict['ll1_LIF'] = { 'cells_file': cell_db_path + '../build/ll1.csv', 'f_out': '../analysis_intFire4/analysis_ll/Ori/ll1_rates.npy', 'f_out_pref': '../analysis_intFire4/analysis_ll/Ori/ll1_pref_stat.csv'}
sys_dict['ll2_LIF'] = { 'cells_file': cell_db_path + '../build/ll2.csv', 'f_out': '../analysis_intFire4/analysis_ll/Ori/ll2_rates.npy', 'f_out_pref': '../analysis_intFire4/analysis_ll/Ori/ll2_pref_stat.csv'}
sys_dict['ll3_LIF'] = { 'cells_file': cell_db_path + '../build/ll3.csv', 'f_out': '../analysis_intFire4/analysis_ll/Ori/ll3_rates.npy', 'f_out_pref': '../analysis_intFire4/analysis_ll/Ori/ll3_pref_stat.csv'}
# result_fig_prefix = 'Ori/new_Ori_bio_ll'
# result_fig_prefix = 'Ori/new_Ori_lif1_ll'
result_fig_prefix = 'Ori/new_Ori_lif4_ll'
result_fig_CV_ori = result_fig_prefix + '_CV_ori.eps'
result_fig_DSI = result_fig_prefix + '_DSI.eps'
type_color = {'Scnn1a': 'darkorange', 'Rorb': 'red', 'Nr5a1': 'magenta', 'PV1': 'blue', 'PV2': 'cyan', 'AnL4E': 'gray', 'AwL4E': 'gray', 'AnI': 'gray', 'AwI': 'gray'}
type_order = ['Scnn1a', 'Rorb', 'Nr5a1', 'AnL4E', 'AwL4E', 'PV1', 'PV2', 'AnI', 'AwI']
# Read files with OSI and DSI from simulations.
sim_df = pd.DataFrame()
for sys_name in sys_dict.keys():
tmp_df = pd.read_csv(sys_dict[sys_name]['f_out_pref'], sep=' ')
cells_df = pd.read_csv(sys_dict[sys_name]['cells_file'], sep=' ')
cells_df_1 = pd.DataFrame()
cells_df_1['id'] = cells_df['index'].values
cells_df_1['type'] = cells_df['type'].values
tmp_df = pd.merge(tmp_df, cells_df_1, on='id', how='inner')
# Combine dataframes from all systems into one file.
    sim_df = pd.concat([sim_df, tmp_df], axis=0)  # api: pandas.concat
"""
2018 <NAME>
train_models_given_z.py
This script will loop through different combinations of latent space
dimensionality, train a distinct model, and save associated decoder weights
and z matrices. The script will need to pull appropriate hyperparameters from
respective files after several initial sweeps testing various hyperparameter
combinations with different z dimensionality.
The script will fit several different compression algorithms and output
results. The results include the weight matrices and z matrices of the models
with the lowest reconstruction loss. The total stability of reconstruction
losses is represented through the variability associated with each model after
retraining several times given the argument for number of seeds. We also
report the determinants of correlation matrices for all iterations. These
values measure the stability of either latent space (z) matrices or weight
matrices across several rounds of fitting. In this case, the closer the
determinants are to zero, the more stable the solutions.
Usage:
python run_model_with_input_dimensions.py
With required command line arguments:
--num_components The z dimensionality we're testing
--param_config A tsv file (param by z dimension) indicating the
specific parameter combination for the z dimension
--out_dir The directory to store the output files
And optional command line arguments
--num_seeds The number of specific models to generate
default: 5
"""
import os
import argparse
import numpy as np
import pandas as pd
from tybalt.data_models import DataModel
def get_lowest_loss(matrix_list, reconstruction_df,
algorithms=['pca', 'ica', 'nmf', 'dae', 'vae']):
"""
Determine the specific model with the lowest loss using reconstruction cost
Arguments:
matrix_list - list of matrices (either weight or z matrices)
reconstruction_df - pandas DataFrame (seed by reconstruction error)
algorithms - which algorithms to consider
Output:
Single matrix of the "best" alternative matrices by lowest recon error
"""
final_matrix_list = []
for alg in algorithms:
# Get lowest reconstruction error across iterations for an algorithm
min_recon_subset = reconstruction_df.loc[:, alg].idxmin()
# subset the matrix to the minimum loss
best_matrix = matrix_list[min_recon_subset]
# Extract the algorithm specific columns from the concatenated matrix
use_cols = best_matrix.columns.str.startswith(alg)
best_matrix_subset = best_matrix.loc[:, use_cols]
# Build the final matrix that will eventually be output
final_matrix_list.append(best_matrix_subset)
    return pd.concat(final_matrix_list, axis=1)  # api: pandas.concat
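# Illustrative usage sketch for get_lowest_loss with toy inputs (hypothetical
# values; two seeds of a single 'pca' fit, with columns following the
# '<algorithm>_<index>' naming the startswith() filter assumes). Defined as a
# function and never called.
def _demo_get_lowest_loss():
    matrices = [
        pd.DataFrame({'pca_0': [0.1, 0.2], 'pca_1': [0.3, 0.4]}),
        pd.DataFrame({'pca_0': [0.5, 0.6], 'pca_1': [0.7, 0.8]}),
    ]
    recon = pd.DataFrame({'pca': [1.2, 0.9]})  # seed 1 has the lower loss
    return get_lowest_loss(matrices, recon, algorithms=['pca'])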
from typing import List
import pandas as pd
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import streamlit as st
CLOSE_MATCH_DIFF = 10
COLORS = ['#EC7063', '#AF7AC5', '#5DADE2', '#48C9B0', '#F9E79F',
'#E59866', '#F06292', '#58D68D', '#AED6F1', '#F8BBD0',
'#6488EA', '#76424E', '#E4CBFF', '#FEF69E', '#BCECAC', '#13EAC9']
COLORS += COLORS
MARKERS = ['circle', 'square', 'diamond', 'cross', 'x', 'triangle-up',
'triangle-down', 'triangle-left', 'triangle-right', 'pentagon',
'star', 'star-diamond', 'diamond-tall', 'diamond-wide', 'hourglass']
MARKERS += MARKERS[::-1]
class schedule:
def __init__(self, wide: pd.DataFrame):
        self.long = pd.melt(wide, id_vars=['Week'], var_name='Player', value_name='Vs')  # api: pandas.melt
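# Illustrative wide -> long reshape performed in schedule.__init__ above
# (hypothetical two-player schedule):
#
#   wide = pd.DataFrame({'Week': [1, 2],
#                        'Alice': ['Bob', 'Cara'],
#                        'Bob': ['Alice', 'Dan']})
#   pd.melt(wide, id_vars=['Week'], var_name='Player', value_name='Vs')
#   # -> one row per (Week, Player) pair, with the opponent in the 'Vs' column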
from app import app
from bokeh.embed import components
from bokeh.plotting import figure, show
from bokeh.resources import INLINE
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.palettes import brewer
from bokeh.models import NumeralTickFormatter
from flask import render_template, flash, redirect, url_for, request, jsonify, session
from flask_login import current_user, login_user, logout_user, login_required
from datetime import datetime
from app.db import get_db, query
from app.plot import vbar, vbar_stack, line, multiline
import numpy as np
import pandas as pd
import math
@app.route('/customer', methods=['GET', 'POST'])
@login_required
def customer():
'''
Render order page
'''
date_start = request.form.get('date_start', '2018-01-01')
date_end = request.form.get('date_end', '2018-01-31')
if request.form.get('time_frame') is None:
time_frame = 'date'
else:
time_frame = request.form.get('time_frame')
time_dict = {'date': 'date', 'ww': 'week', 'mon': 'month', 'q': 'quarter'}
# Active customer number trend
act_data = get_customer_trend(date_start, date_end, time_frame)
act_js, act_div = line(act_data, time_dict[time_frame], 'customer_number', 'number')
# Order number by geo for each category
geo_data = get_num_order_by_geo(date_start, date_end)
geo_js, geo_div = vbar_stack(geo_data, 'category', 'order_number', 'number', brewer['Spectral'][9], 1,
'northeast', 'east', 'southeast', 'north', 'south', 'west', 'southwest', 'northwest', 'middle')
# Geo order trend
geo_trend_data = get_geo_order_trend(date_start, date_end, time_frame)
geo_trend_js, geo_trend_div = multiline(geo_trend_data, time_dict[time_frame], 'order_number', 'number',
'northeast', 'east', 'southeast', 'north', 'south', 'west', 'southwest', 'northwest', 'middle')
    # Repeat orders (same product > 2 times)
repeat_data = get_repeat_order_by_time(date_start, date_end)
repeat_js, repeat_div = vbar_stack(repeat_data, 'category', 'order_number', 'number', ["#3cba54", "#f4c20b"], 0.8,
'repeated', 'unrepeated')
# Order number by gender for each category
gender_data = get_num_order_by_gender_cat(date_start, date_end)
gender_js, gender_div = vbar_stack(gender_data, 'category', 'order_number', 'number', ["#da3337", "#4986ec"], 0.8,
'female', 'male')
# Customer state distribution for top 10 states
customer_geo_data = get_customer_by_geo(date_start, date_end)
customer_geo_js, customer_geo_div = vbar(customer_geo_data, 'state', 'number', 'number')
# grab the static resources
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
html = render_template(
'customer.html',
act_js=act_js,
act_div=act_div,
customer_geo_js=customer_geo_js,
customer_geo_div=customer_geo_div,
repeat_js=repeat_js,
repeat_div=repeat_div,
gender_js=gender_js,
gender_div=gender_div,
geo_js=geo_js,
geo_div=geo_div,
geo_trend_js=geo_trend_js,
geo_trend_div=geo_trend_div,
js_resources=js_resources,
css_resources=css_resources,
date_start=date_start,
date_end=date_end,
)
return html
def get_customer_trend(date_start, date_end, time_frame):
"""
Return the trend of active customer number in the time range
"""
time_dict = {'date': 'date', 'ww': 'week', 'mon': 'month', 'q': 'quarter'}
if time_frame == 'date' or time_frame is None: # None is used for switch page default frame
sql = f"""
select salesdate, count(unique customerID)
from sales
where salesdate between to_date('{date_start}', 'YYYY-MM-DD') and to_date('{date_end}', 'YYYY-MM-DD')
group by salesdate
order by salesdate
"""
rows = query(sql)
df = pd.DataFrame(columns=['date', 'customer_number'])
for row in rows:
df.loc[len(df), :] = row
df['date'] = pd.to_datetime(df['date'])
else:
sql = f"""
select to_char(salesdate, '{time_frame}'), count(unique customerID)
from sales
where salesdate between to_date('{date_start}', 'YYYY-MM-DD') and to_date('{date_end}', 'YYYY-MM-DD')
and salesdate is Not null
group by to_char(salesdate, '{time_frame}')
order by to_char(salesdate, '{time_frame}')
"""
rows = query(sql)
df = pd.DataFrame(columns=[time_dict[time_frame], 'customer_number'])
for row in rows:
df.loc[len(df), :] = row
return df
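# Note on the query loops in this module: appending row-by-row with
# df.loc[len(df), :] = row grows the frame one insert at a time; an equivalent
# and usually faster construction (assuming `rows` is the list of tuples
# returned by query()) would be:
#
#   df = pd.DataFrame(rows, columns=['date', 'customer_number'])
#   df['date'] = pd.to_datetime(df['date'])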
def get_num_order_by_geo(date_start, date_end):
"""
    Return the number of orders in each geo region for each category within the time range.
"""
sql = f"""
with geo_cat as
(select count(salesID) as order_num, city.zipcode as zipcode, productcategory.name as category
from customer, sales, product, productcategory, city
where salesdate between to_date('{date_start}', 'YYYY-MM-DD') and to_date('{date_end}', 'YYYY-MM-DD')
and customer.customerID = sales.customerID
and sales.productID = product.productID
and product.categoryID = productcategory.categoryID
and customer.city = city.cityID
group by zipcode, productcategory.name)
select category,
sum(case when zipcode between 0 and 19999 then order_num else 0 end) as northeast,
sum(case when zipcode between 20000 and 29999 then order_num else 0 end) as east,
sum(case when zipcode between 30000 and 39999 then order_num else 0 end) as southeast,
sum(case when zipcode between 40000 and 59999 then order_num else 0 end) as north,
sum(case when zipcode between 70000 and 79999 then order_num else 0 end) as south,
sum(case when zipcode between 84000 and 95000 then order_num else 0 end) as west,
sum(case when zipcode between 95001 and 96999 then order_num else 0 end) as southwest,
    sum(case when zipcode between 97000 and 99999 then order_num else 0 end) as northwest,
sum(case when zipcode between 60000 and 69999 or zipcode between 80000 and 83999 then order_num else 0 end) as middle
from geo_cat
group by category
"""
rows = query(sql)
df = pd.DataFrame(columns=['category', 'northeast', 'east', 'southeast', 'north', 'south', 'west', 'southwest',
'northwest', 'middle'])
for row in rows:
df.loc[len(df), :] = row
return df
def get_geo_order_trend(date_start, date_end, time_frame):
"""
Return trend of order number in different geo region in time range.
"""
basis_dict = {'revenue': 'sum(sales.total)', 'order_number': 'count(sales.salesID)'}
time_dict = {'date': 'date', 'ww': 'week', 'mon': 'month', 'q': 'quarter'}
if time_frame == 'date' or time_frame is None: # None is used for switch page default frame
sql = f'''
select salesdate,
sum(case when zipcode between 0 and 19999 then order_num else 0 end) as northeast,
sum(case when zipcode between 20000 and 29999 then order_num else 0 end) as east,
sum(case when zipcode between 30000 and 39999 then order_num else 0 end) as southeast,
sum(case when zipcode between 40000 and 59999 then order_num else 0 end) as north,
sum(case when zipcode between 70000 and 79999 then order_num else 0 end) as south,
sum(case when zipcode between 84000 and 95000 then order_num else 0 end) as west,
sum(case when zipcode between 95001 and 96999 then order_num else 0 end) as southwest,
    sum(case when zipcode between 97000 and 99999 then order_num else 0 end) as northwest,
sum(case when zipcode between 60000 and 69999 or zipcode between 80000 and 83999 then order_num else 0 end) as middle
from
(select salesdate, count(salesID) as order_num, city.zipcode as zipcode
from customer, sales, city
where salesdate between to_date('{date_start}', 'YYYY-MM-DD') and to_date('{date_end}', 'YYYY-MM-DD')
and customer.customerID = sales.customerID
and customer.city = city.cityID
group by salesdate, zipcode)
group by salesdate
order by salesdate
'''
rows = query(sql)
df = pd.DataFrame(columns=['date', 'northeast', 'east', 'southeast', 'north', 'south', 'west', 'southwest',
'northwest', 'middle'])
for row in rows:
df.loc[len(df), :] = row
df['date'] = pd.to_datetime(df['date'])
else:
sql = f'''
select range,
sum(case when zipcode between 0 and 19999 then order_num else 0 end) as northeast,
sum(case when zipcode between 20000 and 29999 then order_num else 0 end) as east,
sum(case when zipcode between 30000 and 39999 then order_num else 0 end) as southeast,
sum(case when zipcode between 40000 and 59999 then order_num else 0 end) as north,
sum(case when zipcode between 70000 and 79999 then order_num else 0 end) as south,
sum(case when zipcode between 84000 and 95000 then order_num else 0 end) as west,
sum(case when zipcode between 95001 and 96999 then order_num else 0 end) as southwest,
    sum(case when zipcode between 97000 and 99999 then order_num else 0 end) as northwest,
sum(case when zipcode between 60000 and 69999 or zipcode between 80000 and 83999 then order_num else 0 end) as middle
from
(select to_char(salesdate, '{time_frame}') as range, count(salesID) as order_num, city.zipcode as zipcode
from customer, sales, city
where salesdate between to_date('{date_start}', 'YYYY-MM-DD') and to_date('{date_end}', 'YYYY-MM-DD')
and customer.customerID = sales.customerID
and customer.city = city.cityID
group by to_char(salesdate, '{time_frame}'), zipcode)
group by range
order by range
'''
rows = query(sql)
df = pd.DataFrame(columns=[time_dict[time_frame], 'northeast', 'east', 'southeast', 'north', 'south', 'west',
'southwest', 'northwest', 'middle'])
for row in rows:
df.loc[len(df), :] = row
return df
def get_repeat_order_by_time(date_start, date_end):
"""
    Return the number of repeated purchases (same product > 2 times)
    and the total number of orders for each category within the time range.
"""
sql = f"""
with orders as
(select sales.customerID as customer_id, productcategory.name as category, sales.salesID as salesID
from customer, sales, product, productcategory
where salesdate between to_date('{date_start}', 'YYYY-MM-DD') and to_date('{date_end}', 'YYYY-MM-DD')
and customer.customerID = sales.customerID
and sales.productID = product.productID
and product.categoryID = productcategory.categoryID)
select avg(number_total), sum(number_repeat) as repeat, cat1 as category
from (select count(salesID) as number_total, category as cat1
from orders
group by category
)
inner join
(select count(salesID) as number_repeat, category as cat2
from orders
group by customer_id, category
having count(salesID) > 2
)
on cat1 = cat2
group by cat1
"""
    # avg(number_total) is used because, after the group by, every row within
    # the same category carries the same value for number_total
rows = query(sql)
    df = pd.DataFrame(columns=['total', 'repeated', 'category'])  # api: pandas.DataFrame
# -*- coding: utf-8 -*-
import sys, os
import pandas as pd
import openpyxl
from openpyxl.styles import PatternFill
import numpy as np
from collections import defaultdict
from scanner_map import searchKey, CertifiedManufacturerModelNameCTDict, CertifiedManufacturerCTDict, TrueManufacturerModelNameCTDict, TrueManufacturerCTDict
from scanner_map import ScannerType, CertifiedManufacturerModelNameICADict, CertifiedManufacturerICADict, TrueManufacturerModelNameICADict, TrueManufacturerICADict
from datetime import datetime
from openpyxl.utils import get_column_letter
from openpyxl.styles import Font, Color, Border, Side
from openpyxl.styles.differential import DifferentialStyle
from openpyxl.formatting import Rule
def highlight_columns(sheet, columns=[], color='A5A5A5', offset=2):
for col in columns:
cell = sheet.cell(1, col+offset)
cell.fill = PatternFill(start_color=color, end_color=color, fill_type = 'solid')
return sheet
def merge_defaultdicts(d,d1):
for k,v in d1.items():
if (k in d):
d[k].update(d1[k])
else:
d[k] = d1[k]
return d
def covertDate(date_str):
month_lookup = defaultdict(lambda: None, {'JAN':1, 'FEB':2, 'MAR':3, 'APR':4, 'MAY':5, 'JUN':6, 'JUL':7, 'AUG':8,'SEP':9, 'OCT':10,'NOV':11, 'DEC':12})
day = str(date_str[0:2])
month = str(month_lookup[date_str[2:5]])
year = date_str[5:9]
s = year + month + day
return datetime.strptime(s, '%Y%m%d')
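# Example of the 'DDMONYYYY' strings covertDate() expects:
#
#   covertDate('05JAN2020')  # -> datetime(2020, 1, 5)
#   covertDate('23AUG2019')  # -> datetime(2019, 8, 23)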
def checkModalities(modList0, modList1):
for m0 in modList0:
for m1 in modList1:
if m0==m1:
return True
return False
def splitScannerList(filepath_scanner):
#filepath_scanner = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/scanner_correspondence_V05_manual.xlsx'
df_scanner = pd.read_excel(filepath_scanner, 'linear', index_col=0)
df_missing_CT = pd.DataFrame(columns=df_scanner.columns)
df_missing_XA = pd.DataFrame(columns=df_scanner.columns)
df_missing = df_scanner[(df_scanner['ECRF_MISSING']==True) & (df_scanner['ITT']!=2)]
for index, row in df_missing.iterrows():
if 'DICOM XA' in row['ManualCheck']:
df_missing_XA = df_missing_XA.append(row)
if 'DICOM CT' in row['ManualCheck']:
df_missing_CT = df_missing_CT.append(row)
    # Open the workbook in append mode to add the new sheets
writer = pd.ExcelWriter(filepath_scanner, engine="openpyxl", mode="a")
# Update CT sheet
sheet_name = 'ECRF_MISSING_CT'
workbook = writer.book
df_missing_CT.to_excel(writer, sheet_name=sheet_name, index=False)
sheet = workbook[sheet_name]
# Update XA sheet
sheet_name = 'ECRF_MISSING_XA'
workbook = writer.book
df_missing_XA.to_excel(writer, sheet_name=sheet_name, index=False)
sheet = workbook[sheet_name]
writer.save()
# Read discharge data
filepath_dicom = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/discharge_dicom_27082020_OT.xlsx'
filepath_ecrf_study = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/ecrf_study_20200827.xlsx'
filepath_scanner_old = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/scanner_correspondence_V04_manual.xlsx'
filepath_scanner = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/scanner_correspondence.xlsx'
df_dicom = pd.read_excel(filepath_dicom, 'linear', index_col=0)
#df_dicom=df_dicom[0:1000]
df_dicom.replace(to_replace=[np.nan], value='', inplace=True)
df_ecrf = pd.read_excel(filepath_ecrf_study, 'Tabelle1')
#df_ecrf=df_ecrf[0:1000]
df_scanner_old = pd.read_excel(filepath_scanner_old, 'linear', index_col=0)
df_scanner_old.replace(to_replace=[np.nan], value='', inplace=True)
columns_scanner_rename=['PatientID', 'Site', 'ITT', 'RD_MB', '1. Date of CT', 'Date of ICA scan',
'Date of ICA scan 2', 'Date of staged PCI 1', 'Date of staged PCI 2',
'Date of staged PCI 3', 'duplicate entry', 'FFR', 'MRI_visite',
'Date of Echo', 'Date of PET', 'Date of SPECT:', 'Date of FU_CT-scan',
'Date cec_ct', 'Date pet ct', 'Date ldct', 'ldct 3m', 'ldct 6m',
'ldct 12m', 'Date FU ICA scan']
columns_scanner=['PatientID', 'Site', 'ITT', 'RD_MB',
'1. Date of CT', '1. Date of CT StudyInstanceUID',
'Date of ICA scan', 'Date of ICA scan StudyInstanceUID',
'Date of ICA scan 2', 'Date of ICA scan 2 StudyInstanceUID',
'Date of staged PCI 1', 'Date of staged PCI 1 StudyInstanceUID',
'Date of staged PCI 2', 'Date of staged PCI 2 StudyInstanceUID',
'Date of staged PCI 3', 'Date of staged PCI 3 StudyInstanceUID',
'duplicate entry',
'FFR', 'FFR StudyInstanceUID',
'MRI_visite',
'Date of Echo', 'Date of Echo StudyInstanceUID',
'Date of PET', 'Date of PET StudyInstanceUID',
'Date of SPECT:', 'Date of SPECT: StudyInstanceUID',
'Date of FU_CT-scan', 'Date of FU_CT-scan StudyInstanceUID',
'Date cec_ct', 'Date cec_ct StudyInstanceUID',
'Date pet ct', 'Date pet ct StudyInstanceUID',
'Date ldct', 'Date ldct StudyInstanceUID',
'ldct 3m', 'ldct 3m StudyInstanceUID',
'ldct 6m', 'ldct 6m StudyInstanceUID',
'ldct 12m', 'ldct 12m StudyInstanceUID',
'Date FU ICA scan', 'Date FU ICA scan StudyInstanceUID']
columns_scanner_missing = [x for x in columns_scanner if x not in columns_scanner_rename]
#columns_result = ['OK', 'DICOM_MISSING', 'ECRF_MISSING', 'DICOM_ECRF_MISMATCH']
columns_result = ['DICOM_MISSING', 'ECRF_MISSING', 'ECRF_MISSING_SeriesInstanceUID']
columns_ecrf=['Patient identifier', 'Centre name (mnpctrname)', 'ITT', 'RD_MB', '1. Date of CT', 'Date of ICA scan',
'Date of ICA scan 2', 'Date of staged PCI 1', 'Date of staged PCI 2',
'Date of staged PCI 3', 'duplicate entry ', 'FFR', 'MRI_visite',
'Date of Echo', 'Date of PET', 'Date of SPECT:', 'Date of FU_CT-scan:',
'Date cec_ct', 'Date pet ct', 'Date ldct:', 'ldct 3m', 'ldct 6m',
'ldct 12m', 'Date FU ICA scan:']
dates_required = ['1. Date of CT', 'Date of ICA scan', 'Date of ICA scan 2', 'Date of staged PCI 1', 'Date of staged PCI 2',
'Date of staged PCI 3']
modalities_required = defaultdict(lambda: None, {'1. Date of CT': ['CT'], 'Date of ICA scan': ['XA'], 'Date of ICA scan 2': ['XA'],
'Date of staged PCI 1': ['XA'], 'Date of staged PCI 2': ['XA'], 'Date of staged PCI 3': ['XA']})
dates_sidestudy = ['FFR','Date of Echo', 'Date of PET', 'Date of SPECT:', 'Date of FU_CT-scan',
'Date cec_ct', 'Date pet ct', 'Date ldct', 'ldct 3m', 'ldct 6m','ldct 12m', 'Date FU ICA scan']
modalities_sidestudy = defaultdict(lambda: None, {'FFR': ['XA'], 'Date of Echo': ['US'], 'Date of PET': ['CT','PT'], 'Date of SPECT:': ['CT','NM'], 'Date of FU_CT-scan': ['CT'],
'Date cec_ct': ['CT'], 'Date pet ct': ['PT'], 'Date ldct': ['CT'], 'ldct 3m': ['CT'], 'ldct 6m': ['CT'],'ldct 12m': ['CT'],
'Date FU ICA scan': ['XA']})
dates_all = dates_required + dates_sidestudy
# f = 'H:/cloud/cloud_data/Projects/BIOQIC/08_Research/PACSServer/date.sas7bdat'
# f = 'C:/Users/bernifoellmer/Downloads/SASVisualForecasting_sampledatasets/skinproduct_vfdemo.sas7bdat'
# db = pd.read_sas(f)
# Create dataframe with patient per line
df_scanner = pd.DataFrame()  # api: pandas.DataFrame
# coding: utf-8
# # From Multilayer Networks to Deep Graphs
# ## The Noordin Top Terrorist Data
# ### Preprocessing
# In[1]:
# data i/o
import os
import subprocess
import zipfile
# for plots
import matplotlib.pyplot as plt
# the usual
import numpy as np
import pandas as pd
import deepgraph as dg
# notebook display
# get_ipython().magic('matplotlib inline')
# pd.options.display.max_rows = 10
# pd.set_option('expand_frame_repr', False)
# ### Preprocessing the Nodes
# In[2]:
# zip file containing node attributes
os.makedirs("tmp", exist_ok=True)
get_nodes_zip = ("wget -O tmp/terrorist_nodes.zip "
"https://sites.google.com/site/sfeverton18/"
"research/appendix-1/Noordin%20Subset%20%28ORA%29.zip?"
"attredirects=0&d=1")
subprocess.call(get_nodes_zip.split())
# unzip
zf = zipfile.ZipFile('tmp/terrorist_nodes.zip')
zf.extract('Attributes.csv', path='tmp/')
zf.close()
# create node table
v = pd.read_csv('tmp/Attributes.csv')
v.rename(columns={'Unnamed: 0': 'Name'}, inplace=True)
# create a copy of all nodes for each layer (i.e., create "node-layers")
# there are 10 layers and 79 nodes on each layer
v = pd.concat(10*[v])
# add "aspect" as column to v
layer_names = ['Business', 'Communication', 'O Logistics', 'O Meetings',
'O Operations', 'O Training', 'T Classmates', 'T Friendship',
'T Kinship', 'T Soulmates']
layers = [[name]*79 for name in layer_names]
layers = [item for sublist in layers for item in sublist]
v['layer'] = layers
# set unique node index
v.reset_index(inplace=True)
v.rename(columns={'index': 'V_N'}, inplace=True)
# swap columns
cols = list(v)
cols[1], cols[10] = cols[10], cols[1]
v = v[cols]
# get rid of the attribute columns for demonstrational purposes,
# will be inserted again later
v, vinfo = v.iloc[:, :2], v.iloc[:, 2:]
# ### Preprocessing the Edges
# In[3]:
# paj file containing edges for different layers
get_paj = ("wget -O tmp/terrorists.paj "
"https://sites.google.com/site/sfeverton18/"
"research/appendix-1/Noordin%20Subset%20%28Pajek%29.paj?"
"attredirects=0&d=1")
subprocess.call(get_paj.split())
# get data blocks from paj file
with open('tmp/terrorists.paj') as txtfile:
comments = []
data = []
part = []
for line in txtfile:
if line.startswith('*'):
# comment lines
comment = line
comments.append(comment)
if part:
data.append(part)
part = []
else:
# vertices
if comment.startswith('*Vertices') and len(line.split()) > 1:
sublist = line.split('"')
sublist = sublist[:2] + sublist[-1].split()
part.append(sublist)
# edges or partitions
elif not line.isspace():
part.append(line.split())
# append last block
data.append(part)
# extract edge tables from data blocks
ecomments = []
eparts = []
for i, c in enumerate(comments):
if c.startswith('*Network'):
del data[0]
elif c.startswith('*Partition'):
del data[0]
elif c.startswith('*Vector'):
del data[0]
elif c.startswith('*Arcs') or c.startswith('*Edges'):
ecomments.append(c)
eparts.append(data.pop(0))
# layer data parts (indices found manually via comments)
inds = [11, 10, 5, 6, 7, 8, 0, 1, 2, 3]
eparts = [eparts[ind] for ind in inds]
# convert to DataFrames
layer_frames = []
for name, epart in zip(layer_names, eparts):
frame = pd.DataFrame(epart, dtype=np.int16)
# get rid of self-loops, bidirectional edges
frame = frame[frame[0] < frame[1]]
# rename columns
frame.rename(columns={0: 's', 1: 't', 2: name}, inplace=True)
frame['s'] -= 1
frame['t'] -= 1
layer_frames.append(frame)
# set indices
for i, e in enumerate(layer_frames):
e['s'] += i*79
e['t'] += i*79
e.set_index(['s', 't'], inplace=True)
# concat the layers
e = pd.concat(layer_frames)  # api: pandas.concat
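# With the node table v and the edge table e (indexed by 's', 't') in place,
# the usual next step would be to hand both to deepgraph; a sketch, assuming
# the DeepGraph(v, e) constructor of the imported package:
#
#   g = dg.DeepGraph(v, e)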
import pandas as pd
import sys, os
from collections import OrderedDict
from viola.core.bedpe import Bedpe
from viola.core.vcf import Vcf
from typing import (
List,
Optional,
)
class MultiBedpe(Bedpe):
"""
A database-like object that contains information of multiple BEDPE files.
In this class, main keys in most tables are "global id" instead of using
"SV id" from SV callers. "global id" is unique ID of all the SV record
across all the samples.
"""
_internal_attrs = [
"_df_id",
"_df_patients",
"_df_svpos",
"_odict_df_info",
"_ls_patients",
"_ls_infokeys",
"_odict_alltables",
"_repr_config",
"_sig_criteria"
]
_internal_attrs_set = set(_internal_attrs)
_repr_column_names = [
"id",
"bp1",
"bp2",
"strand",
"qual",
"svtype",
]
_repr_column_names_set = set(_repr_column_names)
def __init__(
self,
ls_bedpe: List[Bedpe] = None,
ls_patient_names: List[str] = None,
direct_tables: Optional[List[pd.DataFrame]] = None
):
if direct_tables is None:
df_id, df_patients, df_svpos, odict_df_info = self.__init__from_ls_bedpe(ls_bedpe, ls_patient_names)
self.__init__common(df_id, df_patients, df_svpos, odict_df_info)
else:
self.__init__common(*direct_tables)
def __init__from_ls_bedpe(self, ls_bedpe, ls_patient_names):
ls_df_id = []
ls_df_svpos = []
dict_ls_df_info = dict()
ls_patient_id = [i for i in range(len(ls_patient_names))]
df_patients = pd.DataFrame({'id': ls_patient_id, 'patients': ls_patient_names})
for bedpe, patient_id, patient_name in zip(ls_bedpe, ls_patient_id, ls_patient_names):
df_svpos = bedpe.get_table('positions')
df_id = df_svpos[['id']].copy()
df_id['patient_id'] = patient_id
df_id['global_id'] = str(patient_name) + '_' + df_id['id'].astype(str)
df_id = df_id[['global_id', 'patient_id', 'id']]
ls_df_id.append(df_id)
df_svpos['id'] = str(patient_name) + '_' + df_svpos['id'].astype(str)
ls_df_svpos.append(df_svpos)
for key, value in bedpe._odict_df_info.items():
value = value.copy()
value['id'] = str(patient_name) + '_' + value['id'].astype(str)
if dict_ls_df_info.get(key) is None:
dict_ls_df_info[key] = [value]
else:
dict_ls_df_info[key].append(value)
df_concat_id = pd.concat(ls_df_id, ignore_index=True)
df_concat_svpos = pd.concat(ls_df_svpos, ignore_index=True)
odict_df_info = OrderedDict()
for key, value in dict_ls_df_info.items():
odict_df_info[key] = pd.concat(value)
return (df_concat_id, df_patients, df_concat_svpos, odict_df_info)
def __init__common(self, df_id, df_patients, df_svpos, odict_df_info):
self._df_id = df_id
self._df_patients = df_patients
self._ls_patients = df_patients['patients'].to_list()
self._df_svpos = df_svpos
self._odict_df_info = odict_df_info
self._ls_infokeys = [x.lower() for x in odict_df_info.keys()]
ls_keys = ['global_id', 'patients', 'positions'] + self._ls_infokeys
ls_values = [df_id, df_patients, df_svpos] + list(odict_df_info.values())
self._odict_alltables = OrderedDict([(k, v) for k, v in zip(ls_keys, ls_values)])
self._repr_config = {
'info': None,
}
def filter_by_id(self, arrlike_id):
"""
filter_by_id(arrlike_id)
Filter MultiBedpe object according to the list of SV ids.
        The returned object is also an instance of MultiBedpe.
Parameters
---------------
arrlike_id: list-like
Global ids which you would like to keep.
Returns
---------------
MultiBedpe
A MultiBedpe object with the SV id specified in the arrlike_id argument.
All records associated with SV ids that are not in the arrlike_id will be discarded.
"""
df_global_id = self.get_table('global_id')
out_global_id = df_global_id.loc[df_global_id['global_id'].isin(arrlike_id)].reset_index(drop=True)
out_patients = self.get_table('patients')
out_svpos = self._filter_by_id('positions', arrlike_id)
out_odict_df_info = OrderedDict([(k, self._filter_by_id(k, arrlike_id)) for k in self._ls_infokeys])
return MultiBedpe(direct_tables=[out_global_id, out_patients, out_svpos, out_odict_df_info])
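    # Illustrative usage sketch (hypothetical Bedpe objects and global IDs):
    #
    #   mb = MultiBedpe(ls_bedpe=[bedpe_a, bedpe_b],
    #                   ls_patient_names=['case_a', 'case_b'])
    #   mb_subset = mb.filter_by_id(['case_a_1', 'case_b_7'])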
def classify_manual_svtype(self, definitions=None, ls_conditions=None, ls_names=None, ls_order=None, return_data_frame=True, exclude_empty_cases=False):
"""
classify_manual_svtype(definitions, ls_conditions, ls_names, ls_order=None, exclude_empty_cases=False)
Classify SV records by user-defined criteria. A new INFO table named
'manual_sv_type' will be created.
Parameters
------------
definitions: path_or_buf or str, default None
Path to the file which specifies the definitions of custom SV classification. This argument is disabled when "ls_condition" is not None.
If "default" is specified, the simple length-based SV classification will be employed.
If "article" is specified, the same definition file which was used in the Viola publication will be reflected.
Below is the links to each of definition file you can specify on this method.
"default" -> https://github.com/dermasugita/Viola-SV/blob/master/examples/demo_sig/resources/definitions/sv_class_default.txt
"article" -> https://github.com/dermasugita/Viola-SV/blob/master/examples/demo_sig/resources/definitions/sv_class_article.txt
ls_conditions: List[callable] or List[str], default None
List of definitions of custom SV classification. The data type of the elements in the list can be callable or SV ID (str).
callable --> Functions that takes a self and returns a list of SV ID that satisfy the conditions of the SV class to be defined.
SV ID --> Lists of SV ID that satisfy the conditions of the SV class to be defined.
This argument is disabled when "definitions" is not None.
ls_names: List[str], default None
List of the names of the custom SV class corresponding to the "ls_conditions". This argument is disabled when "definitions" is not None.
        return_data_frame: bool, default True
            Return counts of each custom SV class as a pd.DataFrame.
exclude_empty_cases: bool, default False
If True, samples which have no SV record will be excluded.
Returns
---------
pd.DataFrame or None
"""
set_ids_current = set(self.ids)
obj = self
ls_ids = []
ls_result_names = []
if definitions is not None:
if isinstance(definitions, str):
if definitions == "default":
d = os.path.dirname(sys.modules["viola"].__file__)
definitions = os.path.join(d, "data/sv_class_default.txt")
ls_conditions, ls_names = self._parse_signature_definition_file(open(definitions, 'r'))
elif definitions == "article":
d = os.path.dirname(sys.modules["viola"].__file__)
definitions = os.path.join(d, "data/sv_class_article.txt")
ls_conditions, ls_names = self._parse_signature_definition_file(open(definitions, 'r'))
else:
ls_conditions, ls_names = self._parse_signature_definition_file(open(definitions, 'r'))
else:
ls_conditions, ls_names = self._parse_signature_definition_file(definitions)
for cond, name in zip(ls_conditions, ls_names):
obj = obj.filter_by_id(set_ids_current)
if callable(cond):
ids = cond(obj)
else:
ids = cond
set_ids = set(ids)
set_ids_intersection = set_ids_current & set_ids
ls_ids += list(set_ids_intersection)
ls_result_names += [name for i in range(len(set_ids_intersection))]
set_ids_current = set_ids_current - set_ids_intersection
ls_ids += list(set_ids_current)
ls_result_names += ['others' for i in range(len(set_ids_current))]
ls_zeros = [0 for i in range(len(self.ids))]
df_result = pd.DataFrame({'id': ls_ids, 'value_idx': ls_zeros, 'manual_sv_type': ls_result_names})
self.add_info_table('manual_sv_type', df_result)
if return_data_frame:
if ls_order is None:
pd_ind_reindex = pd.Index(ls_names + ['others'])
else:
pd_ind_reindex = pd.Index(ls_order)
df_feature_counts = self.get_feature_count_as_data_frame(ls_order=pd_ind_reindex, exclude_empty_cases=exclude_empty_cases)
return df_feature_counts
def get_feature_count_as_data_frame(self, feature='manual_sv_type', ls_order=None, exclude_empty_cases=False):
df_feature = self.get_table(feature)
df_id = self.get_table('global_id')
df_patients = self.get_table('patients')
df_merged = pd.merge(df_feature, df_id, left_on='id', right_on='global_id')
df_merged = df_merged.merge(df_patients, left_on='patient_id', right_on='id')
df_feature_counts = df_merged.pivot_table('global_id', index='patients', columns=feature, aggfunc='count', fill_value=0)
if not exclude_empty_cases:
df_feature_counts = df_feature_counts.reindex(self._ls_patients, fill_value=0)
if ls_order is not None:
pd_ind_reindex = pd.Index(ls_order, name=feature)
df_feature_counts = df_feature_counts.reindex(columns=pd_ind_reindex, fill_value=0)
return df_feature_counts
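# Illustrative end-to-end sketch for MultiBedpe (hypothetical `mb` built as
# above; 'default' resolves to the bundled data/sv_class_default.txt):
#
#   counts = mb.classify_manual_svtype(definitions='default')
#   # counts: patients x SV-class DataFrame, equivalent to
#   # mb.get_feature_count_as_data_frame(feature='manual_sv_type')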
class MultiVcf(Vcf):
"""
A database-like object that contains information of multiple Vcf files.
In this class, main keys in most tables are "global id" instead of using
"SV id" from SV callers. "global id" is unique ID of all the SV record
across all the samples.
"""
_internal_attrs = [
"_df_id",
"_df_patients",
"_df_svpos",
"_odict_df_info",
"_ls_patients",
"_ls_infokeys",
"_odict_alltables",
"_repr_config",
"_sig_criteria"
]
_internal_attrs_set = set(_internal_attrs)
_repr_column_names = [
"id",
"bp1",
"bp2",
"strand",
"qual",
"svtype",
]
_repr_column_names_set = set(_repr_column_names)
def __init__(
self,
ls_vcf: List[Vcf] = None,
ls_patient_names: List[str] = None,
direct_tables: Optional[List[pd.DataFrame]] = None
):
if direct_tables is None:
df_id, df_patients, df_svpos, df_filters, odict_df_info, df_formats, odict_df_headers = self.__init__from_ls_vcf(ls_vcf, ls_patient_names)
self.__init__common(df_id, df_patients, df_svpos, df_filters, odict_df_info, df_formats, odict_df_headers)
else:
self.__init__common(*direct_tables)
def __init__from_ls_vcf(self, ls_vcf, ls_patient_names):
ls_df_id = []
ls_df_svpos = []
ls_df_filters = []
odict_ls_df_info = OrderedDict()
ls_df_formats = []
odict_ls_df_headers = OrderedDict()
# Header Integration
for vcf, patient_name in zip(ls_vcf, ls_patient_names):
for key, value in vcf._odict_df_headers.items():
value = value.copy()
if odict_ls_df_headers.get(key) is None:
odict_ls_df_headers[key] = [value]
else:
odict_ls_df_headers[key].append(value)
odict_df_headers = OrderedDict()
for key, value in odict_ls_df_headers.items():
for idx, df in enumerate(value):
if idx == 0:
df_merged = df
continue
on = list(df_merged.columns)
df_merged = df_merged.merge(df, how='outer', on=on)
odict_df_headers[key] = df_merged
# /Header Integration
ls_patient_id = [i for i in range(len(ls_patient_names))]
df_patients = pd.DataFrame({'id': ls_patient_id, 'patients': ls_patient_names})
for vcf, patient_id, patient_name in zip(ls_vcf, ls_patient_id, ls_patient_names):
df_svpos = vcf.get_table('positions')
df_filters = vcf.get_table('filters')
df_formats = vcf.get_table('formats')
df_id = df_svpos[['id']].copy()
df_id['patient_id'] = patient_id
df_id['global_id'] = str(patient_name) + '_' + df_id['id'].astype(str)
df_id = df_id[['global_id', 'patient_id', 'id']]
ls_df_id.append(df_id)
df_svpos['id'] = str(patient_name) + '_' + df_svpos['id'].astype(str)
ls_df_svpos.append(df_svpos)
df_filters['id'] = str(patient_name) + '_' + df_filters['id'].astype(str)
ls_df_filters.append(df_filters)
df_formats['id'] = str(patient_name) + '_' + df_formats['id'].astype(str)
ls_df_formats.append(df_formats)
for info in odict_df_headers['infos_meta'].id:
df_info_ = vcf._odict_df_info.get(info, None)
if df_info_ is None:
df_info = pd.DataFrame(columns=('id', 'value_idx', info.lower()))
else:
df_info = df_info_.copy()
df_info['id'] = str(patient_name) + '_' + df_info['id'].astype(str)
if odict_ls_df_info.get(info) is None:
odict_ls_df_info[info] = [df_info]
else:
odict_ls_df_info[info].append(df_info)
df_concat_id = pd.concat(ls_df_id, ignore_index=True)
df_concat_svpos = pd.concat(ls_df_svpos, ignore_index=True)
df_concat_filters = pd.concat(ls_df_filters, ignore_index=True)
df_concat_formats = pd.concat(ls_df_formats, ignore_index=True)
odict_df_info = OrderedDict()
for key, value in odict_ls_df_info.items():
odict_df_info[key] = pd.concat(value)
return (df_concat_id, df_patients, df_concat_svpos, df_concat_filters, odict_df_info, df_concat_formats, odict_df_headers)
def __init__common(self, df_id, df_patients, df_svpos, df_filters, odict_df_info, df_formats, odict_df_headers = {}):
self._df_id = df_id
self._df_patients = df_patients
self._df_svpos = df_svpos
self._df_filters = df_filters
self._odict_df_info = odict_df_info
self._df_formats = df_formats
self._odict_df_headers = odict_df_headers
self._ls_patients = df_patients['patients'].to_list()
self._ls_infokeys = [ x.lower() for x in odict_df_headers['infos_meta']['id'].tolist()]
ls_keys = ['global_id', 'patients', 'positions', 'filters'] + self._ls_infokeys + ['formats'] + \
list(odict_df_headers.keys())
ls_values = [df_id, df_patients, df_svpos, df_filters] + list(odict_df_info.values()) + [df_formats] + list(odict_df_headers.values())
self._odict_alltables = OrderedDict([(k, v) for k, v in zip(ls_keys, ls_values)])
self._repr_config = {
'info': None,
}
def filter_by_id(self, arrlike_id):
"""
filter_by_id(arrlike_id)
        Filter MultiVcf object according to the list of SV ids.
        The returned object is also an instance of MultiVcf.
Parameters
---------------
arrlike_id: list-like
Global ids which you would like to keep.
Returns
---------------
        MultiVcf
            A MultiVcf object with the SV id specified in the arrlike_id argument.
All records associated with SV ids that are not in the arrlike_id will be discarded.
"""
df_global_id = self.get_table('global_id')
out_global_id = df_global_id.loc[df_global_id['global_id'].isin(arrlike_id)].reset_index(drop=True)
out_patients = self.get_table('patients')
out_svpos = self._filter_by_id('positions', arrlike_id)
out_filters = self._filter_by_id('filters', arrlike_id)
out_odict_df_info = OrderedDict([(k, self._filter_by_id(k, arrlike_id)) for k in self._ls_infokeys])
out_formats = self._filter_by_id('formats', arrlike_id)
out_odict_df_headers = self._odict_df_headers.copy()
return MultiVcf(direct_tables=[out_global_id, out_patients, out_svpos, out_filters, out_odict_df_info, out_formats, out_odict_df_headers])
def classify_manual_svtype(self, definitions=None, ls_conditions=None, ls_names=None, ls_order=None, return_data_frame=True, exclude_empty_cases=False):
"""
classify_manual_svtype(definitions, ls_conditions, ls_names, ls_order=None, exclude_empty_cases=False)
Classify SV records by user-defined criteria. A new INFO table named
'manual_sv_type' will be created.
Parameters
------------
        definitions: path_or_buf or str, default None
            Path to the file which specifies the definitions of the custom SV classification. This argument is disabled when "ls_conditions" is not None.
            If "default" is specified, a simple length-based SV classification will be employed.
            If "article" is specified, the definition file used in the Viola publication will be applied.
            Below are the links to the definition files you can specify with this method.
            "default" -> https://github.com/dermasugita/Viola-SV/blob/master/examples/demo_sig/resources/definitions/sv_class_default.txt
            "article" -> https://github.com/dermasugita/Viola-SV/blob/master/examples/demo_sig/resources/definitions/sv_class_article.txt
ls_conditions: List[callable] or List[str], default None
            List of definitions of the custom SV classification. Each element of the list can be a callable or a list of SV IDs (str).
            callable --> A function that takes the object itself and returns a list of SV IDs satisfying the conditions of the SV class to be defined.
            SV ID --> A list of SV IDs satisfying the conditions of the SV class to be defined.
This argument is disabled when "definitions" is not None.
ls_names: List[str], default None
            List of the names of the custom SV classes corresponding to "ls_conditions". This argument is disabled when "definitions" is not None.
        return_data_frame: bool, default True
            If True, return the count of each custom SV class as a pd.DataFrame.
exclude_empty_cases: bool, default False
If True, samples which have no SV record will be excluded.
Returns
---------
pd.DataFrame or None
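        Examples
        ---------
        A minimal, illustrative sketch (the object name is hypothetical):

        >>> df_counts = multi_vcf.classify_manual_svtype(definitions="default")

        This registers a new INFO table named 'manual_sv_type'; because
        return_data_frame defaults to True, the per-class counts are also returned.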
"""
set_ids_current = set(self.ids)
obj = self
ls_ids = []
ls_result_names = []
if definitions is not None:
if isinstance(definitions, str):
if definitions == "default":
d = os.path.dirname(sys.modules["viola"].__file__)
definitions = os.path.join(d, "data/sv_class_default.txt")
ls_conditions, ls_names = self._parse_signature_definition_file(open(definitions, 'r'))
elif definitions == "article":
d = os.path.dirname(sys.modules["viola"].__file__)
definitions = os.path.join(d, "data/sv_class_article.txt")
ls_conditions, ls_names = self._parse_signature_definition_file(open(definitions, 'r'))
else:
ls_conditions, ls_names = self._parse_signature_definition_file(open(definitions, 'r'))
else:
ls_conditions, ls_names = self._parse_signature_definition_file(definitions)
for cond, name in zip(ls_conditions, ls_names):
obj = obj.filter_by_id(set_ids_current)
if callable(cond):
ids = cond(obj)
else:
ids = cond
set_ids = set(ids)
set_ids_intersection = set_ids_current & set_ids
ls_ids += list(set_ids_intersection)
ls_result_names += [name for i in range(len(set_ids_intersection))]
set_ids_current = set_ids_current - set_ids_intersection
ls_ids += list(set_ids_current)
ls_result_names += ['others' for i in range(len(set_ids_current))]
ls_zeros = [0 for i in range(len(self.ids))]
df_result = pd.DataFrame({'id': ls_ids, 'value_idx': ls_zeros, 'manual_sv_type': ls_result_names})
self.add_info_table('manual_sv_type', df_result, number=1, type_='String', description='Custom SV class defined by user')
if return_data_frame:
if ls_order is None:
                pd_ind_reindex = pd.Index(ls_names + ['others'])
import multiprocessing

import numpy as np
import pandas as pd

from crop_model_en import Irrigation
class Optimization(Irrigation):
def __init__(self):
super().__init__()
self.year=None
self.optimal_dates_irrigation = None
self.num_process = 12
self.optimizer_counter = 0
self.container_of_mean_yields = []
self.container_of_mean_water_loss = []
self.container_of_irrigation_amount = []
self.irrigation_dates_for_many_years_optim = None
def minimize_function_20_years_hpc(self, x):
"""
Minimize this to define optimal day for irrigation
"""
inputs_years = np.arange(self.NASA_start_year, self.NASA_start_year+20)
#dates_irrigation = self.irrigation_dates(x)
self.irrigation_dates_for_many_years_optim = self.irrigation_dates(x)
pool = multiprocessing.Pool(processes=self.num_process)
        crop_sim_for_20_years = pool.map(self.crop_hpc_20_years, inputs_years)
        pool.close()
        pool.join()
yield_of_crop_sim_for_20_years = [crop_sim_for_20_years[i]['TWSO'][-1] for i in range(len(crop_sim_for_20_years))]
out = np.mean(yield_of_crop_sim_for_20_years)
return -out
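    # Illustrative usage sketch (not part of the original script): the objective above is a
    # black-box function of the irrigation-day vector x, so it can be handed to a
    # derivative-free optimizer, e.g.
    #
    #   from scipy.optimize import differential_evolution
    #   opt = Optimization()
    #   result = differential_evolution(opt.minimize_function_20_years_hpc, bounds=bounds)
    #
    # where `bounds` (one (earliest_day, latest_day) pair per irrigation event) is an
    # assumption made for this example.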
def minimize_function_20_years(self, x):
"""
Minimize this to define optimal day for irrigation for 20 first years
"""
inputs_years = np.arange(self.NASA_start_year, self.NASA_start_year+20)
#dates_irrigation = self.irrigation_dates(x)
self.irrigation_dates_for_many_years_optim = self.irrigation_dates(x)
crop_sim_for_20_years = []
for year in inputs_years:
#change year from json-year to historical
self.date_crop_start = self.year_changer(self.user_parameters['crop_start'],year)
self.date_crop_end = self.year_changer(self.user_parameters['crop_end'],year)
            #convert dates from int to dt.datetime
            dates_irrigation = self.irrigation_dates(x)
            #Setup irrigation amount
amounts = [3. for _ in range(len(dates_irrigation))]
dates_irrigation = [self.year_changer(obj, year) for obj in dates_irrigation]
dates_npk, npk_list = self.user_parameters['npk_events'], self.user_parameters['npk']
dates_npk = [self.year_changer(obj, year) for obj in dates_npk]
agromanagement = self.agromanager_writer(self.user_parameters['crop_name'], dates_irrigation, dates_npk, amounts, npk_list)
self.load_model()
self.run_simulation_manager(agromanagement)
output = pd.DataFrame(self.output).set_index("day")
crop_sim_for_20_years.append(output)
#select only last day crop yield
yield_of_crop_sim_for_20_years = [crop_sim_for_20_years[i]['TWSO'][-1] for i in range(len(crop_sim_for_20_years))]
# calculate mean
out = np.mean(yield_of_crop_sim_for_20_years)
return -out
def crop_hpc_20_years(self, year):
self.date_crop_start = self.year_changer(self.user_parameters['crop_start'],year)
self.date_crop_end = self.year_changer(self.user_parameters['crop_end'],year)
## main dif here, we use self.irrigation dates instead of user_parameters
dates_irrigation = self.irrigation_dates_for_many_years_optim
amounts = [3. for _ in range(len(dates_irrigation))]
dates_irrigation = [self.year_changer(obj, year) for obj in dates_irrigation]
dates_npk, npk_list = self.user_parameters['npk_events'], self.user_parameters['npk']
dates_npk = [self.year_changer(obj, year) for obj in dates_npk]
agromanagement = self.agromanager_writer(self.user_parameters['crop_name'], dates_irrigation, dates_npk, amounts, npk_list)
self.load_model()
self.run_simulation_manager(agromanagement)
        output = pd.DataFrame(self.output)
# # # # # # # # # # # # # # # # # # # # #
# #
# By <NAME> #
# Version 0.0.1 #
# #
# # # # # # # # # # # # # # # # # # # # #
import numpy as np
from math import pi
import pandas as pd
from itertools import cycle
from bokeh.plotting import figure
from bokeh.tile_providers import get_provider
from bokeh.models import Title, ColumnDataSource, Circle
from bokeh.transform import cumsum
from bokeh.palettes import brewer,all_palettes,Category20c
from bokeh.transform import dodge
import datetime
###############################################################################
color_limits = ['#259B00','#CFE301','#FF5733','#900C3F']
hi_limits = np.array([0.25,0.5,0.75,1])
pkmn_colors = ['#78C850', # Grass
'#F08030', # Fire
'#6890F0', # Water
'#A8B820', # Bug
#'#A8A878', # Normal
'#A040A0', # Poison
'#F8D030', # Electric
#'#E0C068', # Ground
#'#EE99AC', # Fairy
'#C03028', # Fighting
'#F85888', # Psychic
'#B8A038', # Rock
'#705898', # Ghost
'#98D8D8', # Ice
'#7038F8', # Dragon
]
pkmn_type_colors = cycle(pkmn_colors)
def limit_color(val):
color_id = np.where(hi_limits>val)[-1]
color_id = color_id[0]
color = color_limits[color_id]
return color
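# Worked example (based on the thresholds above): limit_color(0.3) returns '#CFE301',
# because the first entry of hi_limits greater than 0.3 is 0.5 at index 1 and
# color_limits[1] == '#CFE301'.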
### Plot Maps
def Plot_Asset_Location(Data, TITLE,Index='HI',Factor=1):
TOOLTIPS = [
#("Ranking", "$index"),
("Name", "@Name"),
("Substation", "@Loc"),
#("Brand", "@Brand"),
("Year", "@Year"),
("HI", "@HI"),
("EL", "@EL"),
#("RI", "@RI"),
#("OI", "@OI"),
("Type", "@Type"),
]
lat = Data.Loc_x
lon = Data.Loc_y
df = Data.copy()
df['lat'] = lat
df['lon'] = lon
df['size'] = (1+np.exp(df[Index].values*4))*2
df['colors'] = [limit_color(i) for i in df[Index].values]
#df = df.drop(['geometry'], axis=1)
source = ColumnDataSource(df)
x_min = np.amin(lat) #-8324909
x_max = np.amax(lat) #-8148798
y_min = np.amin(lon) #417040
y_max = np.amax(lon) #673868
fig = figure(x_range=(x_min,x_max), y_range=(y_min,y_max),
x_axis_type="mercator", y_axis_type="mercator",tooltips=TOOLTIPS,sizing_mode='stretch_width')
circle = Circle(x="lat", y="lon", size='size', fill_color='colors', fill_alpha=0.5, line_color=None)
fig.add_tile(get_provider('CARTODBPOSITRON'))
fig.add_glyph(source, circle)
return fig
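# Minimal usage sketch (the DataFrame is hypothetical; it only needs the columns referenced
# above: Name, Loc, Year, HI, EL, Type, Loc_x, Loc_y):
#
#   fig = Plot_Asset_Location(df_assets, TITLE="Transformer fleet", Index='HI')
#   show(fig)  # with `show` imported from bokeh.plotting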
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Plot_Pie_Count(df,col,title="Pie Chart"):
df1 = df.groupby([col]).count()
df1['count'] = df1.iloc[:, 0]
df1['angle'] = df1['count']/df1['count'].sum() * 2*pi
n = len(df1)
color_list = cycle(pkmn_colors)
color = [next(color_list)for i in range(n)]
df1['color'] = color
# Choose color
    fig = Plot_Pie(df1, title=title)
return fig
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Plot_Pie_Parm(df,df_porf,col,title="Pie Chart"):
df1 = df.copy()
df1 = df1.sort_values(by=[col],ascending=False)
df1['count'] = df['Name']
df1['angle'] = df1[col]/df1[col].sum() * 2*pi
df_data = pd.merge(df1, df_porf, on='Name',how='inner')
# Choose color
df_c = pd.DataFrame()
df_c['Type'] = df_porf['Type'].unique()
n = len(df_c)
color_list = cycle(pkmn_colors)
color = [next(color_list)for i in range(n)]
df_c['color'] = color
    df_data = pd.merge(df_data, df_c, on='Type', how='inner')
"""
Trading-Technical-Indicators (tti) python library
File name: test_indicators_common.py
tti.indicators package, Abstract class for common unit tests applicable for
all the indicators.
"""
from abc import ABC, abstractmethod
import pandas as pd
import matplotlib.pyplot as plt
import copy
import numpy as np
from tti.utils.exceptions import NotEnoughInputData, \
WrongTypeForInputParameter, WrongValueForInputParameter
class TestIndicatorsCommon(ABC):
# Definition of the abstract methods and properties
@abstractmethod
def assertRaises(self, *kwargs):
raise NotImplementedError
@abstractmethod
def subTest(self, **kwargs):
raise NotImplementedError
@abstractmethod
def assertEqual(self, *kwargs):
raise NotImplementedError
@abstractmethod
def assertIn(self, *kwargs):
raise NotImplementedError
@property
@abstractmethod
def indicator(self):
raise NotImplementedError
@property
@abstractmethod
def indicator_input_arguments(self):
raise NotImplementedError
@property
@abstractmethod
def indicator_other_input_arguments(self):
raise NotImplementedError
@property
@abstractmethod
def indicator_minimum_required_data(self):
raise NotImplementedError
@property
@abstractmethod
def graph_file_name(self):
raise NotImplementedError
@property
@abstractmethod
def indicator_test_data_file_name(self):
raise NotImplementedError
@property
@abstractmethod
def mandatory_arguments_missing_cases(self):
raise NotImplementedError
@property
@abstractmethod
def arguments_wrong_type(self):
raise NotImplementedError
@property
@abstractmethod
def arguments_wrong_value(self):
raise NotImplementedError
@property
@abstractmethod
def required_input_data_columns(self):
raise NotImplementedError
@property
@abstractmethod
def ti_data_rows(self):
raise NotImplementedError
precision = 4
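    # Illustrative sketch (indicator name and arguments are hypothetical): a concrete test
    # case is expected to mix this class into a unittest.TestCase and fill in the abstract
    # properties as class attributes, e.g.
    #
    #   class TestSomeIndicator(unittest.TestCase, TestIndicatorsCommon):
    #       indicator = tti.indicators.SomeIndicator
    #       indicator_input_arguments = {'period': 14}
    #       indicator_minimum_required_data = 14
    #       ...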
# Unit Tests
# Validate indicators input arguments
def test_mandatory_input_arguments_missing(self):
for arguments_set in self.mandatory_arguments_missing_cases:
with self.subTest(arguments_set=arguments_set):
with self.assertRaises(TypeError):
self.indicator(**arguments_set)
def test_input_arguments_wrong_type(self):
for arguments_set in self.arguments_wrong_type:
with self.subTest(arguments_set=arguments_set):
with self.assertRaises(WrongTypeForInputParameter):
self.indicator(**arguments_set)
def test_input_arguments_wrong_value(self):
for arguments_set in self.arguments_wrong_value:
with self.subTest(arguments_set=arguments_set):
with self.assertRaises(WrongValueForInputParameter):
self.indicator(**arguments_set)
# Validate input argument: input_data
def test_argument_input_data_wrong_index_type(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=1)
with self.assertRaises(TypeError):
self.indicator(df)
def test_argument_input_data_required_column_missing(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
for missing_column in self.required_input_data_columns:
with self.subTest(
missing_column=missing_column):
with self.assertRaises(ValueError):
self.indicator(pd.DataFrame(
df.drop(columns=[missing_column])))
def test_argument_input_data_values_wrong_type(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
df.iloc[0, :] = 'no-numeric'
with self.assertRaises(ValueError):
self.indicator(df)
def test_argument_input_data_empty(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
with self.assertRaises(ValueError):
self.indicator(pd.DataFrame(df[df.index >= '2032-01-01']))
# Validate input argument: fill_missing_values
def test_argument_fill_missing_values_is_true(self):
df = pd.read_csv('./data/missing_values_data.csv', parse_dates=True,
index_col=0)
df_expected_result = pd.read_csv(
'./data/missing_values_filled.csv', parse_dates=True, index_col=0
)[self.required_input_data_columns].round(self.precision)
df_result = self.indicator(
df, fill_missing_values=True, **self.indicator_input_arguments
)._input_data[self.required_input_data_columns]
pd.testing.assert_frame_equal(df_result, df_expected_result)
def test_argument_fill_missing_values_is_false(self):
df = pd.read_csv('./data/missing_values_data.csv', parse_dates=True,
index_col=0)
df_expected_result = pd.read_csv(
'./data/missing_values_data_sorted.csv', parse_dates=True,
index_col=0)[self.required_input_data_columns].round(
self.precision)
df_result = self.indicator(
df, fill_missing_values=False, **self.indicator_input_arguments
)._input_data[self.required_input_data_columns]
pd.testing.assert_frame_equal(df_result, df_expected_result)
def test_argument_fill_missing_values_is_default_true(self):
df = pd.read_csv('./data/missing_values_data.csv', parse_dates=True,
index_col=0)
df_expected_result = pd.read_csv(
'./data/missing_values_filled.csv', parse_dates=True, index_col=0
)[self.required_input_data_columns].round(self.precision)
df_result = self.indicator(
df, **self.indicator_input_arguments
)._input_data[self.required_input_data_columns]
pd.testing.assert_frame_equal(df_result, df_expected_result)
# Validate indicator creation
def test_validate_indicator_input_data_one_row(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
if self.indicator_minimum_required_data > 1:
with self.assertRaises(NotEnoughInputData):
self.indicator(df[df.index == '2000-02-01'])
else:
self.indicator(df[df.index == '2000-02-01'])
def test_validate_indicator_less_than_required_input_data(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
if self.indicator_minimum_required_data != 1:
with self.assertRaises(NotEnoughInputData):
self.indicator(
df.iloc[:self.indicator_minimum_required_data - 1],
**self.indicator_input_arguments)
else:
pass
def test_validate_indicator_exactly_required_input_data(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
self.indicator(df.iloc[:self.indicator_minimum_required_data],
**self.indicator_input_arguments)
def test_validate_indicator_full_data(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
df_expected_result = pd.read_csv(self.indicator_test_data_file_name,
parse_dates=True, index_col=0).round(self.precision)
df_result = self.indicator(
df, **self.indicator_input_arguments)._ti_data
pd.testing.assert_frame_equal(df_expected_result, df_result,
check_dtype=False)
def test_validate_indicator_full_data_default_arguments(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
self.indicator(df)
def test_validate_indicator_full_data_other_arguments_values(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
for arguments_set in self.indicator_other_input_arguments:
with self.subTest(arguments_set=arguments_set):
self.indicator(df, **arguments_set)
# Validate API
def test_getTiGraph(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
indicator = self.indicator(df, **self.indicator_input_arguments)
# Needs manual check of the produced graph
self.assertEqual(indicator.getTiGraph(), plt)
indicator.getTiGraph().savefig(self.graph_file_name)
plt.close('all')
def test_getTiData(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
df_expected_result = pd.read_csv(self.indicator_test_data_file_name,
parse_dates=True, index_col=0).round(self.precision)
pd.testing.assert_frame_equal(
df_expected_result,
self.indicator(df, **self.indicator_input_arguments).getTiData(),
check_dtype=False)
def test_getTiValue_specific(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
df_expected_result = pd.read_csv(self.indicator_test_data_file_name,
parse_dates=True, index_col=0).round(self.precision)
self.assertEqual(list(df_expected_result.loc['2009-10-19', :]),
self.indicator(df, **self.indicator_input_arguments).
getTiValue('2009-10-19'))
def test_getTiValue_latest(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
df_expected_result = pd.read_csv(self.indicator_test_data_file_name,
parse_dates=True, index_col=0).round(self.precision)
# Adaptation for the pandas release 1.2.0, check github issue #20
expected_result = list(df_expected_result.iloc[-1])
actual_result = self.indicator(df, **self.indicator_input_arguments).\
getTiValue()
for x, y in zip(expected_result, actual_result):
try:
self.assertAlmostEqual(x, y, places=4)
except:
self.assertAlmostEqual(x, y, places=3)
def test_getTiSignal(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
self.assertIn(self.indicator(
df, **self.indicator_input_arguments).getTiSignal(),
[('buy', -1), ('hold', 0), ('sell', 1)])
def test_getTiSignal_minimum_required_data(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
self.assertIn(
self.indicator(df.iloc[:self.indicator_minimum_required_data],
**self.indicator_input_arguments).getTiSignal(),
[('buy', -1), ('hold', 0), ('sell', 1)])
def test_getTiSimulation(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
ti = self.indicator(df, **self.indicator_input_arguments)
orig_input_data = copy.deepcopy(ti._input_data)
orig_ti_data = copy.deepcopy(ti._ti_data)
simulation_data, statistics, graph = \
ti.getTiSimulation(df[['close']])
if str(self.indicator) == \
"<class 'tti.indicators._detrended_price_oscillator." + \
"DetrendedPriceOscillator'>":
self.assertEqual(simulation_data.isnull().values[:-4].any(), False)
self.assertEqual(statistics['number_of_trading_days'], 3165)
else:
self.assertEqual(simulation_data.isnull().values.any(), False)
self.assertEqual(statistics['number_of_trading_days'], 3169)
self.assertEqual(any(np.isnan(val) for val in statistics.values()),
False)
pd.testing.assert_frame_equal(ti._input_data, orig_input_data)
pd.testing.assert_frame_equal(ti._ti_data, orig_ti_data)
# Needs manual check of the produced graph
self.assertEqual(graph, plt)
graph.savefig('./figures/trading_simulation_graph_' +
str(self.indicator).split('.')[-1][:-2] + '.png')
plt.close('all')
# Validate API for specific number of rows in calculated indicator
def test_api_for_variable_ti_data_length(self):
df = pd.read_csv('./data/sample_data.csv', parse_dates=True,
index_col=0)
for rows in self.ti_data_rows:
with self.subTest(rows=rows):
                ti = self.indicator(pd.DataFrame(df)
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
            # changes to the origin of the copy do not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
        # incorrectly inferring on datetimelike looking when object dtype is
        # specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
        # even though the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
# in theory this should be all nulls, but since
        # we are not specifying a dtype, it is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
        # invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp("20130101"), 1], index=["a", "b"])
assert result["a"] == Timestamp("20130101")
assert result["b"] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M")
values2 = dates.view(np.ndarray).astype("datetime64[ns]")
expected = Series(values2, index=dates)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, dates)
tm.assert_series_equal(result, expected)
# GH 13876
# coerce to non-ns to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, index=dates, dtype=object)
tm.assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([np.nan, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
assert str(Series(dr).iloc[0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
assert str(Series(dr).iloc[0].tz) == "US/Eastern"
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == "object"
assert s[2] is np.nan
assert "NaN" in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr)
assert s.dtype.name == "datetime64[ns, US/Eastern]"
assert s.dtype == "datetime64[ns, US/Eastern]"
assert is_datetime64tz_dtype(s.dtype)
assert "datetime64[ns, US/Eastern]" in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == "datetime64[ns]"
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[Series([True, True, False], index=s.index)]
tm.assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
tm.assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
tm.assert_series_equal(result, s)
# short str
assert "datetime64[ns, US/Eastern]" in str(s)
# formatting with NaT
result = s.shift()
assert "datetime64[ns, US/Eastern]" in str(result)
assert "NaT" in str(result)
# long str
t = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
assert "datetime64[ns, US/Eastern]" in str(t)
result = pd.DatetimeIndex(s, freq="infer")
tm.assert_index_equal(result, dr)
# inference
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
assert s.dtype == "datetime64[ns, US/Pacific]"
assert lib.infer_dtype(s, skipna=True) == "datetime64"
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
assert s.dtype == "object"
assert lib.infer_dtype(s, skipna=True) == "datetime"
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series(pd.DatetimeIndex(["NaT", "NaT"], tz="US/Eastern"))
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = f"{dtype}[{unit}]"
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = Series([arg], dtype="datetime64[ns, CET]")
expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET")
tm.assert_series_equal(result, expected)
def test_constructor_datetime64_bigendian(self):
# GH#30976
ms = np.datetime64(1, "ms")
arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
result = Series(arr)
expected = Series([Timestamp(ms)])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])
def test_construction_interval(self, interval_constructor):
# construction from interval & array of intervals
intervals = interval_constructor.from_breaks(np.arange(3), closed="right")
result = Series(intervals)
assert result.dtype == "interval[int64]"
tm.assert_index_equal(Index(result.values), Index(intervals))
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_interval(self, data_constructor):
# GH 23563: consistent closed results in interval dtype
data = [pd.Interval(0, 1), pd.Interval(0, 2), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(IntervalArray(data))
assert result.dtype == "interval[float64]"
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_interval_mixed_closed(self, data_constructor):
# GH 23563: mixed closed results in object dtype (not interval dtype)
data = [pd.Interval(0, 1, closed="both"), pd.Interval(0, 2, closed="neither")]
result = Series(data_constructor(data))
assert result.dtype == object
assert result.tolist() == data
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert("UTC"), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_period(self, data_constructor):
data = [pd.Period("2000", "D"), pd.Period("2001", "D"), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(period_array(data))
tm.assert_series_equal(result, expected)
assert result.dtype == "Period[D]"
def test_constructor_period_incompatible_frequency(self):
data = [pd.Period("2000", "D"), pd.Period("2001", "A")]
result = pd.Series(data)
assert result.dtype == object
assert result.tolist() == data
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range("20130101", periods=5, freq="D")
s = Series(pi)
assert s.dtype == "Period[D]"
expected = Series(pi.astype(object))
tm.assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx, dtype=np.float64)
expected.iloc[0] = 0
expected.iloc[1] = 1
tm.assert_series_equal(result, expected)
def test_constructor_dict_list_value_explicit_dtype(self):
# GH 18625
d = {"a": [[2], [3], [4]]}
result = Series(d, index=["a"], dtype="object")
expected = Series(d, index=["a"])
tm.assert_series_equal(result, expected)
def test_constructor_dict_order(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6, else
# order by value
d = {"b": 1, "a": 0, "c": 2}
result = Series(d)
expected = Series([1, 0, 2], index=list("bac"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_constructor_dict_extension(self, data, dtype):
d = {"a": data}
result = Series(d, index=["a"])
expected = Series(data, index=["a"], dtype=dtype)
assert result.dtype == dtype
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])
def test_constructor_dict_nan_key(self, value):
# GH 18480
d = {1: "a", value: "b", float("nan"): "c", 4: "d"}
result = Series(d).sort_values()
expected = Series(["a", "b", "c", "d"], index=[1, value, np.nan, 4])
tm.assert_series_equal(result, expected)
# MultiIndex:
d = {(1, 1): "a", (2, np.nan): "b", (3, value): "c"}
result = Series(d).sort_values()
expected = Series(
["a", "b", "c"], index=Index([(1, 1), (2, np.nan), (3, value)])
)
tm.assert_series_equal(result, expected)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d"))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
tm.assert_series_equal(result_datetime64, expected)
tm.assert_series_equal(result_datetime, expected)
tm.assert_series_equal(result_Timestamp, expected)
def test_constructor_dict_tuple_indexer(self):
# GH 12948
data = {(1, 1, None): -1.0}
result = Series(data)
expected = Series(
-1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]])
)
tm.assert_series_equal(result, expected)
def test_constructor_mapping(self, non_dict_mapping_subclass):
# GH 29788
ndm = non_dict_mapping_subclass({3: "three"})
result = Series(ndm)
expected = Series(["three"], index=[3])
tm.assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
assert list(s) == data
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
assert tuple(s) == data
def test_constructor_dict_of_tuples(self):
data = {(1, 2): 3, (None, 5): 6}
result = Series(data).sort_values()
expected = Series([3, 6], index=MultiIndex.from_tuples([(1, 2), (None, 5)]))
tm.assert_series_equal(result, expected)
def test_constructor_set(self):
values = {1, 2, 3, 4, 5}
with pytest.raises(TypeError, match="'set' type is unordered"):
Series(values)
values = frozenset(values)
with pytest.raises(TypeError, match="'frozenset' type is unordered"):
Series(values)
# https://github.com/pandas-dev/pandas/issues/22698
@pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning")
def test_fromDict(self):
data = {"a": 0, "b": 1, "c": 2, "d": 3}
series = Series(data)
tm.assert_is_sorted(series.index)
data = {"a": 0, "b": "1", "c": "2", "d": datetime.now()}
series = Series(data)
assert series.dtype == np.object_
data = {"a": 0, "b": "1", "c": "2", "d": "3"}
series = Series(data)
assert series.dtype == np.object_
data = {"a": "0", "b": "1"}
series = Series(data, dtype=float)
assert series.dtype == np.float64
def test_fromValue(self, datetime_series):
nans = Series(np.NaN, index=datetime_series.index, dtype=np.float64)
assert nans.dtype == np.float_
assert len(nans) == len(datetime_series)
strings = Series("foo", index=datetime_series.index)
assert strings.dtype == np.object_
assert len(strings) == len(datetime_series)
d = datetime.now()
dates = Series(d, index=datetime_series.index)
assert dates.dtype == "M8[ns]"
assert len(dates) == len(datetime_series)
# GH12336
# Test construction of categorical series from value
categorical = Series(0, index=datetime_series.index, dtype="category")
expected = Series(0, index=datetime_series.index).astype("category")
assert categorical.dtype == "category"
assert len(categorical) == len(datetime_series)
tm.assert_series_equal(categorical, expected)
def test_constructor_dtype_timedelta64(self):
# basic
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# mixed with NaT
td = Series([timedelta(days=1), NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), np.nan], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(300000000), pd.NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
# improved inference
# GH5689
td = Series([np.timedelta64(300000000), NaT])
assert td.dtype == "timedelta64[ns]"
# because iNaT is int, not coerced to timedelta
td = Series([np.timedelta64(300000000), iNaT])
assert td.dtype == "object"
td = Series([np.timedelta64(300000000), np.nan])
assert td.dtype == "timedelta64[ns]"
td = Series([pd.NaT, np.timedelta64(300000000)])
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# these are frequency conversion astypes
# for t in ['s', 'D', 'us', 'ms']:
# with pytest.raises(TypeError):
# td.astype('m8[%s]' % t)
# valid astype
td.astype("int64")
# invalid casting
msg = r"cannot astype a timedelta from \[timedelta64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
td.astype("int32")
# this is an invalid casting
msg = "Could not convert object to NumPy timedelta"
with pytest.raises(ValueError, match=msg):
Series([timedelta(days=1), "foo"], dtype="m8[ns]")
# leave as object here
td = Series([timedelta(days=i) for i in range(3)] + ["foo"])
assert td.dtype == "object"
# these will correctly infer a timedelta
s = Series([None, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([np.nan, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, None, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, np.nan, "1 Day"])
assert s.dtype == "timedelta64[ns]"
# GH 16406
def test_constructor_mixed_tz(self):
s = Series([ | Timestamp("20130101") | pandas.Timestamp |
# %matplotlib inline
import os, time, pickle, argparse
import pandas as pd
import torch
import torch.nn as nn
import numpy as np
from scipy.stats import beta
torch.set_printoptions(threshold=10000)
np.set_printoptions(threshold=np.inf)
parser = argparse.ArgumentParser(description='RSAutoML')
parser.add_argument('--Train_Method', type=str, default='AutoML', help='options: AutoML, Supervised')
parser.add_argument('--Val_Type', type=str, default='last_batch', help='options: last_batch, last_random')
parser.add_argument('--Loss_Type', type=str, default='MSE_sigmoid', help='options: MSE_sigmoid MSE_no_sigmoid BCEWithLogitsLoss CrossEntropyLoss')
parser.add_argument('--Data_Set', type=str, default='ml-20m', help='options: ml-20m ml-latest')
parser.add_argument('--Dy_Emb_Num', type=int, default=2, help='options: 1, 2')
args = parser.parse_args()
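# Example invocations (illustrative only; the script filename is not shown in
# this file, so "train.py" is a placeholder, while the option names and choices
# come from the argparse definitions above):
#   python train.py --Train_Method AutoML --Data_Set ml-20m --Dy_Emb_Num 2
#   python train.py --Train_Method Supervised --Loss_Type BCEWithLogitsLoss --Val_Type last_random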
Model_Gpu = torch.cuda.is_available()
device = torch.device('cuda:0' if Model_Gpu else 'cpu')
DATA_PATH = './data'
DATA_SET = args.Data_Set
Batch_Size = 500 # batch size
LR_model = 0.001 # learning rate
LR_darts = 0.0001 # learning rate
Epoch = 1 # train epoch
Beta_Beta = 20 # beta for Beta distribution
H_alpha = 0 # for nn.KLDivLoss 0.001
if DATA_SET == 'ml-20m':
Train_Size = 15000000 # training dataset size
elif DATA_SET == 'ml-latest':
Train_Size = 22000000 # training dataset size
Test_Size = 5000000   # test dataset size
Emb_Size = [2, 4, 8, 16, 64, 128] # 1,2,4,8,16,32,64,128,256,512
fixed_emb_size = sum(Emb_Size)
Val_Type = args.Val_Type # last_batch last_random
Dy_Emb_Num = args.Dy_Emb_Num
Loss_Type = args.Loss_Type # MSE_sigmoid MSE_no_sigmoid BCEWithLogitsLoss CrossEntropyLoss
print('\n****************************************************************************************\n')
print('os.getpid(): ', os.getpid())
if torch.cuda.is_available():
print('torch.cuda: ', torch.cuda.is_available(), torch.cuda.current_device(), torch.cuda.device_count(), torch.cuda.get_device_name(0), torch.cuda.device(torch.cuda.current_device()))
else:
print('GPU is not available!!!')
print('Train_Size: ', Train_Size)
print('Test_Size: ', Test_Size)
print('fixed_emb_size:', fixed_emb_size)
print('Loss_Type: ', Loss_Type)
print('Val_Type: ', Val_Type)
print('Beta_Beta: ', Beta_Beta)
print('H_alpha: ', H_alpha)
print('LR_model: ', LR_model)
print('LR_darts: ', LR_darts)
print('\n****************************************************************************************\n')
def load_data():
train_features, test_features, train_target, test_target \
= pickle.load(open('{}/{}_TrainTest_{}_{}.data'.format(DATA_PATH, DATA_SET, Train_Size, Output_Dim), mode='rb'))
test_features, test_target = test_features[:Test_Size], test_target[:Test_Size]
genome_scores_dict = pickle.load(open('./{}/{}_GenomeScoresDict.data'.format(DATA_PATH, DATA_SET), mode='rb'))
train_feature_data = | pd.DataFrame(train_features, columns=['userId', 'movieId', 'user_frequency', 'movie_frequency']) | pandas.DataFrame |
from copy import deepcopy
import tempfile
import numpy as np
import pandas as pd
import pytest
from PIL import Image
from Bio import SeqIO
from Bio.Align import MultipleSeqAlignment
from seqlike import SeqLike
from seqlike.codon_tables import human_codon_table, human_codon_map, codon_table_to_codon_map
from . import test_path
# TODO: Turn this into a pytest fixture using Hypothesis.
# We might need to refactor out the fixtures a bit.
nt_seqs = [SeqLike(s, "nt") for s in SeqIO.parse(test_path / f"abs_nt_4.fasta", "fasta")]
s = SeqLike(SeqIO.read(test_path / f"test.fa", "fasta"), seq_type="dna")
s_aa = SeqLike(SeqIO.read(test_path / f"test.fa", "fasta"), seq_type="dna").aa()
s_aa_with_codon_map = SeqLike(
SeqIO.read(test_path / f"test.fa", "fasta"),
codon_map=human_codon_map,
seq_type="dna",
).aa()
seqs = [deepcopy(s)] * 10
seqs_aa = [deepcopy(s_aa)] * 10
seqs_aa_with_codon_map = [deepcopy(s_aa_with_codon_map)] * 10
seqs_mixed = deepcopy(seqs) + deepcopy(seqs_aa)
# ---- test list of seqs of various types ---------
seqs_list = [
(seqs, "nt"),
(seqs_aa, "aa"),
(seqs_aa_with_codon_map, "aa"),
pytest.param(
seqs_mixed,
None,
marks=pytest.mark.xfail(reason="Not a homogeneous list of SeqLikes."),
),
]
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_init_and__type(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
assert isinstance(df, pd.DataFrame)
assert df.seqs.seq._type == _type.upper()
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_write(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
# r+ is read-writable
with tempfile.NamedTemporaryFile(mode="r+") as tempf:
df["seqs"].seq.write(tempf, "fasta")
# rewind file after writing
tempf.seek(0)
read_seqs = pd.Series(SeqLike(s, seq_type=_type) for s in SeqIO.parse(tempf, "fasta"))
for seq1, seq2 in zip(read_seqs, df["seqs"]):
assert str(seq1) == str(seq2)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_plot(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
assert isinstance(df.seqs.seq.plot(use_bokeh=False), Image.Image)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_align(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
assert isinstance(df.seqs.seq.align(), pd.Series)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_as_alignment(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
assert isinstance(df.seqs.seq.as_alignment(), MultipleSeqAlignment)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_as_counts(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
as_counts = df.seqs.seq.as_counts()
assert isinstance(as_counts, np.ndarray)
assert as_counts.shape == (max(len(s) for s in seqs), len(seqs[0].alphabet))
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_extend_ambiguous_counts(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
extended_counts = df.seqs.seq._extend_ambiguous_counts()
assert isinstance(extended_counts, np.ndarray)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_consensus(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
consensus = df.seqs.seq.consensus()
assert isinstance(consensus, SeqLike)
assert len(consensus) == max(len(s) for s in seqs)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_degenerate(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
degenerate = df.seqs.seq.degenerate()
assert isinstance(degenerate, SeqLike)
assert len(degenerate) == max(len(s) for s in seqs)
assert set(degenerate).issubset(set(df.seqs.seq.alphabet))
def test_consensus2():
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": nt_seqs})
consensus = df.seqs.seq.consensus()
assert (
str(consensus)
== "TCAATTGGGGGAGGAGCTCTGGTGGAGGCGGTAGCGGAGGCGGAGGGTCGGCTAGCCAAGTCCAATTGGTTGAATCTGGTGGTGGTGTTGTTCAACCAGGTGGTTCTTTGAGATTGTCTT"
)
def test_degenerate2():
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": nt_seqs})
degenerate = df.seqs.seq.degenerate()
assert (
str(degenerate)
== "TCAATTGGGGGAGGAGCTCTSGTGGWGGCVGTAGCGGAGKCGGAGGKTCSGCWAGCCAAGTCCAATTGGTTGAATCTGGTGGTGGTGTTGTTCAACCAGGTGGTTCTTTGAGATTGTCTT"
)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_max_length(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
assert df.seqs.seq.max_length() == max(len(x) for x in seqs)
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_get_seq_by_id(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
assert df.seqs.seq.get_seq_by_id(seqs[0].id) == seqs[0]
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test__getitem__(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
assert isinstance(df.seqs.seq[:, :5], pd.Series)
assert len(df.seqs.seq[:, :5]) == len(df)
assert all([len(s) == 5 for s in df.seqs.seq[:, :5]])
assert isinstance(df.seqs.seq[:2, :], pd.Series)
assert len(df.seqs.seq[:2, :]) == 2
assert isinstance(df.seqs.seq[0, :], SeqLike)
assert isinstance(df.seqs.seq[0:1, :], pd.Series)
assert len(df.seqs.seq[0:1, :]) == 1
assert isinstance(df.seqs.seq[:, 0], pd.Series)
assert len(df.seqs.seq[:, 0]) == len(df)
assert df.seqs.seq.alphabet == df.seqs[1:].seq.alphabet
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_nt(seqs, _type):
# TODO: Docstring needed for test intent.
df = pd.DataFrame({"seqs": seqs})
assert isinstance(df.seqs.seq.nt(), pd.Series)
assert all([s._type == "NT" for s in df.seqs.seq.nt()])
@pytest.mark.parametrize("seqs, _type", seqs_list)
def test_aa(seqs, _type):
# TODO: Docstring needed for test intent.
df = | pd.DataFrame({"seqs": seqs}) | pandas.DataFrame |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: | pd.Timestamp("2012-12-18 00:00:00") | pandas.Timestamp |
import calendar
from datetime import date, datetime, time
import locale
import unicodedata
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.timezones import maybe_get_tz
from pandas.core.dtypes.common import is_integer_dtype, is_list_like
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Index, PeriodIndex, Series, TimedeltaIndex,
bdate_range, date_range, period_range, timedelta_range)
from pandas.core.arrays import PeriodArray
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
class TestSeriesDatetimeValues:
def test_dt_namespace_accessor(self):
# GH 7207, 11128
# test .dt namespace accessor
ok_for_period = PeriodArray._datetimelike_ops
ok_for_period_methods = ['strftime', 'to_timestamp', 'asfreq']
ok_for_dt = DatetimeIndex._datetimelike_ops
ok_for_dt_methods = ['to_period', 'to_pydatetime', 'tz_localize',
'tz_convert', 'normalize', 'strftime', 'round',
'floor', 'ceil', 'day_name', 'month_name']
ok_for_td = TimedeltaIndex._datetimelike_ops
ok_for_td_methods = ['components', 'to_pytimedelta', 'total_seconds',
'round', 'floor', 'ceil']
def get_expected(s, name):
result = getattr(Index(s._values), prop)
if isinstance(result, np.ndarray):
if is_integer_dtype(result):
result = result.astype('int64')
elif not is_list_like(result):
return result
return Series(result, index=s.index, name=s.name)
def compare(s, name):
a = getattr(s.dt, prop)
b = get_expected(s, prop)
if not (is_list_like(a) and is_list_like(b)):
assert a == b
else:
tm.assert_series_equal(a, b)
# datetimeindex
cases = [Series(date_range('20130101', periods=5), name='xxx'),
Series(date_range('20130101', periods=5, freq='s'),
name='xxx'),
Series(date_range('20130101 00:00:00', periods=5, freq='ms'),
name='xxx')]
for s in cases:
for prop in ok_for_dt:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_dt_methods:
getattr(s.dt, prop)
result = s.dt.to_pydatetime()
assert isinstance(result, np.ndarray)
assert result.dtype == object
result = s.dt.tz_localize('US/Eastern')
exp_values = DatetimeIndex(s.values).tz_localize('US/Eastern')
expected = Series(exp_values, index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
tz_result = result.dt.tz
assert str(tz_result) == 'US/Eastern'
freq_result = s.dt.freq
assert freq_result == DatetimeIndex(s.values, freq='infer').freq
# let's localize, then convert
result = s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
exp_values = (DatetimeIndex(s.values).tz_localize('UTC')
.tz_convert('US/Eastern'))
expected = Series(exp_values, index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
# datetimeindex with tz
s = Series(date_range('20130101', periods=5, tz='US/Eastern'),
name='xxx')
for prop in ok_for_dt:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_dt_methods:
getattr(s.dt, prop)
result = s.dt.to_pydatetime()
assert isinstance(result, np.ndarray)
assert result.dtype == object
result = s.dt.tz_convert('CET')
expected = Series(s._values.tz_convert('CET'),
index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
tz_result = result.dt.tz
assert str(tz_result) == 'CET'
freq_result = s.dt.freq
assert freq_result == DatetimeIndex(s.values, freq='infer').freq
# timedelta index
cases = [Series(timedelta_range('1 day', periods=5),
index=list('abcde'), name='xxx'),
Series(timedelta_range('1 day 01:23:45', periods=5,
freq='s'), name='xxx'),
Series(timedelta_range('2 days 01:23:45.012345', periods=5,
freq='ms'), name='xxx')]
for s in cases:
for prop in ok_for_td:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_td_methods:
getattr(s.dt, prop)
result = s.dt.components
assert isinstance(result, DataFrame)
tm.assert_index_equal(result.index, s.index)
result = s.dt.to_pytimedelta()
assert isinstance(result, np.ndarray)
assert result.dtype == object
result = s.dt.total_seconds()
assert isinstance(result, pd.Series)
assert result.dtype == 'float64'
freq_result = s.dt.freq
assert freq_result == TimedeltaIndex(s.values, freq='infer').freq
# both
index = date_range('20130101', periods=3, freq='D')
s = Series(date_range('20140204', periods=3, freq='s'),
index=index, name='xxx')
exp = Series(np.array([2014, 2014, 2014], dtype='int64'),
index=index, name='xxx')
tm.assert_series_equal(s.dt.year, exp)
exp = Series(np.array([2, 2, 2], dtype='int64'),
index=index, name='xxx')
tm.assert_series_equal(s.dt.month, exp)
exp = Series(np.array([0, 1, 2], dtype='int64'),
index=index, name='xxx')
tm.assert_series_equal(s.dt.second, exp)
exp = | pd.Series([s[0]] * 3, index=index, name='xxx') | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 6 16:43:41 2019
@author: zhanglisama jxufe
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import xgboost as xgb
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
train = pd.read_csv('pm25_train.csv')
test = pd.read_csv('pm25_test.csv')
label = train.pop('pm2.5')
train['date'] = | pd.to_datetime(train['date']) | pandas.to_datetime |
import datetime
import json
import numpy as np
import pandas as pd
import requests
import xarray as xr
from utils import divide_chunks, get_indices_not_done, \
get_site_codes, append_to_csv_column_wise, load_s3_zarr_store,\
convert_df_to_dataset
def get_all_streamflow_data(output_file, sites_file, huc2=None,
num_sites_per_chunk=5, start_date="1970-01-01",
end_date='2019-01-01', time_scale='H',
output_format='zarr', num_site_chunks_write=6,
s3=False):
"""
    Gets all streamflow data for a date range for a given huc2. Calls are
    chunked by station.
:param output_file: [str] path to the csv file or zarr store where the data
will be stored
:param sites_file: [str] path to file that contains the nwis site
information
:param huc2: [str] zero-padded huc 2 (e.g., "02")
    :param num_sites_per_chunk: [int] the number of sites that will be pulled
    in each web service call
    :param start_date: [str] the start date for which you want the data
    (e.g., "1980-01-01")
    :param end_date: [str] the end date for which you want the data
    (e.g., "1990-01-01")
    :param time_scale: [str] Pandas-style time string for the time scale at which
    the data will be aggregated (e.g., 'H' for hourly or 'D' for daily)
    :param output_format: [str] the format of the output file. 'csv' or 'zarr'
    :param num_site_chunks_write: [int] the number of site chunks to accumulate
    before writing out to the output file
    :param s3: [bool] whether the output zarr store is on S3
    :return: None
"""
product = get_product_from_time_scale(time_scale)
site_codes = get_site_codes(sites_file, huc2)
not_done_sites = get_indices_not_done(output_file, site_codes, 'site_code',
output_format, is_column=False,
s3=s3)
site_codes_chunked = divide_chunks(not_done_sites, num_sites_per_chunk)
# loop through site_code_chunks
chunk_dfs = []
i = 0
for site_chunk in site_codes_chunked:
last_chunk = False
if site_chunk[-1] == not_done_sites[-1]:
last_chunk = True
streamflow_df_sites = None
# catch if there is a problem on the server retrieving the data
try:
streamflow_df_sites = get_streamflow_data(site_chunk,
start_date,
end_date,
product,
time_scale)
except json.decoder.JSONDecodeError:
continue
if streamflow_df_sites is not None:
chunk_dfs.append(streamflow_df_sites)
# add the number of stations for which we got data
i += streamflow_df_sites.shape[1]
if not i % (num_site_chunks_write * num_sites_per_chunk) or \
last_chunk:
print('writing out', flush=True)
write_out_chunks(chunk_dfs, output_file, output_format)
chunk_dfs = []
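# Illustrative call only (the paths, store name, and HUC code below are
# placeholders, not taken from this repository):
#   get_all_streamflow_data(output_file="streamflow_02.zarr",
#                           sites_file="nwis_site_info.csv",
#                           huc2="02",
#                           time_scale="H",
#                           output_format="zarr")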
def write_out_chunks(chunks_dfs, out_file, out_format):
all_chunks_df = | pd.concat(chunks_dfs, axis=1) | pandas.concat |
import abc
from datetime import date, datetime, timedelta
from io import BytesIO
import os
from textwrap import fill
from pandas._config import config
from pandas.errors import EmptyDataError
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_bool, is_float, is_integer, is_list_like
from pandas.core.frame import DataFrame
from pandas.io.common import (
_NA_VALUES,
_is_url,
_stringify_path,
_validate_header_arg,
get_filepath_or_buffer,
urlopen,
)
from pandas.io.excel._util import (
_fill_mi_header,
_get_default_writer,
_maybe_convert_usecols,
_pop_header_name,
get_writer,
)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
_read_excel_doc = (
"""
Read an Excel file into a pandas DataFrame.
Support both `xls` and `xlsx` file extensions from a local filesystem or URL.
Support an option to read a single sheet or a list of sheets.
Parameters
----------
io : str, ExcelFile, xlrd.Book, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.xlsx``.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
sheet_name : str, int, list, or None, default 0
Strings are used for sheet names. Integers are used in zero-indexed
sheet positions. Lists of strings/integers are used to request
multiple sheets. Specify None to get all sheets.
Available cases:
* Defaults to ``0``: 1st sheet as a `DataFrame`
* ``1``: 2nd sheet as a `DataFrame`
* ``"Sheet1"``: Load sheet with name "Sheet1"
* ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
as a dict of `DataFrame`
* None: All sheets.
header : int, list of int, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None.
index_col : int, list of int, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
usecols : int, str, list-like, or callable default None
* If None, then parse all columns.
* If str, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of int, then indicates list of column numbers to be parsed.
* If list of string, then indicates list of column names to be parsed.
.. versionadded:: 0.24.0
* If callable, then evaluate each column name against it and parse the
column if the callable returns ``True``.
Returns a subset of the columns according to behavior above.
.. versionadded:: 0.24.0
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None, "xlrd", "openpyxl" or "odf".
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True.
false_values : list, default None
Values to consider as False.
skiprows : list-like
Rows to skip at the beginning (0-indexed).
nrows : int, default None
Number of rows to parse.
.. versionadded:: 0.23.0
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN: '"""
+ fill("', '".join(sorted(_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is appended to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
parse_dates : bool, list-like, or dict, default False
The behavior is as follows:
* bool. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index contains an unparseable date, the entire column or
index will be returned unaltered as an object data type. If you don't want to
parse some cells as dates, just change their type in Excel to "Text".
For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.
Note: A fast-path exists for iso8601-formatted dates.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skipfooter : int, default 0
Rows at the end to skip (0-indexed).
convert_float : bool, default True
Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a dict of DataFrames is returned.
See Also
--------
to_excel : Write DataFrame to an Excel file.
to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
The file can be read using the file name as string or an open file object:
>>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
Name Value
0 string1 1
1 string2 2
2 #Comment 3
>>> pd.read_excel(open('tmp.xlsx', 'rb'),
... sheet_name='Sheet3') # doctest: +SKIP
Unnamed: 0 Name Value
0 0 string1 1
1 1 string2 2
2 2 #Comment 3
Index and header can be specified via the `index_col` and `header` arguments
>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 #Comment 3
Column types are inferred but can be explicitly specified
>>> pd.read_excel('tmp.xlsx', index_col=0,
... dtype={'Name': str, 'Value': float}) # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 #Comment 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> pd.read_excel('tmp.xlsx', index_col=0,
... na_values=['string1', 'string2']) # doctest: +SKIP
Name Value
0 NaN 1
1 NaN 2
2 #Comment 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 None NaN
"""
)
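# Illustrative usage only (not part of the original module): the sheet_name,
# usecols, and parse_dates behaviours described in the docstring above, shown
# with a hypothetical workbook "tmp.xlsx".
#   sheets = pd.read_excel("tmp.xlsx", sheet_name=None)    # dict of DataFrames, one per sheet
#   df = pd.read_excel("tmp.xlsx", usecols="A,C,E:F")      # Excel column letters and ranges
#   df = pd.read_excel("tmp.xlsx", parse_dates=[[1, 3]])   # combine columns 1 and 3 into one date column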
@Appender(_read_excel_doc)
def read_excel(
io,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds,
):
for arg in ("sheet", "sheetname", "parse_cols"):
if arg in kwds:
raise TypeError(
"read_excel() got an unexpected keyword argument `{}`".format(arg)
)
if not isinstance(io, ExcelFile):
io = ExcelFile(io, engine=engine)
elif engine and engine != io.engine:
raise ValueError(
"Engine should not be specified when passing "
"an ExcelFile - ExcelFile already has the engine set"
)
return io.parse(
sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
dtype=dtype,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
keep_default_na=keep_default_na,
verbose=verbose,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds,
)
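# Illustrative usage only: reusing a single ExcelFile for several sheets avoids
# re-opening the workbook, and passing a conflicting engine raises ValueError,
# per the check above ("tmp.xlsx" is a hypothetical file).
#   xls = ExcelFile("tmp.xlsx")
#   df1 = xls.parse("Sheet1")
#   df2 = xls.parse("Sheet2", index_col=0)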
class _BaseExcelReader(metaclass=abc.ABCMeta):
def __init__(self, filepath_or_buffer):
# If filepath_or_buffer is a url, load the data into a BytesIO
if _is_url(filepath_or_buffer):
filepath_or_buffer = BytesIO(urlopen(filepath_or_buffer).read())
elif not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
filepath_or_buffer, _, _, _ = get_filepath_or_buffer(filepath_or_buffer)
if isinstance(filepath_or_buffer, self._workbook_class):
self.book = filepath_or_buffer
elif hasattr(filepath_or_buffer, "read"):
# N.B. xlrd.Book has a read attribute too
filepath_or_buffer.seek(0)
self.book = self.load_workbook(filepath_or_buffer)
elif isinstance(filepath_or_buffer, str):
self.book = self.load_workbook(filepath_or_buffer)
else:
raise ValueError(
"Must explicitly set engine if not passing in buffer or path for io."
)
@property
@abc.abstractmethod
def _workbook_class(self):
pass
@abc.abstractmethod
def load_workbook(self, filepath_or_buffer):
pass
@property
@abc.abstractmethod
def sheet_names(self):
pass
@abc.abstractmethod
def get_sheet_by_name(self, name):
pass
@abc.abstractmethod
def get_sheet_by_index(self, index):
pass
@abc.abstractmethod
def get_sheet_data(self, sheet, convert_float):
pass
def parse(
self,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds,
):
_validate_header_arg(header)
ret_dict = False
# Keep sheetname to maintain backwards compatibility.
if isinstance(sheet_name, list):
sheets = sheet_name
ret_dict = True
elif sheet_name is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheet_name]
# handle same-type duplicates.
sheets = list(dict.fromkeys(sheets).keys())
output = {}
for asheetname in sheets:
if verbose:
print("Reading sheet {sheet}".format(sheet=asheetname))
if isinstance(asheetname, str):
sheet = self.get_sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.get_sheet_by_index(asheetname)
data = self.get_sheet_data(sheet, convert_float)
usecols = _maybe_convert_usecols(usecols)
if not data:
output[asheetname] = DataFrame()
continue
if is_list_like(header) and len(header) == 1:
header = header[0]
# forward fill and pull out names for MultiIndex column
header_names = None
if header is not None and | is_list_like(header) | pandas.core.dtypes.common.is_list_like |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def postgres_url() -> str:
conn = os.environ["POSTGRES_URL"]
return conn
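# These tests assume a reachable PostgreSQL instance, e.g. (illustrative value only):
#   export POSTGRES_URL="postgresql://user:password@localhost:5432/testdb"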
@pytest.mark.xfail
def test_on_non_select(postgres_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(postgres_url, query)
def test_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_float) FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query,
partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(postgres_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(postgres_url: str) -> None:
query = "select MAX(test_int), MIN(test_int) from test_table"
df = read_sql(postgres_url, query,
partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(postgres_url: str) -> None:
query = "select increment(test_int) as test_int from test_table ORDER BY test_int"
df = read_sql(postgres_url, query,
partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(postgres_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(postgres_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 0, 2, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, 5, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "a", "str2", "b", "c", None], dtype="object"
),
"test_float": | pd.Series([None, 3.1, 2.2, 3, 7.8, -10], dtype="float64") | pandas.Series |
# This script performs the statistical analysis for the pollution growth paper
# Importing required modules
import pandas as pd
import numpy as np
import statsmodels.api as stats
from ToTeX import restab
# Reading in the data
data = pd.read_csv('C:/Users/User/Documents/Data/Pollution/pollution_data.csv')
W = pd.read_csv('C:/Users/User/Documents/Data/Pollution/pollution_data_W_reference.csv', header = None)
# Creating a reference list of nations
nations = list(data.Country.unique())
# Prepping data for pollution regression
# Data sets for individual pollutants
co2_data = data[['ln_co2', 'ln_co2_lag', 'ln_sk', 'ln_n5', 'ln_co2_intensity_rate', 'Country', 'Year', 'ln_co2_intensity_lag']].dropna()
ch4_data = data[['ln_ch4', 'ln_ch4_lag', 'ln_sk', 'ln_n5', 'ln_ch4_intensity_rate', 'Country', 'Year', 'ln_ch4_intensity_lag']].dropna()
nox_data = data[['ln_nox', 'ln_nox_lag', 'ln_sk', 'ln_n5', 'ln_nox_intensity_rate', 'Country', 'Year', 'ln_nox_intensity_lag']].dropna()
# Creating dummy variables for each pollutant
co2_national_dummies = pd.get_dummies(co2_data['Country'])
co2_year_dummies = pd.get_dummies(co2_data['Year'])
ch4_national_dummies = pd.get_dummies(ch4_data['Country'])
ch4_year_dummies = pd.get_dummies(ch4_data['Year'])
nox_national_dummies = pd.get_dummies(nox_data['Country'])
nox_year_dummies = pd.get_dummies(nox_data['Year'])
# Replacing Country and Year with fixed effects
co2_data = pd.concat([co2_data, co2_national_dummies, co2_year_dummies], axis = 1)
ch4_data = pd.concat([ch4_data, ch4_national_dummies, ch4_year_dummies], axis = 1)
nox_data = pd.concat([nox_data, nox_national_dummies, nox_year_dummies], axis = 1)
co2_data = co2_data.drop(['Country', 'Year', 1971, 'United States'], axis = 1)
ch4_data = ch4_data.drop(['Country', 'Year', 1971, 'United States'], axis = 1)
nox_data = nox_data.drop(['Country', 'Year', 1971, 'United States'], axis = 1)
# Create the Y and X matrices
CO2 = co2_data['ln_co2']
CH4 = ch4_data['ln_ch4']
NOX = nox_data['ln_nox']
X_CO2 = co2_data.drop(['ln_co2'], axis = 1)
X_CH4 = ch4_data.drop(['ln_ch4'], axis = 1)
X_NOX = nox_data.drop(['ln_nox'], axis = 1)
# Running pollution regressions
co2_mod = stats.OLS(CO2, X_CO2)
ch4_mod = stats.OLS(CH4, X_CH4)
nox_mod = stats.OLS(NOX, X_NOX)
models = [co2_mod, ch4_mod, nox_mod]
names = ['CO2', 'CH4', 'NOx']
res_list = []
for mod in models:
res = mod.fit(cov_type = 'HC1')
res_list.append(res)
print(res.summary())
file = open('C:/Users/User/Documents/Data/Pollution/' + names[models.index(mod)] + '.txt', 'w')
file.write(res.summary().as_text())
file.close()
restab(res_list, 'C:/Users/User/Documents/Data/Pollution/restab.txt')
# After running the conditional convergence models, we set up the network effects models
# Compute technology growth rate
# \widetilde{g} = \left(\frac{1}{T}\right)\sum\limits_{t=1}^{T}\left(\frac{\eta_{t}}{t-\gamma(t-1)}\right)
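# Mapping the symbols above onto the code below (annotation, not in the original
# script): \eta_{t} is the estimated coefficient on the year-t dummy
# (co2_mod.fit().params[i]) and \gamma is the coefficient on the lagged dependent
# variable (co2_mod.fit().params['ln_co2_lag']); analogously for CH4 and NOx.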
g_co2 = (1/23) * sum([(co2_mod.fit().params[i] / ((i-1971) - (co2_mod.fit().params['ln_co2_lag'] * (i-1972)))) for i in range(1972,2015)])
g_ch4 = (1/21) * sum([(ch4_mod.fit().params[i] / ((i-1971) - (ch4_mod.fit().params['ln_ch4_lag'] * (i-1972)))) for i in range(1972,2013)])
g_nox = (1/21) * sum([(nox_mod.fit().params[i] / ((i-1971) - (nox_mod.fit().params['ln_nox_lag'] * (i-1972)))) for i in range(1972,2013)])
# Add technology parameters to the dataframe
co2_tech = []
ch4_tech = []
nox_tech = []
for i in range(len(data)):
if data.Year[i] > 1970 and data.Country[i] in co2_mod.fit().params.keys():
co2_tech.append(co2_mod.fit().params[data.Country[i]] + (g_co2 * (data.Year[i] - 1971)))
else:
co2_tech.append('')
if data.Year[i] > 1970 and data.Country[i] in ch4_mod.fit().params.keys():
ch4_tech.append(ch4_mod.fit().params[data.Country[i]] + (g_ch4 * (data.Year[i] - 1971)))
else:
ch4_tech.append('')
if data.Year[i] > 1970 and data.Country[i] in nox_mod.fit().params.keys():
nox_tech.append(nox_mod.fit().params[data.Country[i]] + (g_nox * (data.Year[i] - 1971)))
else:
nox_tech.append('')
# Add technology values to data set
co2_tech = pd.Series(co2_tech, name = 'co2_tech')
ch4_tech = pd.Series(ch4_tech, name = 'ch4_tech')
nox_tech = pd.Series(nox_tech, name = 'nox_tech')
data = pd.concat([data, co2_tech, ch4_tech, nox_tech], axis = 1)
# Convert '' to np.nan to use pandas dropna
data[data[['co2_tech', 'ch4_tech', 'nox_tech']] == ''] = np.nan
# Data prep for network effects regressions for intensities
tc_co2_rob = data[['co2_intensity', 'co2_intensity_init', 'co2_intensity_lag', 'co2_tech', 'TC_CO2_ROB', 'Country', 'Year']].dropna()
tc_ch4_rob = data[['ch4_intensity', 'ch4_intensity_init', 'ch4_intensity_lag', 'ch4_tech', 'TC_CH4_ROB', 'Country', 'Year']].dropna()
tc_nox_rob = data[['nox_intensity', 'nox_intensity_init', 'nox_intensity_lag', 'nox_tech', 'TC_NOX_ROB', 'Country', 'Year']].dropna()
co2_national_dummies = pd.get_dummies(tc_co2_rob['Country'])
co2_year_dummies = pd.get_dummies(tc_co2_rob['Year'])
ch4_national_dummies = pd.get_dummies(tc_ch4_rob['Country'])
ch4_year_dummies = pd.get_dummies(tc_ch4_rob['Year'])
nox_national_dummies = pd.get_dummies(tc_nox_rob['Country'])
nox_year_dummies = pd.get_dummies(tc_nox_rob['Year'])
xtc_co2_rob = pd.concat([tc_co2_rob, co2_national_dummies, co2_year_dummies], axis = 1).drop(['co2_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
xtc_ch4_rob = pd.concat([tc_ch4_rob, ch4_national_dummies, ch4_year_dummies], axis = 1).drop(['ch4_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
xtc_nox_rob = pd.concat([tc_nox_rob, nox_national_dummies, nox_year_dummies], axis = 1).drop(['nox_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
exp_co2_rob = data[['co2_intensity', 'co2_intensity_init', 'co2_intensity_lag', 'co2_tech', 'EXP_CO2_ROB', 'Country', 'Year']].dropna()
exp_ch4_rob = data[['ch4_intensity', 'ch4_intensity_init', 'ch4_intensity_lag', 'ch4_tech', 'EXP_CH4_ROB', 'Country', 'Year']].dropna()
exp_nox_rob = data[['nox_intensity', 'nox_intensity_init', 'nox_intensity_lag', 'nox_tech', 'EXP_NOX_ROB', 'Country', 'Year']].dropna()
co2_national_dummies = pd.get_dummies(exp_co2_rob['Country'])
co2_year_dummies = pd.get_dummies(exp_co2_rob['Year'])
ch4_national_dummies = pd.get_dummies(exp_ch4_rob['Country'])
ch4_year_dummies = pd.get_dummies(exp_ch4_rob['Year'])
nox_national_dummies = pd.get_dummies(exp_nox_rob['Country'])
nox_year_dummies = pd.get_dummies(exp_nox_rob['Year'])
xexp_co2_rob = pd.concat([exp_co2_rob, co2_national_dummies, co2_year_dummies], axis = 1).drop(['co2_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
xexp_ch4_rob = pd.concat([exp_ch4_rob, ch4_national_dummies, ch4_year_dummies], axis = 1).drop(['ch4_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
xexp_nox_rob = pd.concat([exp_nox_rob, nox_national_dummies, nox_year_dummies], axis = 1).drop(['nox_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
imp_co2_rob = data[['co2_intensity', 'co2_intensity_init', 'co2_intensity_lag', 'co2_tech', 'IMP_CO2_ROB', 'Country', 'Year']].dropna()
imp_ch4_rob = data[['ch4_intensity', 'ch4_intensity_init', 'ch4_intensity_lag', 'ch4_tech', 'IMP_CH4_ROB', 'Country', 'Year']].dropna()
imp_nox_rob = data[['nox_intensity', 'nox_intensity_init', 'nox_intensity_lag', 'nox_tech', 'IMP_NOX_ROB', 'Country', 'Year']].dropna()
co2_national_dummies = pd.get_dummies(imp_co2_rob['Country'])
co2_year_dummies = pd.get_dummies(imp_co2_rob['Year'])
ch4_national_dummies = pd.get_dummies(imp_ch4_rob['Country'])
ch4_year_dummies = pd.get_dummies(imp_ch4_rob['Year'])
nox_national_dummies = pd.get_dummies(imp_nox_rob['Country'])
nox_year_dummies = pd.get_dummies(imp_nox_rob['Year'])
ximp_co2_rob = pd.concat([imp_co2_rob, co2_national_dummies, co2_year_dummies], axis = 1).drop(['co2_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
ximp_ch4_rob = pd.concat([imp_ch4_rob, ch4_national_dummies, ch4_year_dummies], axis = 1).drop(['ch4_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
ximp_nox_rob = pd.concat([imp_nox_rob, nox_national_dummies, nox_year_dummies], axis = 1).drop(['nox_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
# Run network effects regressions for intensities
tc_co2_rob_mod = stats.OLS(tc_co2_rob['co2_intensity'].astype(float), stats.add_constant(xtc_co2_rob).astype(float))
tc_ch4_rob_mod = stats.OLS(tc_ch4_rob['ch4_intensity'].astype(float), stats.add_constant(xtc_ch4_rob).astype(float))
tc_nox_rob_mod = stats.OLS(tc_nox_rob['nox_intensity'].astype(float), stats.add_constant(xtc_nox_rob).astype(float))
exp_co2_rob_mod = stats.OLS(exp_co2_rob['co2_intensity'].astype(float), stats.add_constant(xexp_co2_rob).astype(float))
exp_ch4_rob_mod = stats.OLS(exp_ch4_rob['ch4_intensity'].astype(float), stats.add_constant(xexp_ch4_rob).astype(float))
exp_nox_rob_mod = stats.OLS(exp_nox_rob['nox_intensity'].astype(float), stats.add_constant(xexp_nox_rob).astype(float))
imp_co2_rob_mod = stats.OLS(imp_co2_rob['co2_intensity'].astype(float), stats.add_constant(ximp_co2_rob).astype(float))
imp_ch4_rob_mod = stats.OLS(imp_ch4_rob['ch4_intensity'].astype(float), stats.add_constant(ximp_ch4_rob).astype(float))
imp_nox_rob_mod = stats.OLS(imp_nox_rob['nox_intensity'].astype(float), stats.add_constant(ximp_nox_rob).astype(float))
# Write results of regressions to file
co2_models = [tc_co2_rob_mod, exp_co2_rob_mod, imp_co2_rob_mod]
names = ['Competition', 'Exports', 'Imports']
res_list = []
for mod in co2_models:
res = mod.fit(cov_type = 'HC1')
res_list.append(res)
print(res.summary())
file = open('C:/Users/User/Documents/Data/Pollution/Main_CO2_' + names[co2_models.index(mod)] + '.txt', 'w')
file.write(res.summary().as_text())
file.close()
restab(res_list, 'C:/Users/User/Documents/Data/Pollution/restab_networks_CO2.txt')
ch4_models = [tc_ch4_rob_mod, exp_ch4_rob_mod, imp_ch4_rob_mod]
res_list = []
for mod in ch4_models:
res = mod.fit(cov_type = 'HC1')
res_list.append(res)
print(res.summary())
file = open('C:/Users/User/Documents/Data/Pollution/Main_CH4_' + names[ch4_models.index(mod)] + '.txt', 'w')
file.write(res.summary().as_text())
file.close()
restab(res_list, 'C:/Users/User/Documents/Data/Pollution/restab_networks_CH4.txt')
nox_models = [tc_nox_rob_mod, exp_nox_rob_mod, imp_nox_rob_mod]
res_list = []
for mod in nox_models:
res = mod.fit(cov_type = 'HC1')
res_list.append(res)
print(res.summary())
file = open('C:/Users/User/Documents/Data/Pollution/Main_NOX_' + names[nox_models.index(mod)] + '.txt', 'w')
file.write(res.summary().as_text())
file.close()
restab(res_list, 'C:/Users/User/Documents/Data/Pollution/restab_networks_NOX.txt')
# Geographical regression
geo_co2_rob = data[['co2_intensity', 'co2_intensity_init', 'co2_intensity_lag', 'co2_tech', 'GEO_CO2', 'Country', 'Year']].dropna()
co2_national_dummies = pd.get_dummies(geo_co2_rob['Country'])
co2_year_dummies = pd.get_dummies(geo_co2_rob['Year'])
geo_co2_robx = pd.concat([geo_co2_rob, co2_national_dummies, co2_year_dummies], axis = 1).drop(['co2_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
geo_co2_rob_mod = stats.OLS(geo_co2_rob['co2_intensity'].astype(float), stats.add_constant(geo_co2_robx).astype(float))
res = geo_co2_rob_mod.fit(cov_type = 'HC1')
res_list.append(res)
print(res.summary())
file = open('C:/Users/User/Documents/Data/Pollution/GEO_CO2.txt', 'w')
file.write(res.summary().as_text())
file.close()
# Regressions with geographic spillovers and network effects
# GEO + TC
geo_tc_co2_rob = data[['co2_intensity', 'co2_intensity_init', 'co2_intensity_lag', 'co2_tech', 'GEO_CO2', 'TC_CO2_ROB', 'Country', 'Year']].dropna()
co2_national_dummies = pd.get_dummies(geo_tc_co2_rob['Country'])
co2_year_dummies = pd.get_dummies(geo_tc_co2_rob['Year'])
geo_tc_co2_robx = pd.concat([geo_tc_co2_rob, co2_national_dummies, co2_year_dummies], axis = 1).drop(['co2_intensity', 'Country', 'Year', 'Zimbabwe', 1971], axis = 1)
geo_tc_co2_rob_mod = stats.OLS(geo_tc_co2_rob['co2_intensity'].astype(float), stats.add_constant(geo_tc_co2_robx).astype(float))
res = geo_tc_co2_rob_mod.fit(cov_type = 'HC1')
res_list.append(res)
print(res.summary())
file = open('C:/Users/User/Documents/Data/Pollution/GEO_CO2_TC.txt', 'w')
file.write(res.summary().as_text())
file.close()
# GEO + EXPORTS
geo_exp_co2_rob = data[['co2_intensity', 'co2_intensity_init', 'co2_intensity_lag', 'co2_tech', 'GEO_CO2', 'EXP_CO2_ROB', 'Country', 'Year']].dropna()
co2_national_dummies = | pd.get_dummies(geo_exp_co2_rob['Country']) | pandas.get_dummies |
import re
from datetime import datetime
import nose
import pytz
import platform
from time import sleep
import os
import logging
import numpy as np
from distutils.version import StrictVersion
from pandas import compat
from pandas import NaT
from pandas.compat import u, range
from pandas.core.frame import DataFrame
import pandas.io.gbq as gbq
import pandas.util.testing as tm
from pandas.compat.numpy import np_datetime64_compat
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
_IMPORTS = False
_GOOGLE_API_CLIENT_INSTALLED = False
_GOOGLE_API_CLIENT_VALID_VERSION = False
_HTTPLIB2_INSTALLED = False
_SETUPTOOLS_INSTALLED = False
def _skip_if_no_project_id():
if not _get_project_id():
raise nose.SkipTest(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
raise nose.SkipTest("Cannot run integration tests without a "
"private key json file path")
def _skip_if_no_private_key_contents():
if not _get_private_key_contents():
raise nose.SkipTest("Cannot run integration tests without a "
"private key json contents")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
else:
return PROJECT_ID
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
else:
return PRIVATE_KEY_JSON_PATH
def _get_private_key_contents():
if _in_travis_environment():
with open(os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])) as f:
return f.read()
else:
return PRIVATE_KEY_JSON_CONTENTS
def _test_imports():
global _GOOGLE_API_CLIENT_INSTALLED, _GOOGLE_API_CLIENT_VALID_VERSION, \
_HTTPLIB2_INSTALLED, _SETUPTOOLS_INSTALLED
try:
import pkg_resources
_SETUPTOOLS_INSTALLED = True
except ImportError:
_SETUPTOOLS_INSTALLED = False
if compat.PY3:
google_api_minimum_version = '1.4.1'
else:
google_api_minimum_version = '1.2.0'
if _SETUPTOOLS_INSTALLED:
try:
try:
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
except:
from apiclient.discovery import build # noqa
from apiclient.errors import HttpError # noqa
from oauth2client.client import OAuth2WebServerFlow # noqa
from oauth2client.client import AccessTokenRefreshError # noqa
from oauth2client.file import Storage # noqa
from oauth2client.tools import run_flow # noqa
_GOOGLE_API_CLIENT_INSTALLED = True
_GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution(
'google-api-python-client').version
if (StrictVersion(_GOOGLE_API_CLIENT_VERSION) >=
StrictVersion(google_api_minimum_version)):
_GOOGLE_API_CLIENT_VALID_VERSION = True
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
try:
import httplib2 # noqa
_HTTPLIB2_INSTALLED = True
except ImportError:
_HTTPLIB2_INSTALLED = False
if not _SETUPTOOLS_INSTALLED:
raise ImportError('Could not import pkg_resources (setuptools).')
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('Could not import Google API Client.')
if not _GOOGLE_API_CLIENT_VALID_VERSION:
raise ImportError("pandas requires google-api-python-client >= {0} "
"for Google BigQuery support, "
"current version {1}"
.format(google_api_minimum_version,
_GOOGLE_API_CLIENT_VERSION))
if not _HTTPLIB2_INSTALLED:
raise ImportError(
"pandas requires httplib2 for Google BigQuery support")
# Bug fix for https://github.com/pandas-dev/pandas/issues/12572
# We need to know that a supported version of oauth2client is installed
# Test that either of the following is installed:
# - SignedJwtAssertionCredentials from oauth2client.client
# - ServiceAccountCredentials from oauth2client.service_account
# SignedJwtAssertionCredentials is available in oauthclient < 2.0.0
# ServiceAccountCredentials is available in oauthclient >= 2.0.0
oauth2client_v1 = True
oauth2client_v2 = True
try:
from oauth2client.client import SignedJwtAssertionCredentials # noqa
except ImportError:
oauth2client_v1 = False
try:
from oauth2client.service_account import ServiceAccountCredentials # noqa
except ImportError:
oauth2client_v2 = False
if not oauth2client_v1 and not oauth2client_v2:
raise ImportError("Missing oauth2client required for BigQuery "
"service account support")
def _setup_common():
try:
_test_imports()
except (ImportError, NotImplementedError) as import_exception:
raise nose.SkipTest(import_exception)
if _in_travis_environment():
logging.getLogger('oauth2client').setLevel(logging.ERROR)
logging.getLogger('apiclient').setLevel(logging.ERROR)
def _check_if_can_get_correct_default_credentials():
# Checks if "Application Default Credentials" can be fetched
# from the environment the tests are running in.
# See Issue #13577
import httplib2
try:
from googleapiclient.discovery import build
except ImportError:
from apiclient.discovery import build
try:
from oauth2client.client import GoogleCredentials
credentials = GoogleCredentials.get_application_default()
http = httplib2.Http()
http = credentials.authorize(http)
bigquery_service = build('bigquery', 'v2', http=http)
jobs = bigquery_service.jobs()
job_data = {'configuration': {'query': {'query': 'SELECT 1'}}}
jobs.insert(projectId=_get_project_id(), body=job_data).execute()
return True
except:
return False
def clean_gbq_environment(private_key=None):
dataset = gbq._Dataset(_get_project_id(), private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
def test_generate_bq_schema_deprecated():
# 11121 Deprecation of generate_bq_schema
with tm.assert_produces_warning(FutureWarning):
df = make_mixed_dataframe_v2(10)
gbq.generate_bq_schema(df)
class TestGBQConnectorIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
self.sut = gbq.GbqConnector(_get_project_id(),
private_key=_get_private_key_path())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
def test_get_application_default_credentials_does_not_throw_error(self):
if _check_if_can_get_correct_default_credentials():
raise nose.SkipTest("Can get default_credentials "
"from the environment!")
credentials = self.sut.get_application_default_credentials()
self.assertIsNone(credentials)
def test_get_application_default_credentials_returns_credentials(self):
if not _check_if_can_get_correct_default_credentials():
raise nose.SkipTest("Cannot get default_credentials "
"from the environment!")
from oauth2client.client import GoogleCredentials
credentials = self.sut.get_application_default_credentials()
self.assertTrue(isinstance(credentials, GoogleCredentials))
class TestGBQConnectorServiceAccountKeyPathIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
_skip_if_no_private_key_path()
self.sut = gbq.GbqConnector(_get_project_id(),
private_key=_get_private_key_path())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
class TestGBQConnectorServiceAccountKeyContentsIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
        _skip_if_no_private_key_contents()
        self.sut = gbq.GbqConnector(_get_project_id(),
                                    private_key=_get_private_key_contents())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
class GBQUnitTests(tm.TestCase):
def setUp(self):
_setup_common()
def test_import_google_api_python_client(self):
if compat.PY2:
with tm.assertRaises(ImportError):
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
from apiclient.discovery import build # noqa
from apiclient.errors import HttpError # noqa
else:
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
def test_should_return_bigquery_integers_as_python_floats(self):
result = gbq._parse_entry(1, 'INTEGER')
tm.assert_equal(result, float(1))
def test_should_return_bigquery_floats_as_python_floats(self):
result = gbq._parse_entry(1, 'FLOAT')
tm.assert_equal(result, float(1))
def test_should_return_bigquery_timestamps_as_numpy_datetime(self):
result = gbq._parse_entry('0e9', 'TIMESTAMP')
tm.assert_equal(result, np_datetime64_compat('1970-01-01T00:00:00Z'))
def test_should_return_bigquery_booleans_as_python_booleans(self):
result = gbq._parse_entry('false', 'BOOLEAN')
tm.assert_equal(result, False)
def test_should_return_bigquery_strings_as_python_strings(self):
result = gbq._parse_entry('STRING', 'STRING')
tm.assert_equal(result, 'STRING')
def test_to_gbq_should_fail_if_invalid_table_name_passed(self):
with tm.assertRaises(gbq.NotFoundException):
gbq.to_gbq(DataFrame(), 'invalid_table_name', project_id="1234")
def test_to_gbq_with_no_project_id_given_should_fail(self):
with tm.assertRaises(TypeError):
gbq.to_gbq(DataFrame(), 'dataset.tablename')
def test_read_gbq_with_no_project_id_given_should_fail(self):
with tm.assertRaises(TypeError):
gbq.read_gbq('SELECT "1" as NUMBER_1')
def test_that_parse_data_works_properly(self):
test_schema = {'fields': [
{'mode': 'NULLABLE', 'name': 'VALID_STRING', 'type': 'STRING'}]}
test_page = [{'f': [{'v': 'PI'}]}]
test_output = gbq._parse_data(test_schema, test_page)
correct_output = DataFrame({'VALID_STRING': ['PI']})
| tm.assert_frame_equal(test_output, correct_output) | pandas.util.testing.assert_frame_equal |
import types
from functools import wraps
import numpy as np
import datetime
import collections
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly, Appender
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'irow', 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'value_counts', 'unique', 'nunique',
'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the target object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
    freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
    >>> df.groupby(Grouper(key='A')) : syntactic sugar for df.groupby('A')
>>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
>>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
specify a resample on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
        given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key],name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.get_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
return len(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
def convert(key, s):
            # possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif isinstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample = next(iter(self.indices))
if isinstance(sample, tuple):
if not isinstance(name, tuple):
raise ValueError("must supply a tuple to get_group with multiple grouping keys")
if not len(name) == len(sample):
raise ValueError("must supply a a same-length tuple to get_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample) ])
else:
name = convert(name, sample)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if len(groupers):
self._group_selection = (ax-Index(groupers)).tolist()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._apply_whitelist)))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __getitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
        # need to set up the selection,
        # as these are not passed directly but via the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
                    # this can be called recursively, so we need to raise
                    # ValueError if we don't have this method, to indicate to
                    # aggregate that this column should be marked as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function ((f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment',None):
return self._python_apply_general(f)
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def mean(self):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def std(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.std(ddof=ddof)/np.sqrt(self.count())
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
_count = _groupby_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().astype('int64')
def ohlc(self):
"""
Compute sum of values, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
"""
Take the nth row from each group.
If dropna, will not show nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
>>> g.nth(1, dropna='any') # NaNs denote group exhausted when using dropna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not dropna: # good choice
m = self.grouper._max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._get_axis(self.axis)[is_nth]
result = result.sort_index()
return result
if (isinstance(self._selected_obj, DataFrame)
and dropna not in ['any', 'all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the dropped object
grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = dropped.groupby(grouper).size()
result = dropped.groupby(grouper).nth(n)
mask = (sizes<max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Example
-------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).tail(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
        try to cast the result back to our obj's original type;
        we may have round-tripped through object dtype in the meantime
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
result = concat(values, axis=self.axis)
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = False
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except (Exception) as e:
                # any other error: fall through and take the slow path below
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.value_counts(labels, sort=False)
bin_counts = bin_counts.reindex(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _max_groupsize(self):
'''
Compute size of largest group
'''
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(all_labels)
labs, uniques = algos.factorize(tups)
if self.sort:
uniques, labs = _reorder_by_uniques(uniques, labs)
return labs, uniques
else:
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(len(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
recons = self.get_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def get_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _get_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def get_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
f = getattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
return getattr(_algos, fname, None)
ftype = self._cython_functions[how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this "
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.astype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._get_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _algos.groupsort_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
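# Illustrative sketch (not part of the original source): for sorted values and sorted
# bin edges, generate_bins_generic returns the offsets that split 'values' into bins.
#
#   >>> values = np.array([1, 2, 3, 6, 7])
#   >>> binner = np.array([0, 3, 8])
#   >>> generate_bins_generic(values, binner, closed='left')
#   array([2, 5])
#
# i.e. values[0:2] ([1, 2]) fall in the first bin and values[2:5] ([3, 6, 7]) in the second.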
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start,None)
def apply(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.get_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.append(key)
result_values.append(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def ngroups(self):
return len(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Series(np.zeros(len(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = len(v)
bin_counts = Series(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.astype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'mean': 'group_mean_bin',
'min': 'group_min_bin',
'max': 'group_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._get_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
# dependent on the passed-in level
#
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
self._was_factor = True
# all levels may not be observed
labels, uniques = algos.factorize(inds, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, uniques = algos.factorize(inds[mask], sort=True)
labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif isinstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there any way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.levels
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not isinstance(self.grouper, (Series, Index, np.ndarray)):
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamp-like values
if getattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
return _groupby_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._was_factor: # pragma: no cover
raise Exception('Should not call this method grouping by level')
else:
labels, uniques = algos.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
_groups = None
@property
def groups(self):
if self._groups is None:
self._groups = self.index.groupby(self.grouper)
return self._groups
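# Illustrative sketch (not part of the original source): a Grouping factorizes its
# grouper into integer labels plus the unique group_index. The arrays are hypothetical.
#
#   >>> ping = Grouping(Index(['a', 'b', 'c']), grouper=np.array(['x', 'y', 'x']))
#   >>> list(ping.labels), list(ping.group_index)
#   ([0, 1, 0], ['x', 'y'])
#   >>> ping.ngroups
#   2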
def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis, level, and sort, while
the passed-in axis, level, and sort are 'global'.
This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_length = len(keys) == len(group_axis)
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if (not any_callable and not all_in_columns
and not any_arraylike and match_axis_length
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
for i, (gpr, level) in enumerate(zip(keys, levels)):
name = None
try:
obj._data.items.get_loc(gpr)
in_axis = True
except Exception:
in_axis = False
if _is_label_like(gpr) or in_axis:
exclusions.append(gpr)
name = gpr
gpr = obj[gpr]
if isinstance(gpr, Categorical) and len(gpr) != len(obj):
errmsg = "Categorical grouper must have len(grouper) == len(data)"
raise AssertionError(errmsg)
ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
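# Illustrative sketch (not part of the original source): resolving a column key into a
# BaseGrouper with a single Grouping; the column name becomes an exclusion. The frame
# below is hypothetical.
#
#   >>> df = DataFrame({'A': ['x', 'y', 'x'], 'B': [1, 2, 3]})
#   >>> grouper, exclusions, obj = _get_grouper(df, key='A')
#   >>> grouper.names, exclusions
#   (['A'], ['A'])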
def _is_label_like(val):
return isinstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper.values
else:
return grouper.reindex(axis).values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
class SeriesGroupBy(GroupBy):
_apply_whitelist = _series_apply_whitelist
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply an aggregation function or functions to groups, most likely yielding
a Series, but in some cases a DataFrame, depending on the output of the
aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce DataFrame with column names
determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> series
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mapper = lambda x: x[0] # first letter
>>> grouped = series.groupby(mapper)
>>> grouped.aggregate(np.sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.sum, np.mean, np.std])
mean std sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
... 'total' : np.sum})
result total
b 2.121 3
q 4.95 7
See also
--------
apply, transform
Returns
-------
Series or DataFrame
"""
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
return ret
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return DataFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
# sort of a kludge
output = output[self.name]
index = self.grouper.result_index
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self.name)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
return DataFrame(values, index=index).stack()
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
# if string function
if isinstance(func, compat.string_types):
return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs))
# do we have a cython function
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
return self._transform_fast(cyfunc)
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to astype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.astype(common_type)
except:
pass
indexer = self._get_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self,func)
values = func().values
counts = self.count().values
values = np.repeat(values, com._ensure_platform_int(counts))
# the values/counts are repeated according to the group index
indices = self.indices
# shortcut if we have an already ordered grouper
if Index(self.grouper.group_info[0]).is_monotonic:
result = Series(values, index=self.obj.index)
else:
index = Index(np.concatenate([ indices[v] for v in self.grouper.result_index ]))
result = Series(values, index=index).sort_index()
result.index = self.obj.index
return result
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Example
-------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) if true_and_notnull(group) else []
for name, group in self]
except (ValueError, TypeError):
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._get_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.get_numeric_data(copy=False)
for block in data.blocks:
values = block._try_operate(block.values)
if block.is_numeric:
values = com.ensure_float(values)
result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None:
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
result = OrderedDict()
if isinstance(arg, dict):
if self.axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
if any(isinstance(x, (list, tuple, dict)) for x in arg.values()):
new_arg = OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
arg = new_arg
keys = []
if self._selection is not None:
subset = obj
if isinstance(subset, DataFrame):
raise NotImplementedError
for fname, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(subset, selection=self._selection,
grouper=self.grouper)
result[fname] = colg.aggregate(agg_how)
keys.append(fname)
else:
for col, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
result[col] = colg.aggregate(agg_how)
keys.append(col)
if isinstance(list(result.values())[0], DataFrame):
from pandas.tools.merge import concat
result = concat([result[k] for k in keys], keys=keys, axis=1)
else:
result = DataFrame(result)
elif isinstance(arg, list):
return self._aggregate_multiple_funcs(arg)
else:
cyfunc = _intercept_cython(arg)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs([arg])
result.columns = Index(result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
if isinstance(result.index, MultiIndex):
zipped = zip(result.index.levels, result.index.labels,
result.index.names)
for i, (lev, lab, name) in enumerate(zipped):
result.insert(i, name,
com.take_nd(lev.values, lab,
allow_fill=False))
result = result.consolidate()
else:
values = result.index.values
name = self.grouper.groupings[0].name
result.insert(0, name, values)
result.index = np.arange(len(result))
return result.convert_objects()
def _aggregate_multiple_funcs(self, arg):
from pandas.tools.merge import concat
if self.axis != 0:
raise NotImplementedError
obj = self._obj_with_exclusions
results = []
keys = []
for col in obj:
try:
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except SpecificationError:
raise
result = concat(results, keys=keys, axis=1)
return result
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
# for name in self.indices:
# data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors=None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors=e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
if len(keys) == 0:
# XXX
return DataFrame({})
key_names = self.grouper.names
if isinstance(values[0], DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
if com._count_not_none(*values) != len(values):
v = next(v for v in values if v is not None)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.tools.merge import concat
return concat(values)
if not all_indexed_same:
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as its faster than concat
# and if we have mi-columns
if not _np_version_under1p7 or isinstance(v.index,MultiIndex) or key_index is None:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = DataFrame(stacked_values,index=key_index,columns=index)
else:
# GH5788 instead of stacking; concat gets the dtypes correct
from pandas.tools.merge import concat
result = concat(values,keys=key_index,names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = DataFrame(stacked_values.T,index=v.index,columns=key_index)
except (ValueError, AttributeError):
# GH1738: values is list of arrays of unequal lengths fall
# through to the outer else clause
return Series(values, index=key_index)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
if (self._selected_obj.ndim == 2
and self._selected_obj.dtypes.isin(_DATELIKE_DTYPES).any()):
cd = 'coerce'
else:
cd = True
return result.convert_objects(convert_dates=cd)
else:
# only coerce dates if we find at least 1 datetime
cd = 'coerce' if any([ isinstance(v,Timestamp) for v in values ]) else False
return Series(values, index=key_index).convert_objects(convert_dates=cd)
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
from pandas.tools.merge import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except Exception: # pragma: no cover
res = fast_path(group)
path = fast_path
else:
res = path(group)
# broadcasting
if isinstance(res, Series):
if res.index.is_(obj.index):
group.T.values[:] = res
else:
group.values[:] = res
applied.append(group)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
concatenated.sort_index(inplace=True)
return concatenated
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed DataFrame on each group and
return a DataFrame having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each subframe
Notes
-----
Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
"""
# try to do a fast transform via merge if possible
try:
obj = self._obj_with_exclusions
if isinstance(func, compat.string_types):
result = getattr(self, func)(*args, **kwargs)
else:
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
result = getattr(self, cyfunc)()
else:
return self._transform_general(func, *args, **kwargs)
except:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
# nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
# a grouped result that doesn't preserve the index: remap the index based on
# the grouper and broadcast it
if ((not isinstance(obj.index,MultiIndex) and
type(result.index) != type(obj.index)) or
len(result.index) != len(obj.index)):
results = obj.values.copy()
for (name, group), (i, row) in zip(self, result.iterrows()):
indexer = self._get_index(name)
results[indexer] = np.tile(row.values,len(indexer)).reshape(len(indexer),-1)
return DataFrame(results,columns=result.columns,index=obj.index).convert_objects()
# we can merge the result in
# GH 7383
names = result.columns
result = obj.merge(result, how='outer', left_index=True, right_index=True).iloc[:,-result.shape[1]:]
result.columns = names
return result
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, compat.string_types):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we get the same results
if res.shape == res_fast.shape:
res_r = res.values.ravel()
res_fast_r = res_fast.values.ravel()
mask = notnull(res_r)
if (res_r[mask] == res_fast_r[mask]).all():
path = fast_path
except:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.append(i)
except Exception:
pass
if len(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return DataFrame(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
f : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Example
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, 'name', name)
res = func(group)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if (isinstance(res, (bool, np.bool_)) or
np.isscalar(res) and isnull(res)):
if res and notnull(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError("filter function returned a %s, "
"but expected a scalar bool" %
type(res).__name__)
return self._apply_filter(indices, dropna)
class DataFrameGroupBy(NDFrameGroupBy):
_apply_whitelist = _dataframe_apply_whitelist
_block_agg_axis = 1
def __getitem__(self, key):
if self._selection is not None:
raise Exception('Column(s) %s already selected' % self._selection)
if isinstance(key, (list, tuple, Series, Index, np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: %s"
% str(bad_keys)[1:-1])
return DataFrameGroupBy(self.obj, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
elif not self.as_index:
if key not in self.obj.columns:
raise KeyError("Column not found: %s" % key)
return DataFrameGroupBy(self.obj, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
else:
if key not in self.obj:
raise KeyError("Column not found: %s" % key)
# kind of a kludge
return SeriesGroupBy(self.obj[key], selection=key,
grouper=self.grouper,
exclusions=self.exclusions)
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if result:
if self.axis == 0:
result = DataFrame(result, index=obj.columns,
columns=result_index).T
else:
result = DataFrame(result, index=obj.index,
columns=result_index)
else:
result = DataFrame(result)
return result
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
def _wrap_aggregated_output(self, output, names=None):
agg_axis = 0 if self.axis == 1 else 1
agg_labels = self._obj_with_exclusions._get_axis(agg_axis)
output_keys = self._decide_output_index(output, agg_labels)
if not self.as_index:
result = DataFrame(output, columns=output_keys)
group_levels = self.grouper.get_group_levels()
zipped = zip(self.grouper.names, group_levels)
for i, (name, labels) in enumerate(zipped):
result.insert(i, name, labels)
result = result.consolidate()
else:
index = self.grouper.result_index
result = DataFrame(output, index=index, columns=output_keys)
if self.axis == 1:
result = result.T
return self._reindex_output(result).convert_objects()
def _wrap_agged_blocks(self, items, blocks):
if not self.as_index:
index = np.arange(blocks[0].values.shape[1])
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
group_levels = self.grouper.get_group_levels()
zipped = zip(self.grouper.names, group_levels)
for i, (name, labels) in enumerate(zipped):
result.insert(i, name, labels)
result = result.consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result).convert_objects()
def _reindex_output(self, result):
"""
if we have categorical groupers, then we want to make sure that
the output is fully reindexed over all the category levels, since some levels
may not have participated in the groupings (e.g. may have all been nan groups)
This can re-expand the output space
"""
groupings = self.grouper.groupings
if groupings is None:
return result
elif len(groupings) == 1:
return result
elif not any([ping._was_factor for ping in groupings]):
return result
levels_list = [ ping._group_index for ping in groupings ]
index = MultiIndex.from_product(levels_list, names=self.grouper.names)
return result.reindex(**{ self.obj._get_axis_name(self.axis) : index, 'copy' : False }).sortlevel()
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions)
def _apply_to_column_groupbys(self, func):
from pandas.tools.merge import concat
return concat(
(func(col_groupby) for _, col_groupby
in self._iterate_column_groupbys()),
keys=self._selected_obj.columns, axis=1)
from pandas.tools.plotting import boxplot_frame_groupby
DataFrameGroupBy.boxplot = boxplot_frame_groupby
class PanelGroupBy(NDFrameGroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self._selected_obj.items
else:
slice_axis = self._selection_list
slicer = lambda x: self._selected_obj[x]
else:
raise NotImplementedError
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def aggregate(self, arg, *args, **kwargs):
"""
Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a Panel or when passed to Panel.apply. If
pass a dict, the keys must be DataFrame column names
Returns
-------
aggregated : Panel
"""
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
return self._aggregate_generic(arg, *args, **kwargs)
def _wrap_generic_output(self, result, obj):
if self.axis == 0:
new_axes = list(obj.axes)
new_axes[0] = self.grouper.result_index
elif self.axis == 1:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, z, x]
else:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, y, x]
result = Panel._from_axes(result, new_axes)
if self.axis == 1:
result = result.swapaxes(0, 1).swapaxes(0, 2)
elif self.axis == 2:
result = result.swapaxes(0, 2)
return result
def _aggregate_item_by_item(self, func, *args, **kwargs):
obj = self._obj_with_exclusions
result = {}
if self.axis > 0:
for item in obj:
try:
itemg = DataFrameGroupBy(obj[item],
axis=self.axis - 1,
grouper=self.grouper)
result[item] = itemg.aggregate(func, *args, **kwargs)
except (ValueError, TypeError):
raise
new_axes = list(obj.axes)
new_axes[self.axis] = self.grouper.result_index
return Panel._from_axes(result, new_axes)
else:
raise NotImplementedError
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
class NDArrayGroupBy(GroupBy):
pass
#----------------------------------------------------------------------
# Splitting / application
class DataSplitter(object):
def __init__(self, data, labels, ngroups, axis=0):
self.data = data
self.labels = com._ensure_int64(labels)
self.ngroups = ngroups
self.axis = axis
@cache_readonly
def slabels(self):
# Sorted labels
return com.take_nd(self.labels, self.sort_idx, allow_fill=False)
@cache_readonly
def sort_idx(self):
# Counting sort indexer
return _algos.groupsort_indexer(self.labels, self.ngroups)[0]
def __iter__(self):
sdata = self._get_sorted_data()
if self.ngroups == 0:
return  # ends iteration; raising StopIteration inside a generator is deprecated (PEP 479)
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
for i, (start, end) in enumerate(zip(starts, ends)):
# Since I'm now compressing the group ids, it's now not "possible"
# to produce empty slices because such groups would not be observed
# in the data
# if start >= end:
# raise AssertionError('Start %s must be less than end %s'
# % (str(start), str(end)))
yield i, self._chop(sdata, slice(start, end))
def _get_sorted_data(self):
return self.data.take(self.sort_idx, axis=self.axis, convert=False)
def _chop(self, sdata, slice_obj):
return sdata.iloc[slice_obj]
def apply(self, f):
raise NotImplementedError
class ArraySplitter(DataSplitter):
pass
class SeriesSplitter(DataSplitter):
def _chop(self, sdata, slice_obj):
return sdata._get_values(slice_obj).to_dense()
class FrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(FrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
def fast_apply(self, f, names):
# must return values::list, mutated::bool
try:
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
except:
# fails when all -1
return [], True
sdata = self._get_sorted_data()
results, mutated = lib.apply_frame_axis0(sdata, f, names, starts, ends)
return results, mutated
def _chop(self, sdata, slice_obj):
if self.axis == 0:
return sdata.iloc[slice_obj]
else:
return sdata._slice(slice_obj, axis=1) # ix[:, slice_obj]
class NDFrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(NDFrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
self.factory = data._constructor
def _get_sorted_data(self):
# this is the BlockManager
data = self.data._data
# this is sort of wasteful but...
sorted_axis = data.axes[self.axis].take(self.sort_idx)
sorted_data = data.reindex_axis(sorted_axis, axis=self.axis)
return sorted_data
def _chop(self, sdata, slice_obj):
return self.factory(sdata.get_slice(slice_obj, axis=self.axis))
def get_splitter(data, *args, **kwargs):
if isinstance(data, Series):
klass = SeriesSplitter
elif isinstance(data, DataFrame):
klass = FrameSplitter
else:
klass = NDFrameSplitter
return klass(data, *args, **kwargs)
#----------------------------------------------------------------------
# Misc utilities
def get_group_index(label_list, shape):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations.
"""
if len(label_list) == 1:
return label_list[0]
n = len(label_list[0])
group_index = np.zeros(n, dtype=np.int64)
mask = np.zeros(n, dtype=bool)
for i in range(len(shape)):
stride = np.prod([x for x in shape[i + 1:]], dtype=np.int64)
group_index += com._ensure_int64(label_list[i]) * stride
mask |= label_list[i] < 0
np.putmask(group_index, mask, -1)
return group_index
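# Illustrative sketch (not part of the original source): with two groupers of two levels
# each (shape == (2, 2)), row (i, j) is flattened to the offset i * 2 + j.
#
#   >>> labels = [np.array([0, 1, 1]), np.array([1, 0, 1])]
#   >>> get_group_index(labels, (2, 2))
#   array([1, 2, 3])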
_INT64_MAX = np.iinfo(np.int64).max
def _int64_overflow_possible(shape):
the_prod = long(1)
for x in shape:
the_prod *= long(x)
return the_prod >= _INT64_MAX
def decons_group_index(comp_labels, shape):
# reconstruct labels
label_list = []
factor = 1
y = 0
x = comp_labels
for i in reversed(range(len(shape))):
labels = (x - y) % (factor * shape[i]) // factor
np.putmask(labels, comp_labels < 0, -1)
label_list.append(labels)
y = labels * factor
factor *= shape[i]
return label_list[::-1]
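# Illustrative sketch (not part of the original source): decons_group_index inverts
# get_group_index for non-negative labels, recovering one label array per grouper.
#
#   >>> decons_group_index(np.array([1, 2, 3]), (2, 2))
#   [array([0, 1, 1]), array([1, 0, 1])]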
def _indexer_from_factorized(labels, shape, compress=True):
if _int64_overflow_possible(shape):
indexer = np.lexsort(np.array(labels[::-1]))
return indexer
group_index = get_group_index(labels, shape)
if compress:
comp_ids, obs_ids = _compress_group_index(group_index)
max_group = len(obs_ids)
else:
comp_ids = group_index
max_group = com._long_prod(shape)
if max_group > 1e6:
# Use mergesort to avoid memory errors in counting sort
indexer = comp_ids.argsort(kind='mergesort')
else:
indexer, _ = _algos.groupsort_indexer(comp_ids.astype(np.int64),
max_group)
return indexer
def _lexsort_indexer(keys, orders=None, na_position='last'):
labels = []
shape = []
if isinstance(orders, bool):
orders = [orders] * len(keys)
elif orders is None:
orders = [True] * len(keys)
for key, order in zip(keys, orders):
# we are already a Categorical
if is_categorical_dtype(key):
c = key
# create the Categorical
else:
c = | Categorical(key,ordered=True) | pandas.core.categorical.Categorical |
import pandas as pd
import random
import sklearn.preprocessing as pp
from datetime import datetime
import itertools
import re
from scipy.stats import entropy
import uuid
import pickle
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
import sqlite3
from collections import Counter
import numpy as np
from sklearn.linear_model import SGDClassifier
pd.options.display.max_columns = 50
pd.options.display.max_colwidth = 200
pd.set_option('display.max_rows', None)
RND_SEED = 45822
random.seed(RND_SEED)
np.random.seed(RND_SEED)
connection = sqlite3.connect('database.db', timeout=30)
with open("schema.sql") as f:
connection.executescript(f.read())
def get_db_connection():
conn = sqlite3.connect('database.db', timeout=30)
conn.row_factory = sqlite3.Row
return conn
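# Usage sketch (not part of the original source): with row_factory set to sqlite3.Row,
# fetched rows convert cleanly to dicts, which is the pattern the helpers below rely on.
#
#   >>> conn = get_db_connection()
#   >>> rows = conn.execute("SELECT name, value FROM initializeFlags;").fetchall()
#   >>> flags = {dict(row)["name"]: dict(row)["value"] for row in rows}
#   >>> conn.close()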
def populate_texts_table_sql(texts_list, table_name="texts", reset_labels=True):
conn = get_db_connection()
cur = conn.cursor()
cur.execute("DELETE FROM " + table_name + ";")
conn.commit()
if reset_labels:
for text_record in texts_list:
cur.execute("INSERT INTO " + table_name + " (id, text, label) VALUES (?, ?, ?)",
(text_record["id"], text_record["text"], "-"))
else:
for text_record in texts_list:
cur.execute("INSERT INTO " + table_name + " (id, text, label) VALUES (?, ?, ?)",
(text_record["id"], text_record["text"], text_record["label"]))
conn.commit()
conn.close()
return None
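# Usage sketch (not part of the original source): each record is a dict with "id",
# "text" and "label" keys; reset_labels=True stores "-" instead of the given labels.
# The records below are hypothetical.
#
#   >>> records = [{"id": "0", "text": "first document", "label": "spam"},
#   ...            {"id": "1", "text": "second document", "label": "ham"}]
#   >>> populate_texts_table_sql(records, table_name="texts", reset_labels=False)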
def get_decimal_value(name):
conn = get_db_connection()
query = "SELECT name, value FROM decimalValues WHERE name = '%s' ;" % name
sql_table = conn.execute(query).fetchall()
decimal_value = [dict(row)["value"] for row in sql_table][0]
conn.close()
return decimal_value
def set_decimal_value(name, value):
conn = get_db_connection()
query = "UPDATE decimalValues SET value = %s WHERE name = '%s' ;" % (value, name)
conn.execute(query)
conn.commit()
conn.close()
return None
def update_overall_quality_scores(value):
current_score = get_decimal_value(name="OVERALL_QUALITY_SCORE_DECIMAL")
set_decimal_value(name="OVERALL_QUALITY_SCORE_DECIMAL_PREVIOUS", value=current_score)
set_decimal_value(name="OVERALL_QUALITY_SCORE_DECIMAL", value=value)
return None
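# Usage sketch (not part of the original source): decimal metrics live in the
# decimalValues table; update_overall_quality_scores shifts the current score into the
# *_PREVIOUS slot before writing the new value.
#
#   >>> update_overall_quality_scores(0.87)
#   >>> get_decimal_value(name="OVERALL_QUALITY_SCORE_DECIMAL")
#   0.87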
def set_pkl(name, pkl_data, reset=False):
conn = get_db_connection()
cur = conn.cursor()
if not reset:
test_query = cur.execute('SELECT * FROM pkls WHERE name = ?', (name,)).fetchall()
if len(test_query) > 0:
cur.execute('DELETE FROM pkls WHERE name = ?', (name,))
query = "INSERT INTO pkls (name, data) VALUES (?, ?)"
pkl_data_ = pickle.dumps(pkl_data)
cur.execute(query, (name, pkl_data_))
# test_query = cur.execute('SELECT * FROM pkls WHERE name = ?', (name,)).fetchall()
# test_data = pickle.loads([dict(row)["data"] for row in test_query][0])
else:
cur.execute("DELETE FROM pkls WHERE name = '" + name + "';")
conn.commit()
conn.close()
return None
def get_pkl(name):
try:
conn = get_db_connection()
cur = conn.cursor()
query = "SELECT * FROM pkls WHERE name = '" + name + "';"
pkl_table = cur.execute(query).fetchall()
data = [dict(row)["data"] for row in pkl_table]
if len(data) > 0:
pkl_data = pickle.loads(data[0])
else:
pkl_data = None
conn.close()
return pkl_data
except:
return None
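# Usage sketch (not part of the original source): set_pkl / get_pkl round-trip any
# picklable object through the pkls table; the classifier below is hypothetical.
#
#   >>> clf = SGDClassifier()
#   >>> set_pkl("TRAINED_CLASSIFIER", clf)
#   >>> restored = get_pkl("TRAINED_CLASSIFIER")
#   >>> set_pkl("TRAINED_CLASSIFIER", None, reset=True)   # remove the stored entry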
def get_text_list(table_name="texts"):
conn = get_db_connection()
text_list_sql = conn.execute("SELECT * FROM " + table_name).fetchall()
text_list_sql = [dict(row) for row in text_list_sql]
conn.close()
return text_list_sql
def set_text_list(label, table_name="searchResults"):
conn = get_db_connection()
conn.execute("UPDATE " + table_name + " SET label = '" + label + "'")
conn.commit()
conn.close()
return None
def clear_text_list(table_name="searchResults"):
conn = get_db_connection()
cur = conn.cursor()
cur.execute("DELETE FROM " + table_name + ";")
conn.commit()
conn.close()
return None
def get_appropriate_text_list_list(text_list_full_sql, total_pages_sql, search_results_length_sql, table_limit_sql):
if search_results_length_sql > 0:
text_list_full_sql = get_text_list(table_name="searchResults")
total_pages_sql = get_variable_value(name="SEARCH_TOTAL_PAGES")
text_list_list_sql = create_text_list_list(text_list_full_sql=text_list_full_sql, sub_list_limit=table_limit_sql)
return text_list_list_sql, text_list_full_sql, total_pages_sql
def get_y_classes():
conn = get_db_connection()
y_classes_sql = conn.execute('SELECT className FROM yClasses;').fetchall()
y_classes_sql = [dict(row)["className"] for row in y_classes_sql]
conn.close()
return y_classes_sql
def clear_y_classes():
conn = get_db_connection()
conn.execute('DELETE FROM yClasses;')
conn.commit()
conn.close()
return []
def add_y_classes(y_classes_list, begin_fresh=True):
    conn = get_db_connection()
    cur = conn.cursor()
    if begin_fresh:
        cur.execute("DELETE FROM yClasses;")
    for i, value in enumerate(y_classes_list):
        cur.execute("INSERT INTO yClasses (classId, className) VALUES (?, ?)", (i, value))
conn.commit()
conn.close()
return 1
def get_click_log():
conn = get_db_connection()
sql_table = \
conn.execute('SELECT click_id, click_location, click_type, click_object, click_date_time FROM clickRecord;').fetchall()
click_log_sql = list()
for row in sql_table:
dict_row = {"click_id": dict(row)["click_id"], "click_location": dict(row)["click_location"],
"click_type": dict(row)["click_type"], "click_object": dict(row)["click_object"],
"click_date_time" : dict(row)["click_date_time"]}
click_log_sql.append(dict_row)
conn.close()
return click_log_sql
def get_value_log():
conn = get_db_connection()
sql_table = \
conn.execute('SELECT click_id, value_type, value FROM valueRecord;').fetchall()
click_log_sql = list()
for row in sql_table:
dict_row = {"click_id": dict(row)["click_id"], "value_type": dict(row)["value_type"],
"value": dict(row)["value"]}
click_log_sql.append(dict_row)
conn.close()
return click_log_sql
def reset_log_click_record_sql():
conn = get_db_connection()
conn.execute("DELETE FROM clickRecord")
conn.commit()
conn.close()
return None
def reset_log_click_value_sql():
conn = get_db_connection()
conn.execute("DELETE FROM valueRecord")
conn.commit()
conn.close()
return None
def add_log_click_record_sql(records):
conn = get_db_connection()
cur = conn.cursor()
for record in records:
cur.execute("""INSERT INTO clickRecord (click_id, click_location, click_type, click_object, click_date_time)
VALUES (?, ?, ?, ?, ?)""", (record["click_id"], record["click_location"], record["click_type"],
record["click_object"], record["click_date_time"]))
conn.commit()
conn.close()
return None
def add_log_click_value_sql(records):
conn = get_db_connection()
cur = conn.cursor()
for record in records:
cur.execute("""INSERT INTO valueRecord (click_id, value_type, value)
VALUES (?, ?, ?)""", (record["click_id"], record["value_type"], record["value"]))
conn.commit()
conn.close()
return None
def get_panel_flags():
conn = get_db_connection()
sql_table = conn.execute('SELECT name, value FROM initializeFlags;').fetchall()
panel_flags = {dict(row)["name"]: dict(row)["value"] for row in sql_table}
conn.close()
return panel_flags
def update_panel_flags_sql(update_flag):
conn = get_db_connection()
cur = conn.cursor()
update_query = "UPDATE initializeFlags SET value = ? WHERE name = ?;"
for name, value in update_flag.items():
cur.execute(update_query, (value, name))
conn.commit()
conn.close()
return None
def get_texts_group_x(table_name="group1Texts"):
conn = get_db_connection()
sql_table = conn.execute("SELECT id, text, label FROM " + table_name + ";").fetchall()
conn.close()
if len(sql_table) > 0:
texts_group_2 = [{"id": dict(row)["id"], "text": dict(row)["text"], "label": dict(row)["label"]} for row in sql_table]
else:
texts_group_2 = []
return texts_group_2
def set_texts_group_x(top_texts, table_name="group1Texts"):
conn = get_db_connection()
cur = conn.cursor()
cur.execute("DELETE FROM " + table_name + ";")
if top_texts:
for record in top_texts:
cur.execute("INSERT INTO " + table_name + " (id, text, label) VALUES (?, ?, ?)",
(record["id"], record["text"], record["label"]))
conn.commit()
conn.close()
return None
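# Illustrative sketch (not called anywhere): set_texts_group_x() clears the group
# table and re-inserts whatever records are passed, so get_texts_group_x() hands
# back the same dicts. The sample record is hypothetical.
def _example_group_table_round_trip():
    sample = [{"id": "t1", "text": "example text", "label": "-"}]
    set_texts_group_x(top_texts=sample, table_name="group1Texts")
    assert get_texts_group_x(table_name="group1Texts") == sample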
def get_total_summary_sql():
conn = get_db_connection()
sql_table = conn.execute('SELECT name, number, percentage FROM totalSummary;').fetchall()
total_summary = [{"name": dict(row)["name"],
"number": dict(row)["number"],
"percentage": dict(row)["percentage"]} for row in sql_table]
conn.close()
return total_summary
def set_total_summary(text_lists):
labels = [text_obj["label"] for text_obj in text_lists]
label_counter = Counter(labels)
total_texts = len(text_lists)
number_unlabeled = label_counter["-"]
number_labeled = total_texts - number_unlabeled
total_texts_percentage = "100.00%"
if total_texts > 0:
number_unlabeled_percentage = "{:.2%}".format(number_unlabeled / total_texts)
number_labeled_percentage = "{:.2%}".format(number_labeled / total_texts)
else:
number_unlabeled_percentage = "{:.2%}".format(1.0)
number_labeled_percentage = "{:.2%}".format(0.0)
total_summary = list()
total_summary.append({"name": "Total Texts",
"number": "{:,}".format(total_texts),
"percentage": total_texts_percentage})
total_summary.append({"name": "Total Unlabeled",
"number": "{:,}".format(number_unlabeled),
"percentage": number_unlabeled_percentage})
total_summary.append({"name": "Total Labeled",
"number": "{:,}".format(number_labeled),
"percentage": number_labeled_percentage})
conn = get_db_connection()
cur = conn.cursor()
cur.execute("DELETE FROM totalSummary;")
for record in total_summary:
cur.execute("INSERT INTO totalSummary (name, number, percentage) VALUES (?, ?, ?)",
(record["name"], record["number"], record["percentage"]))
conn.commit()
conn.close()
return None
def get_label_summary_sql():
conn = get_db_connection()
sql_table = conn.execute('SELECT name, number, percentage FROM labelSummary;').fetchall()
label_summary = [{"name": dict(row)["name"],
"number": dict(row)["number"],
"percentage": dict(row)["percentage"]} for row in sql_table]
conn.close()
return label_summary
def set_label_summary(text_lists):
labels = [text_obj["label"] for text_obj in text_lists]
label_counter = Counter(labels)
total_texts = len(text_lists)
label_summary = []
for key, value in label_counter.items():
label_summary.append({"name": key,
"number": "{:,}".format(value),
"percentage": "{:.2%}".format(value / total_texts)})
conn = get_db_connection()
cur = conn.cursor()
cur.execute("DELETE FROM labelSummary;")
for record in label_summary:
cur.execute("INSERT INTO labelSummary (name, number, percentage) VALUES (?, ?, ?)",
(record["name"], record["number"], record["percentage"]))
conn.commit()
conn.close()
return None
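# Illustrative sketch (not called anywhere): the summary setters only need a list
# of dicts carrying a "label" key; "-" marks unlabeled texts. With the hypothetical
# records below, labelSummary gets one row for "cat" and one for "-".
def _example_summary_inputs():
    texts = [{"id": "1", "text": "a", "label": "cat"},
             {"id": "2", "text": "b", "label": "-"}]
    set_total_summary(text_lists=texts)
    set_label_summary(text_lists=texts)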
def get_selected_text(selected_text_id, text_list_full_sql):
selected_text_test = [text["text"] for text in text_list_full_sql if text["id"] == selected_text_id]
if selected_text_id:
if len(selected_text_test) == 0:
selected_text = ""
else:
selected_text = selected_text_test[0]
else:
selected_text = ""
return selected_text
def create_text_list_list(text_list_full_sql, sub_list_limit):
texts_list_list = \
[text_list_full_sql[i:i + sub_list_limit] for i in range(0, len(text_list_full_sql), sub_list_limit)]
return texts_list_list
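# Illustrative sketch (not called anywhere): create_text_list_list() simply chunks
# the full list into pages of sub_list_limit records; the records are hypothetical.
def _example_create_text_list_list():
    full = [{"id": str(i)} for i in range(25)]
    pages = create_text_list_list(text_list_full_sql=full, sub_list_limit=10)
    assert [len(page) for page in pages] == [10, 10, 5]   # 25 records -> 3 pages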
def update_texts_list_by_id_sql(update_objs=None, selected_label=None, update_ids=None, sub_list_limit=10,
update_in_place=True):
conn = get_db_connection()
cur = conn.cursor()
if selected_label and update_ids and not update_objs:
if update_in_place:
update_query = "UPDATE texts SET label = ? WHERE id IN (%s)" % ",".join("?"*len(update_ids))
update_values = [selected_label]
update_values.extend(update_ids)
cur.execute(update_query, update_values)
conn.commit()
conn.close()
else:
cur.execute("DROP TABLE IF EXISTS temp_table;")
cur.execute("""
CREATE TABLE temp_table (
id TEXT NOT NULL,
text TEXT NOT NULL,
label TEXT NOT NULL
);
""")
query = "INSERT INTO temp_table SELECT * FROM texts WHERE id IN (%s)" % ",".join("?" * len(update_ids))
cur.execute(query, update_ids)
cur.execute("UPDATE temp_table SET label = ?", (selected_label, ))
cur.execute("DELETE FROM texts WHERE id IN (%s)" % ",".join("?" * len(update_ids)), update_ids)
cur.execute("INSERT INTO texts SELECT * FROM temp_table;")
conn.commit()
conn.close()
elif update_objs and not selected_label and not update_ids:
if update_in_place:
labels = set([obj["label"] for obj in update_objs])
for label in labels:
update_ids = [obj["id"] for obj in update_objs if obj["label"] == label]
                update_query = "UPDATE texts SET label = ? WHERE id IN (%s)" % ",".join("?" * len(update_ids))
                conn.execute(update_query, [label] + list(update_ids))
conn.commit()
conn.close()
else:
cur.execute("DROP TABLE IF EXISTS temp_table;")
cur.execute("""
CREATE TABLE temp_table (
id TEXT NOT NULL,
text TEXT NOT NULL,
label TEXT NOT NULL
);
""")
all_update_ids = [obj["id"] for obj in update_objs]
query = "INSERT INTO temp_table SELECT * FROM texts WHERE id IN (%s)" % ",".join("?" * len(all_update_ids))
cur.execute(query, all_update_ids)
labels = set([obj["label"] for obj in update_objs])
for label in labels:
update_ids = [obj["id"] for obj in update_objs if obj["label"] == label]
                update_query = "UPDATE temp_table SET label = ? WHERE id IN (%s)" % ",".join("?" * len(update_ids))
                conn.execute(update_query, [label] + list(update_ids))
delete_query = "DELETE FROM texts WHERE id IN (%s)" % ",".join("?" * len(all_update_ids))
cur.execute(delete_query, all_update_ids)
cur.execute("INSERT INTO texts SELECT * FROM temp_table;")
conn.commit()
conn.close()
text_list_full = get_text_list(table_name="texts")
texts_list_list = create_text_list_list(text_list_full_sql=text_list_full, sub_list_limit=sub_list_limit)
return text_list_full, texts_list_list
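# Illustrative sketch (not called anywhere) of the two mutually exclusive calling
# conventions of update_texts_list_by_id_sql(); the ids and labels are hypothetical.
def _example_update_texts_list_by_id_sql():
    # 1) one label applied to an explicit list of ids
    update_texts_list_by_id_sql(selected_label="fire", update_ids=["12", "15"],
                                sub_list_limit=10, update_in_place=True)
    # 2) per-record labels supplied as dicts (e.g. classifier output)
    objs = [{"id": "12", "text": "...", "label": "fire"},
            {"id": "15", "text": "...", "label": "flood"}]
    update_texts_list_by_id_sql(update_objs=objs, sub_list_limit=10,
                                update_in_place=True)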
def label_all_sql(fitted_classifier, sparse_vectorized_corpus, corpus_text_ids, texts_list,
label_only_unlabeled=True, sub_list_limit=50, update_in_place=True):
texts_list_df = pd.DataFrame(texts_list)
if not label_only_unlabeled:
predictions = fitted_classifier.predict(sparse_vectorized_corpus)
predictions_df = pd.DataFrame(predictions)
predictions_df["id"] = corpus_text_ids
labeled_text_ids = corpus_text_ids
number_to_label = len(labeled_text_ids)
else:
label_only_these_ids = texts_list_df[texts_list_df["label"] == "-"]["id"].values
keep_indices = [corpus_text_ids.index(x) for x in label_only_these_ids]
number_to_label = len(keep_indices)
if number_to_label > 0:
if label_only_unlabeled:
sparse_vectorized_corpus_alt = sparse_vectorized_corpus[keep_indices, :]
predictions = fitted_classifier.predict(sparse_vectorized_corpus_alt)
predictions_df = pd.DataFrame(predictions)
predictions_df["id"] = label_only_these_ids
labeled_text_ids = label_only_these_ids
predictions_df = predictions_df.rename(columns={0: "label"})
predictions_df = predictions_df.merge(texts_list_df[["id", "text"]], left_on="id", right_on="id",
how="left")
predictions_df = predictions_df[["id", "text", "label"]]
update_objects = predictions_df.to_dict("records")
text_list_full, texts_list_list = \
update_texts_list_by_id_sql(update_objs=update_objects,
selected_label=None,
update_ids=None,
sub_list_limit=sub_list_limit,
update_in_place=update_in_place)
else:
text_list_full = get_text_list(table_name="texts")
texts_list_list = create_text_list_list(text_list_full_sql=text_list_full, sub_list_limit=sub_list_limit)
labeled_text_ids = []
return text_list_full, texts_list_list, labeled_text_ids
def generate_summary_sql(text_lists):
labels = [text_obj["label"] for text_obj in text_lists]
label_counter = Counter(labels)
total_texts = len(text_lists)
number_unlabeled = label_counter["-"]
number_labeled = total_texts - number_unlabeled
set_total_summary(text_lists=text_lists)
set_label_summary(text_lists=text_lists)
set_variable(name="NUMBER_UNLABELED_TEXTS", value=number_unlabeled)
summary_headline = \
"Total Labeled : {:,} / {:,} {:.1%}".format(number_labeled, total_texts, number_labeled / total_texts)
set_variable(name="LABEL_SUMMARY_STRING", value=summary_headline)
total_summary_sql = get_total_summary_sql()
label_summary_sql = get_label_summary_sql()
number_unlabeled_texts_sql = number_unlabeled
label_summary_string_sql = summary_headline
return total_summary_sql, label_summary_sql, number_unlabeled_texts_sql, label_summary_string_sql
def set_variable(name, value):
conn = get_db_connection()
cur = conn.cursor()
test_query = cur.execute('SELECT * FROM variables WHERE name = ?', (name,)).fetchall()
    if len(test_query) > 0:
        cur.execute('DELETE FROM variables WHERE name = ?', (name,))
    query = "INSERT INTO variables (name, value) VALUES (?, ?)"
cur.execute(query, (name, value))
conn.commit()
conn.close()
return 1
def get_variable_value(name):
conn = get_db_connection()
cur = conn.cursor()
query = cur.execute('SELECT value FROM variables WHERE name = ?', (name,)).fetchall()
value = [dict(row)["value"] for row in query]
value = value[0]
if name in ["TOTAL_PAGES", "NUMBER_UNLABELED_TEXTS", "MAX_CONTENT_PATH", "TEXTS_LIMIT", "TABLE_LIMIT",
"MAX_FEATURES", "RND_STATE", "PREDICTIONS_NUMBER", "SEARCH_RESULTS_LENGTH", "GROUP_1_KEEP_TOP",
"GROUP_3_KEEP_TOP", "CONFIRM_LABEL_ALL_TEXTS_COUNTS", "SEARCH_TOTAL_PAGES", "LABEL_ALL_BATCH_NO",
"LABEL_ALL_TOTAL_BATCHES", "NUMBER_AUTO_LABELED", "LABEL_ALL_BATCH_SIZE"]:
value = int(value)
if name in ["KEEP_ORIGINAL", "GROUP_1_EXCLUDE_ALREADY_LABELED", "GROUP_2_EXCLUDE_ALREADY_LABELED",
"PREDICTIONS_VERBOSE", "SIMILAR_TEXT_VERBOSE", "FIT_CLASSIFIER_VERBOSE", "FIRST_LABELING_FLAG",
"FULL_FIT_IF_LABELS_GOT_OVERRIDDEN", "FORCE_FULL_FIT_FOR_DIFFICULT_TEXTS",
"LABELS_GOT_OVERRIDDEN_FLAG", "UPDATE_TEXTS_IN_PLACE"]:
if value == "True":
value = True
else:
value = False
if name in ["PREDICTIONS_PROBABILITY"]:
value = float(value)
conn.commit()
conn.close()
return value
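# Illustrative sketch (not called anywhere): values read back through
# get_variable_value() are coerced to int/bool/float depending on the variable
# name; "TABLE_LIMIT" is one of the int-typed names listed above.
def _example_variable_round_trip():
    set_variable(name="TABLE_LIMIT", value=50)
    assert get_variable_value(name="TABLE_LIMIT") == 50   # int, not "50"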
def get_difficult_texts_sql():
try:
conn = get_db_connection()
select_diff_texts_query = get_variable_value(name="SELECT_DIFF_TEXTS_QUERY")
sql_cols_list_y_classes = get_pkl(name="SQL_COLS_LIST_Y_CLASSES")
sql_table = conn.execute(select_diff_texts_query).fetchall()
total_summary = list()
for row in sql_table:
temp_row = {col: dict(row)[col] for col in sql_cols_list_y_classes}
total_summary.append(temp_row)
conn.close()
return total_summary
except:
return []
def reset_difficult_texts_sql():
try:
conn = get_db_connection()
cur = conn.cursor()
cur.execute('DELETE FROM difficultTexts')
conn.commit()
conn.close()
return None
except:
return None
def get_available_datasets():
conn = get_db_connection()
cur = conn.cursor()
cur.execute("DELETE FROM availableDatasets;")
cur.execute("INSERT INTO availableDatasets SELECT * FROM fixedDatasets;")
conn.commit()
conn.close()
dataset_name, dataset_url, date_time, y_classes, total_summary = has_save_data()
if dataset_name and date_time and y_classes and total_summary:
date_at_end_check = re.findall(r"(.*)\-[0-9]{4}\-[0-9]{2}\-[0-9]{2}\-\-[0-9]{2}\-[0-9]{2}\-[0-9]{2}", dataset_name)
if len(date_at_end_check) > 0:
dataset_name_alt = date_at_end_check[0]
else:
dataset_name_alt = dataset_name
conn = get_db_connection()
cur = conn.cursor()
cur.execute("INSERT INTO availableDatasets (name, description, url) VALUES (?, ?, ?)",
(dataset_name_alt + "-" + date_time,
"A partially labeled dataset having " + total_summary[2]["percentage"] +
" of " + total_summary[0]["number"] + " texts labeled.",
dataset_url))
conn.commit()
conn.close()
conn = get_db_connection()
available_datasets_sql = conn.execute('SELECT * FROM availableDatasets').fetchall()
conn.close()
return available_datasets_sql
def has_save_data():
try:
dataset_name = get_pkl(name="DATASET_NAME")
dataset_url = get_pkl(name="DATASET_URL")
date_time = get_pkl(name="DATE_TIME")
y_classes = get_pkl(name="Y_CLASSES")
total_summary = get_pkl(name="TOTAL_SUMMARY")
return dataset_name, dataset_url, date_time, y_classes, total_summary
except:
return None, None, None, None, None
def get_all_predictions_sql(fitted_classifier, sparse_vectorized_corpus, corpus_text_ids, texts_list,
top=5,
y_classes=["earthquake", "fire", "flood", "hurricane"],
verbose=False,
round_to=2,
format_as_percentage=False):
predictions = fitted_classifier.predict_proba(sparse_vectorized_corpus)
predictions_df = pd.DataFrame(predictions)
y_classes = [x.replace(" ", "_") for x in y_classes]
predictions_df.columns = y_classes
predictions_summary = predictions_df.replace(0.0, np.NaN).mean(axis=0)
predictions_df["id"] = corpus_text_ids
texts_list_df = pd.DataFrame(texts_list)
predictions_df = predictions_df.merge(texts_list_df, left_on="id", right_on="id")
keep_cols = ["id", "text"]
keep_cols.extend(y_classes)
predictions_df = predictions_df[keep_cols]
pred_scores = score_predictions(predictions_df[y_classes], use_entropy=True, num_labels=len(y_classes))
overall_quality = np.mean(pred_scores)
overall_quality_score_decimal_sql = overall_quality
predictions_df["pred_scores"] = pred_scores
if round_to and not format_as_percentage:
predictions_df[y_classes] = predictions_df[y_classes].round(round_to)
predictions_summary = predictions_summary.round(round_to)
overall_quality = overall_quality.round(round_to)
if format_as_percentage:
if verbose:
print(">> get_all_predictions > predictions_df.head() :")
print(predictions_df.head(top))
predictions_df[y_classes] = predictions_df[y_classes]\
.astype(float)\
.applymap(lambda x: "{0:.0%}".format(x))
# predictions_summary = (predictions_summary.astype(float) * 100).round(1).astype(str) + "%"
overall_quality = (overall_quality.astype(float) * 100).round(1).astype(str) + "%"
predictions_df = predictions_df.sort_values(["pred_scores"], ascending=[True])
if verbose:
print(">> get_all_predictions > predictions_df.head() :")
print(predictions_df.head(top))
print(">> get_all_predictions > predictions_df.tail() :")
print(predictions_df.tail(top))
keep_cols = ["id", "text"]
keep_cols.extend(y_classes)
sql_cols_list = [x + ' TEXT NOT NULL' for x in keep_cols]
sql_cols = ", ".join(sql_cols_list)
top_texts = predictions_df.head(top)[keep_cols].to_dict("records")
sql_query_1 = """
DROP TABLE IF EXISTS difficultTexts;
"""
sql_query_2 = """
CREATE TABLE difficultTexts (
""" + sql_cols + """
);
"""
conn = get_db_connection()
cur = conn.cursor()
cur.execute(sql_query_1)
conn.commit()
cur.execute(sql_query_2)
conn.commit()
parameters = ", ".join(["?"] * len(keep_cols))
query = "INSERT INTO difficultTexts (" + ", ".join(keep_cols) + ") VALUES (%s)" % parameters
for record in top_texts:
insert_values = [value for key, value in record.items()]
cur.execute(query, (insert_values))
conn.commit()
conn.close()
conn = get_db_connection()
select_diff_texts_query = "SELECT " + ", ".join(keep_cols) + " FROM difficultTexts;"
set_variable(name="SELECT_DIFF_TEXTS_QUERY", value=select_diff_texts_query)
set_pkl(name="SQL_COLS_LIST_Y_CLASSES", pkl_data=keep_cols, reset=None)
sql_table = conn.execute(select_diff_texts_query).fetchall()
texts_group_3_sql = []
for row in sql_table:
texts_group_3_sql.append({key: value for key, value in dict(row).items()})
conn.close()
update_overall_quality_scores(value=overall_quality_score_decimal_sql)
set_variable(name="OVERALL_QUALITY_SCORE", value=overall_quality)
overall_quality_score_sql = overall_quality
overall_quality_score_decimal_previous_sql = get_decimal_value(name="OVERALL_QUALITY_SCORE_DECIMAL_PREVIOUS")
return texts_group_3_sql, overall_quality_score_sql, \
overall_quality_score_decimal_sql, overall_quality_score_decimal_previous_sql
def get_top_predictions_sql(selected_class, fitted_classifier, sparse_vectorized_corpus, corpus_text_ids,
texts_list,
top=5,
cutoff_proba=0.95,
y_classes=["earthquake", "fire", "flood", "hurricane"],
verbose=False,
exclude_already_labeled=True):
predictions = fitted_classifier.predict_proba(sparse_vectorized_corpus)
predictions_df = pd.DataFrame(predictions)
predictions_df.columns = y_classes
predictions_df["id"] = corpus_text_ids
keep_cols = ["id"]
keep_cols.extend([selected_class])
predictions_df = predictions_df[keep_cols]
predictions_df = predictions_df[predictions_df[selected_class] > cutoff_proba]
predictions_df = predictions_df.sort_values([selected_class], ascending=False)
if exclude_already_labeled:
texts_list_df = pd.DataFrame.from_dict(texts_list)
predictions_df = predictions_df.merge(texts_list_df, left_on="id", right_on="id", how="left")
predictions_df = predictions_df[predictions_df["label"].isin(["-"])]
if verbose:
print(">> get_top_predictions > predictions_df :")
print(predictions_df.head(top))
filter_list = predictions_df.head(top)["id"].values
top_texts = filter_all_texts(texts_list, filter_list, exclude_already_labeled=False)
set_texts_group_x(top_texts=top_texts, table_name="group2Texts")
texts_group_3_sql = get_texts_group_x(table_name="group2Texts")
return texts_group_3_sql
def fit_classifier_sql(sparse_vectorized_corpus, corpus_text_ids, texts_list, texts_list_labeled,
y_classes=["earthquake", "fire", "flood", "hurricane"],
verbose=False,
random_state=2584,
n_jobs=-1,
labels_got_overridden_flag=False,
full_fit_if_labels_got_overridden=False):
texts_list_labeled_df = pd.DataFrame.from_dict(texts_list_labeled)
if verbose:
print("texts_list_labeled_df :")
print(texts_list_labeled_df.head())
ids = texts_list_labeled_df["id"].values
y_train = texts_list_labeled_df["label"].values
indices = [corpus_text_ids.index(x) for x in ids]
X_train = sparse_vectorized_corpus[indices, :]
classifier_sql = get_pkl(name="CLASSIFIER")
if classifier_sql:
clf = classifier_sql
else:
# clf = make_pipeline(StandardScaler(), SGDClassifier(max_iter=1000, tol=1e-3, random_state=2584))
clf = SGDClassifier(loss="modified_huber", max_iter=1000, tol=1e-3, random_state=random_state, n_jobs=n_jobs)
if labels_got_overridden_flag:
if full_fit_if_labels_got_overridden:
all_texts_list_labeled_df = pd.DataFrame.from_dict(texts_list)
all_texts_list_labeled_df = all_texts_list_labeled_df[~all_texts_list_labeled_df["label"].isin(["-"])]
y_classes_labeled = list(set(all_texts_list_labeled_df["label"].values))
all_classes_present = all(label in y_classes_labeled for label in y_classes)
clf = SGDClassifier(loss="modified_huber", max_iter=1000, tol=1e-3, random_state=random_state,
n_jobs=n_jobs)
ids_all = all_texts_list_labeled_df["id"].values
y_train_all = all_texts_list_labeled_df["label"].values
indices_all = [corpus_text_ids.index(x) for x in ids_all]
X_train_all = sparse_vectorized_corpus[indices_all, :]
if all_classes_present:
clf.fit(X_train_all, y_train_all)
else:
clf.partial_fit(X_train_all, y_train_all, classes=y_classes)
else:
clf.partial_fit(X_train, y_train, classes=y_classes)
else:
clf.partial_fit(X_train, y_train, classes=y_classes)
set_pkl(name="CLASSIFIER", pkl_data=clf, reset=False)
return clf
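# Illustrative sketch (not called anywhere) of the incremental-learning pattern used
# above: partial_fit() needs the full label set on every call, while fit() retrains
# from scratch. The toy data below is hypothetical.
def _example_partial_fit_pattern():
    from scipy.sparse import csr_matrix
    X = csr_matrix(np.eye(4))
    y = np.array(["earthquake", "fire", "flood", "hurricane"])
    clf = SGDClassifier(loss="modified_huber", max_iter=1000, tol=1e-3)
    clf.partial_fit(X, y, classes=y)           # first incremental update
    clf.partial_fit(X[:2], y[:2], classes=y)   # later batches pass the same classes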
def load_new_data_sql(source_file,
text_id_col,
text_value_col,
source_folder="./output/upload/",
shuffle_by="kmeans",
table_limit=50, texts_limit=1000, max_features=100,
y_classes=["Label 1", "Label 2", "Label 3", "Label 4"], rnd_state=258):
data_df = get_new_data(source_file=source_file,
source_folder=source_folder,
number_samples=None,
random_state=rnd_state)
corpus_text_ids = [str(x) for x in data_df[text_id_col].values]
vectorizer = TfidfVectorizer(ngram_range=(1, 2), stop_words="english", max_features=max_features)
vectorized_corpus = vectorizer.fit_transform(data_df[text_value_col].values)
if shuffle_by == "kmeans":
kmeans = KMeans(n_clusters=len(y_classes), random_state=rnd_state).fit(vectorized_corpus)
kmeans_labels = kmeans.labels_
texts_list, adj_text_ids = convert_new_data_into_list_json(data_df,
limit=texts_limit,
shuffle_list=kmeans_labels,
random_shuffle=False,
random_state=rnd_state,
id_col=text_id_col,
text_col=text_value_col,
label_col="label")
else:
texts_list, adj_text_ids = convert_new_data_into_list_json(data_df,
limit=texts_limit,
shuffle_list=[],
random_shuffle=True,
random_state=rnd_state,
id_col=text_id_col,
text_col=text_value_col,
label_col="label")
populate_texts_table_sql(texts_list=texts_list, table_name="texts")
set_pkl(name="DATASET_NAME", pkl_data=None, reset=True)
set_pkl(name="DATASET_NAME", pkl_data=source_file, reset=False)
set_pkl(name="DATASET_URL", pkl_data=None, reset=True)
set_pkl(name="DATASET_URL", pkl_data="-", reset=False)
set_pkl(name="CORPUS_TEXT_IDS", pkl_data=None, reset=True)
set_pkl(name="CORPUS_TEXT_IDS", pkl_data=adj_text_ids, reset=False)
texts_list_list = [texts_list[i:i + table_limit] for i in range(0, len(texts_list), table_limit)]
total_pages = len(texts_list_list)
set_variable(name="TOTAL_PAGES", value=total_pages)
set_pkl(name="TEXTS_LIST_LIST", pkl_data=None, reset=True)
set_pkl(name="TEXTS_LIST_LIST", pkl_data=texts_list_list, reset=False)
set_pkl(name="VECTORIZED_CORPUS", pkl_data=None, reset=True)
set_pkl(name="VECTORIZED_CORPUS", pkl_data=vectorized_corpus, reset=False)
set_pkl(name="VECTORIZER", pkl_data=None, reset=True)
set_pkl(name="VECTORIZER", pkl_data=vectorizer, reset=False)
return texts_list, texts_list_list, adj_text_ids, total_pages, vectorized_corpus, vectorizer, corpus_text_ids
def load_demo_data_sql(dataset_name="Disaster Tweets Dataset", shuffle_by="kmeans",
table_limit=50, texts_limit=1000, max_features=100,
y_classes=["Earthquake", "Fire", "Flood", "Hurricane"], rnd_state=258):
if dataset_name == "Disaster Tweets Dataset":
consolidated_disaster_tweet_data_df = get_disaster_tweet_demo_data(number_samples=None,
filter_data_types=["train"],
random_state=rnd_state)
corpus_text_ids = [str(x) for x in consolidated_disaster_tweet_data_df["tweet_id"].values]
set_pkl(name="CORPUS_TEXT_IDS", pkl_data=corpus_text_ids, reset=False)
vectorizer = TfidfVectorizer(ngram_range=(1, 2), stop_words="english", max_features=max_features)
# https://stackoverflow.com/questions/69326639/sklearn-warnings-in-version-1-0
vectorized_corpus = \
vectorizer.fit_transform(consolidated_disaster_tweet_data_df["tweet_text"].values)
set_pkl(name="VECTORIZER", pkl_data=vectorizer, reset=False)
set_pkl(name="VECTORIZED_CORPUS", pkl_data=vectorized_corpus, reset=False)
if shuffle_by == "kmeans":
kmeans = KMeans(n_clusters=len(y_classes), random_state=rnd_state).fit(vectorized_corpus)
kmeans_labels = kmeans.labels_
texts_list, adj_text_ids = convert_demo_data_into_list_json(consolidated_disaster_tweet_data_df,
limit=texts_limit,
keep_labels=False,
shuffle_list=kmeans_labels,
random_shuffle=False,
random_state=rnd_state)
else:
texts_list, adj_text_ids = convert_demo_data_into_list_json(consolidated_disaster_tweet_data_df,
limit=texts_limit,
keep_labels=False,
shuffle_list=[],
random_shuffle=True,
random_state=rnd_state)
texts_list_list = [texts_list[i:i + table_limit] for i in range(0, len(texts_list), table_limit)]
total_pages = len(texts_list_list)
set_variable(name="TOTAL_PAGES", value=total_pages)
return texts_list, texts_list_list, adj_text_ids, total_pages, vectorized_corpus, vectorizer, corpus_text_ids
def generate_all_predictions_if_appropriate(n_jobs=-1,
labels_got_overridden_flag=True,
full_fit_if_labels_got_overridden=True,
round_to=1,
format_as_percentage=True):
classifier_sql = get_pkl(name="CLASSIFIER")
try:
if classifier_sql:
label_summary_sql = get_label_summary_sql()
label_summary_df = | pd.DataFrame.from_dict(label_summary_sql) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
"""
Created on Thu May 21 09:55:29 2020
@author: Gary
"""
import pandas as pd
import core.Find_silent_change as fsc
import core.Read_FF as rff
import difflib
#import core.Construct_set as const_set
output = './out/'
tempdir = './tmp/'
arcdir = './arc_testing/'
upload_hash_ref = output+'upload_hash_ref.csv'
change_log = output+'silent_change_log.csv'
exclude_files = ['archive_2018_08_28.zip','sky_truth_final.zip']
skyfn = 'sky_truth_final'
def getDfForCompare(fn,sources='./sources/'):
fn = sources+fn
raw_df = rff.Read_FF(zname=fn).import_raw()
raw_df = raw_df[~(raw_df.IngredientKey.isna())]
return raw_df
def initializeSilentChangeRecords():
"""Careful!!! This does what the name describes!"""
ref = pd.DataFrame({'UploadKey':[],'last_hash':[]})
fsc.saveUpdatedHash(ref)
def startFromScratch():
"""Be aware - this initializes everything before running a LONG process on
all archived files!"""
initializeSilentChangeRecords()
archives = fsc.createInitialCompareList()
new = | pd.DataFrame({'UploadKey':None,'rhash':None},index=[]) | pandas.DataFrame |
import pandas as pd
import sys
if len(sys.argv) != 3:
    print("Usage: python3 overhead.py raw.csv transform.csv")
    sys.exit(1)
raw = pd.read_csv(sys.argv[1])
tran = pd.read_csv(sys.argv[2])
half = len(tran) // 2
# raw = raw[half:]
# tran = tran[half:]
merged = | pd.merge(raw,tran, on=['Index', 'Index']) | pandas.merge |
"""
Computing in the thymic Q framework.
"""
import os
import tempfile
import click
import delegator
import numpy as np
import pandas as pd
# These are our "lvj" columns, which are the most common indices for
# probabilities and sequences in what follows.
lvj = ['length', 'v_gene', 'j_gene']
def bound(x, m):
"""
Cut off the values in x with a hard maximum m.
"""
return np.minimum(x, m)
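# Illustrative sketch (not called anywhere): bound() is a plain element-wise clamp.
def _example_bound():
    clipped = bound(np.array([0.5, 2.0, 9.0]), 3.0)
    assert clipped.tolist() == [0.5, 2.0, 3.0]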
def normalize_column(df, col_name):
"""
Normalize a column so that it has sum one.
"""
df[col_name] = df[col_name] / sum(df[col_name])
def add_pseudocount(df, col_name, pseudocount_multiplier):
"""
Add a pseudocount to `col_name`.
The pseudocount is pseudocount_multiplier times the smallest non-zero element.
"""
zero_rows = (df[col_name] == 0)
pseudocount = min(df.loc[~zero_rows, col_name]) * pseudocount_multiplier
df.loc[zero_rows, col_name] += pseudocount
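# Illustrative sketch (not called anywhere): zero probabilities receive half of the
# smallest observed non-zero value, after which the column is re-normalized.
def _example_pseudocount_then_normalize():
    df = pd.DataFrame({'model_P_lvj': [0.0, 0.2, 0.8]})
    add_pseudocount(df, 'model_P_lvj', pseudocount_multiplier=0.5)  # 0.0 -> 0.1
    normalize_column(df, 'model_P_lvj')                             # sums to 1 again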
def read_olga_tsv(path):
"""
Read in a TSV in the format liked by OLGA. Four columns means that there is
DNA data-- if so, drop it.
"""
df = pd.read_csv(path, sep='\t', header=None)
if len(df.columns) == 4:
df = df.iloc[:, 1:]
assert len(df.columns) == 3
df.columns = 'amino_acid v_gene j_gene'.split()
return df
def read_olga_pgen_tsv(path):
"""
Read in a TSV output by OLGA's Pgen calculation.
"""
df = pd.read_csv(path, sep='\t', header=None)
assert len(df.columns) == 4
df.columns = 'amino_acid v_gene j_gene Pgen'.split()
return df
def lvj_frequency_of_olga_tsv(path, col_name):
"""
Read in an OLGA TSV and calculate the frequency of the lvj triples
contained in it.
"""
df = read_olga_tsv(path)
df['length'] = df['amino_acid'].apply(len)
df = df.loc[:, lvj]
df[col_name] = 1.
df = df.groupby(lvj).sum()
normalize_column(df, col_name)
return df
def set_lvj_index(df):
"""
Make an lvj index in place from the length, v_gene, and j_gene.
"""
df['length'] = df['amino_acid'].apply(len)
df.set_index(lvj, inplace=True)
def merge_lvj_dfs(df1, df2, how='outer'):
"""
Merge two data frames on lvj indices.
By default, uses the union of the keys (an "outer" join).
"""
merged = pd.merge(df1, df2, how=how, left_index=True, right_index=True)
merged.fillna(0, inplace=True)
return merged
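# Illustrative sketch (not called anywhere): both inputs must already be indexed by
# (length, v_gene, j_gene); the outer join keeps triples seen in either frame and
# fills the missing side with 0. The gene names below are hypothetical.
def _example_merge_lvj_dfs():
    left = pd.DataFrame({'data_P_lvj': [0.6, 0.4]},
                        index=pd.MultiIndex.from_tuples(
                            [(12, 'TRBV9', 'TRBJ1-1'), (13, 'TRBV9', 'TRBJ1-2')], names=lvj))
    right = pd.DataFrame({'model_P_lvj': [0.7]},
                         index=pd.MultiIndex.from_tuples(
                             [(12, 'TRBV9', 'TRBJ1-1')], names=lvj))
    merged = merge_lvj_dfs(left, right)   # the (13, ...) row gets model_P_lvj == 0.0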
def q_of_train_and_model_pgen(model_p_lvj_csv, train_tsv, max_q=None, pseudocount_multiplier=0.5):
"""
Fit a q distribution, but truncating q at max_q.
"""
# Merge the p_lvj from the data and that from the model:
df = merge_lvj_dfs(
lvj_frequency_of_olga_tsv(train_tsv, col_name='data_P_lvj'), pd.read_csv(model_p_lvj_csv, index_col=lvj))
# We need to do this merge so we can add a pseudocount:
add_pseudocount(df, 'model_P_lvj', pseudocount_multiplier)
normalize_column(df, 'model_P_lvj')
q = df['data_P_lvj'] / df['model_P_lvj']
if max_q:
q = bound(q, max_q)
return | pd.DataFrame({'q': q}) | pandas.DataFrame |
import os, datetime
import csv
import pycurl
import sys
import shutil
from openpyxl import load_workbook
import pandas as pd
import download.box
from io import BytesIO
import numpy as np
import subprocess
#new unrelateds program
#take curated pedigree (curated to best knowledge), subset to those in HCA or HCD stg...
#merge with the list of unrelateds already selected last time around.
#fill in the blanks.
#latest pedigree
dev_peds_path='/home/petra/UbWinSharedSpace1/redcap2nda_Lifespan2019/Dev_pedigrees/'
pedsfile='redcapandexcelpeds09_23_2019.csv'
peds=pd.read_csv(dev_peds_path+pedsfile)
peds=peds[['subjectped','nda_guid','HCD_ID','HCA_ID','final_pedid']]
#nda vars from staging
hcalist='/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/ndafields2intradb/CCF_HCA_STG_23Sept2019.csv'
hcaids= | pd.read_csv(hcalist,header=0) | pandas.read_csv |
"""
helper_fxns.py
by <NAME>
------------------------------------------
A few helper functions that were aggregated from activities I found myself repeating.
Included are:
- graphing feature importance (created for CatBoost but should work
  with relevant sklearn models)
- mass scoring that generates a number of categorical scores, as well as
  confusion matrices, for CV and Holdout data (it does not yet report a loss;
  perhaps add one later)
"""
# Import the usual suspects
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Scorers of various sorts
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, classification_report
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import make_scorer
def feature_imp(model,X,text=True,head=10,graph=True,title='Feature Importance',fgsz=(12,12)):
""" Generate a graph with feature importances, given a fit model 'model', for a dataset X
    model: a fitted model that exposes a feature_importances_ method
X: a dataset, typically as a dataframe
text = True: prints out the feature importances
head = 10: the number of lines of feature importances to print out
graph = True: print graph
title = 'Feature Importance': title of the graph
fgsz = (12,12): size of the graph. 12,12 is great for examining but 6,6 may be better if you have
many models in a single notebook.
"""
# Feature Importances
A = pd.DataFrame(model.feature_importances_)
# Feature Names
Xc = pd.DataFrame(X.columns)
# Create a data set Mat that has all the feature importance values
# concatenated to each respective feature name
Mat = pd.concat([Xc,A],axis=1)
#Rename columns
Mat.columns=['features','importance']
if text == True:
print(Mat.sort_values('importance',ascending=False).head(head))
# Seaborn Barplot graph
if graph==True:
sns.set_style('darkgrid')
plt.figure(figsize=fgsz)
ax = sns.barplot(x='features',y='importance',data=Mat)
ax.set_xticklabels(ax.get_xticklabels(),rotation = 90)
plt.title(title)
plt.show()
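# Illustrative sketch (not called anywhere): feature_imp() works with any fitted model
# exposing feature_importances_; a RandomForest stands in here and the column names
# are hypothetical.
def _example_feature_imp():
    from sklearn.ensemble import RandomForestClassifier
    X_demo = pd.DataFrame(np.random.rand(100, 3), columns=['age', 'income', 'score'])
    y_demo = np.random.randint(0, 2, 100)
    rf = RandomForestClassifier(n_estimators=25).fit(X_demo, y_demo)
    feature_imp(rf, X_demo, text=True, head=3, graph=False)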
# Mass Scoring Function
def mass_scoring(y,y_pred,title='Train'):
"""
Input classification truth and predictions (not probabilities)
Returns a list with precision, recall, f1, and support for both micro and weighted averages
Prints f1/acc, f1(wt), and confusion matrix
title='Train': Title of the data, such as 'CV', 'Holdout'
    The precision, recall, fscore, and support are all delivered in the returned list X_scores.
    To extract measures that use the micro average:
        X_scores[0] is precision, X_scores[1] is recall, X_scores[2] is f1, X_scores[3] is support
    To extract measures that use the weighted average:
        X_scores[4] is precision, X_scores[5] is recall, X_scores[6] is f1, X_scores[7] is support
"""
# precision, recall, f1 and support function from sklearn
prfs_mic = precision_recall_fscore_support(y,y_pred,average='micro')
prfs_wt = precision_recall_fscore_support(y,y_pred,average='weighted')
# Individual components of each
f1mic = prfs_mic[2]
f1wt = prfs_wt[2]
rmic = prfs_mic[1]
rwt = prfs_wt[1]
pmic = prfs_mic[0]
pwt = prfs_wt[0]
smic = prfs_mic[3]
swt = prfs_wt[3]
conf_mat = confusion_matrix(y,y_pred)
# Print the f1/acc, f1(wt) and confusion matrix
print('\n'+title+' Data: ')
print('f1(micro)/acc: '+str(f1mic)+', f1(wt): '+str(f1wt))
print('Confusion Matrix (' + title +'):')
print( | pd.DataFrame(conf_mat) | pandas.DataFrame |
import requests
from bs4 import BeautifulSoup
from re import search
import unicodedata
import pandas as pd
from multiprocessing import Pool
from seo import *
# ...
i=0
def get_head(link):
    Topics = {}
    h1_new = []
    h2_new = []
    h3_new = []
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
request = requests.get(link, headers=headers)
Soup = BeautifulSoup(request.text, 'lxml')
a = []
b = []
c = []
h1_tags = ["h1"]
for tags in Soup.find_all(h1_tags):
a.append(tags.text.strip())
h2_tags = ["h2"]
for tags in Soup.find_all(h2_tags):
b.append(tags.text.strip())
h3_tags = ["h3"]
for tags in Soup.find_all(h3_tags):
c.append(tags.text.strip())
h1 = []
[h1.append(x) for x in a if x not in h1]
h2 = []
[h2.append(x) for x in b if x not in h2]
h3 = []
[h3.append(x) for x in c if x not in h3]
if len(h1) ==0:
message = ""
h1_new.append(message)
else:
for sub in h1:
string_encode = unicodedata.normalize("NFKD", sub)
h1_new.append(string_encode)
if len(h2) ==0:
message = ""
h2_new.append(message)
else:
for sub in h2:
string_encode = unicodedata.normalize("NFKD", sub)
h2_new.append(string_encode)
if len(h3) ==0:
message = ""
h3_new.append(message)
else:
for sub in h3:
string_encode = unicodedata.normalize("NFKD", sub)
h3_new.append(string_encode)
L = [['Heading 1', h1_new], ['Heading 2', h2_new], ['Heading 3',h3_new]]
Topics[link] = L
return Topics
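# Illustrative sketch (not executed): get_head() returns a dict keyed by the URL, e.g.
# get_head("https://example.com") ->
# {"https://example.com": [['Heading 1', [...]], ['Heading 2', [...]], ['Heading 3', [...]]]}
# The URL is hypothetical and the call performs a live HTTP request.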
def excel_maker(main_dict, search_query, links):
H1_Headings = []
H2_Headings = []
H3_Headings = []
Mega_list1 = []
Mega_list2 = []
Mega_list3 = []
r_l=related_queries(search_query)
t_l=related_topics(search_query)
    for keys, values in main_dict.items():
        H1_Headings.append(values[0][1])
        H2_Headings.append(values[1][1])
        H3_Headings.append(values[2][1])
for i in H1_Headings:
for j in i:
Mega_list1.append(j)
for i in H2_Headings:
for j in i:
Mega_list2.append(j)
for i in H3_Headings:
for j in i:
Mega_list3.append(j)
res1 = []
res2 = []
res3 = []
for i in Mega_list1:
if i not in res1:
res1.append(i)
for i in Mega_list2:
if i not in res2:
res2.append(i)
for i in Mega_list3:
if i not in res3:
res3.append(i)
if len(res1) >= len(res2) and len(res1) >= len(res3):
for i in range(len(res1)-len(res2)):
res2.append('[]')
for i in range(len(res1)-len(res3)):
res3.append('[]')
if len(res2) >= len(res1) and len(res2) >= len(res3):
for i in range(len(res2)-len(res1)):
res1.append('[]')
for i in range(len(res2)-len(res3)):
res3.append('[]')
if len(res3) >= len(res1) and len(res3) >= len(res2):
for i in range(len(res3)-len(res1)):
res1.append('[]')
for i in range(len(res3)-len(res2)):
res2.append('[]')
res1 = list(filter(None, res1))
res2 = list(filter(None, res2))
res3 = list(filter(None, res3))
ls = []
for i in links:
ls.append(search_query)
data_tuples = list(zip(ls,links,H1_Headings,H2_Headings,H3_Headings))
df1 = pd.DataFrame(data_tuples, columns=['Search_query','URL','H1_Headings','H2_Headings','H3_Headings'])
df1
import people_also_ask
list1 = []
for question in people_also_ask.get_related_questions(search_query):
list1.append(question)
data_tuples = list(zip(ls,list1))
df2 = pd.DataFrame(data_tuples, columns=['Search_query','People_also_asked'])
df2
list_final = []
for i in res1:
list_final.append(search_query)
data_tuples = list(zip(list_final,res1,res2,res3))
df3 = pd.DataFrame(data_tuples, columns=['Search_query','Unique H1_Headings','Unique H2_Headings','Unique H3_Headings'])
df3
filename = "%s.xlsx" % search_query
with | pd.ExcelWriter(filename, engine='xlsxwriter') | pandas.ExcelWriter |
"""
Plot allele usage
"""
import sys
import logging
import pandas as pd
import dnaio
from ..table import read_table
logger = logging.getLogger(__name__)
def add_arguments(parser):
arg = parser.add_argument
arg('--d-evalue', type=float, default=1E-4,
help='Maximal allowed E-value for D gene match. Default: %(default)s')
arg('--d-coverage', '--D-coverage', type=float, default=65,
        help='Minimum D coverage (in percent). Default: %(default)s%%')
arg('--database', metavar='FASTA',
help='Restrict plotting to the sequences named in the FASTA file. '
'Only the sequence names are used!')
arg('--order', metavar='FASTA',
help='Sort genes according to the order of the records in the FASTA file.')
arg('--x', choices=('V', 'D', 'J'), default='V',
help='Type of gene on x axis. Default: %(default)s')
arg('--gene', choices=('V', 'D', 'J'), default='J',
help='Type of gene on y axis. Default: %(default)s')
arg('alleles', help='List of alleles to plot on y axis, separated by comma')
arg('table', help='Table with parsed and filtered IgBLAST results')
arg('plot', help='Path to output PDF or PNG')
def main(args):
usecols = ['v_call', 'd_call', 'j_call', 'V_errors', 'D_errors', 'J_errors', 'D_covered',
'd_support']
# Support reading a table without D_errors
try:
table = read_table(args.table, usecols=usecols)
except ValueError:
usecols.remove('D_errors')
table = read_table(args.table, usecols=usecols)
logger.info('Table with %s rows read', len(table))
if args.x == 'V' or args.gene == 'V':
table = table[table.V_errors == 0]
logger.info('%s rows remain after requiring V errors = 0', len(table))
if args.gene == 'J' or args.x == 'J':
table = table[table.J_errors == 0]
logger.info('%s rows remain after requiring J errors = 0', len(table))
if args.gene == 'D' or args.x == 'D':
table = table[table.d_support <= args.d_evalue]
logger.info('%s rows remain after requiring D E-value <= %s', len(table), args.d_evalue)
table = table[table.D_covered >= args.d_coverage]
logger.info('%s rows remain after requiring D coverage >= %s', len(table), args.d_coverage)
if 'D_errors' in table.columns:
table = table[table.D_errors == 0]
logger.info('%s rows remain after requiring D errors = 0', len(table))
gene1 = args.x.lower() + '_call'
gene2 = args.gene.lower() + '_call'
expression_counts = table.groupby((gene1, gene2)).size().to_frame().reset_index()
matrix = pd.DataFrame(
expression_counts.pivot(index=gene1, columns=gene2, values=0).fillna(0), dtype=int)
# matrix[v_gene,d_gene] gives co-occurrences of v_gene and d_gene
print('#\n# Expressed genes with counts\n#')
# The .sum() is along axis=0, that is, the V gene counts are summed up,
# resulting in counts for each D/J gene
for g, count in matrix.sum().iteritems():
print(g, '{:8}'.format(count))
alleles = args.alleles.split(',')
for allele in alleles:
if allele not in matrix.columns:
logger.error('Allele %s not expressed in this dataset', allele)
sys.exit(1)
matrix = matrix.loc[:, alleles]
if args.database:
with dnaio.open(args.database) as f:
x_names = [record.name for record in f if record.name in matrix.index]
if not x_names:
logger.error('None of the sequence names in %r were found in the input table',
args.database)
sys.exit(1)
matrix = matrix.loc[x_names, :]
if args.order:
with dnaio.open(args.order) as f:
ordered_names = [r.name.partition('*')[0] for r in f]
gene_order = {name: index for index, name in enumerate(ordered_names)}
def orderfunc(full_name):
name, _, allele = full_name.partition('*')
allele = int(allele)
try:
index = gene_order[name]
except KeyError:
logger.warning('Gene name %s not found in %r, placing it at the end',
name, args.order)
index = 1000000
return index * 1000 + allele
matrix['v_call_tmp'] = | pd.Series(matrix.index, index=matrix.index) | pandas.Series |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
from pathlib import Path
from time import time
import copy
from skimage import color
import matplotlib as mpl
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_validate, GridSearchCV, RandomizedSearchCV
from scipy.stats import loguniform
import umap
# if not installed run: conda install -c conda-forge umap-learn
import hdbscan
from sklearn.cluster import OPTICS, cluster_optics_dbscan
from skimage import io
from scipy.stats import ttest_ind # Welch's t-test
from scipy.stats import mannwhitneyu # Mann-Whitney rank test
from scipy.stats import ks_2samp # Kolmogorov-Smirnov statistic
import sys
sys.path.extend([
'../../tysserand',
'../../mosna',
])
from tysserand import tysserand as ty
from mosna import mosna
# If need to reload modules after their modification
from importlib import reload
# ty = reload(ty)
# mosna = reload(mosna)
# sns = reload(sns)
# plt = reload(plt)
data_dir = Path("../../Commons/Common_data/MIBI-TOF__Triple_Negative_Breast_Cancer__Angelo_lab/processed_data")
patients_path = data_dir / "patient_class.csv"
objects_path = data_dir / "cellData.csv"
images_path = list(data_dir.glob('*.tiff'))
# relate image paths to patient numbers
import re
img_path_patients = [int(re.search('/processed_data/p(.+?)_labeledcell', str(s)).group(1)) for s in images_path]
pat_img = pd.Series(images_path, index=img_path_patients)
##### Patients data
patients = pd.read_csv(patients_path, index_col=0, header=None, names=['patient', 'response'])
### Objects data
obj = | pd.read_csv(objects_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
    @pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails '
                              'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
        df = DataFrame(data=data, index=index, columns=columns, dtype='object')
import pytest
import pandas as pd
from numpy.testing import assert_equal
from pandas.testing import assert_series_equal, assert_index_equal
from siuba.dply.forcats import fct_recode, fct_collapse
@pytest.fixture
def series1():
    yield pd.Series(["a", "b", "c", "d"])
import sqlite3, datetime, os, multiprocessing as mp, concurrent.futures
from pathlib import Path
import click
import pandas as pd
import SimpleITK as sitk
import pydicom
from tqdm import tqdm
ifr = sitk.ImageFileReader()
ifr.LoadPrivateTagsOn()
dcm_tags = { # Attributes
"0008|0005": "SpecificCharacterSet",
"0008|0008": "ImageType",
"0008|0012": "InstanceCreationDate",
"0008|0013": "InstanceCreationTime",
"0008|0016": "SOPClassUID",
"0008|0018": "SOPInstanceUID",
"0008|0020": "StudyDate",
"0008|0021": "SeriesDate",
"0008|0022": "AcquisitionDate",
"0008|0023": "ContentDate",
"0008|0030": "StudyTime",
"0008|0031": "SeriesTime",
"0008|0032": "AcquisitionTime",
"0008|0033": "ContentTime",
"0008|0050": "AccessionNumber",
"0008|0060": "Modality",
"0008|0070": "Manufacturer",
"0008|1010": "StationName",
"0008|1030": "StudyDescription",
"0008|103e": "SeriesDescription",
"0008|1040": "InstitutionalDepartmentName",
"0008|1090": "ManufacturersModelName",
"0010|0020": "PatientID",
"0010|0030": "PatientsBirthDate",
"0010|0040": "PatientsSex",
"0010|1010": "PatientsAge",
"0010|21b0": "AdditionalPatientHistory",
"0012|0062": "PatientIdentityRemoved",
"0012|0063": "DeidentificationMethod",
"0018|0015": "BodyPartExamined",
"0018|0020": "ScanningSequence",
"0018|0021": "SequenceVariant",
"0018|0022": "ScanOptions",
"0018|0023": "MRAcquisitionType",
"0018|0024": "SequenceName",
"0018|0050": "SliceThickness",
"0018|0080": "RepetitionTime",
"0018|0081": "EchoTime",
"0018|0083": "NumberofAverages",
"0018|0084": "ImagingFrequency",
"0018|0085": "ImagedNucleus",
"0018|0087": "MagneticFieldStrength",
"0018|0088": "SpacingBetweenSlices",
"0018|0089": "NumberofPhaseEncodingSteps",
"0018|0091": "EchoTrainLength",
"0018|0093": "PercentSampling",
"0018|0094": "PercentPhaseFieldofView",
"0018|1000": "DeviceSerialNumber",
"0018|1030": "ProtocolName",
"0018|1310": "AcquisitionMatrix",
"0018|1312": "InplanePhaseEncodingDirection",
"0018|1314": "FlipAngle",
"0018|1315": "VariableFlipAngleFlag",
"0018|5100": "PatientPosition",
"0018|9087": "Diffusionbvalue",
"0020|000d": "StudyInstanceUID",
"0020|000e": "SeriesInstanceUID",
"0020|0010": "StudyID",
"0020|0032": "ImagePositionPatient",
"0020|0037": "ImageOrientationPatient",
"0020|0052": "FrameofReferenceUID",
"0020|1041": "SliceLocation",
"0028|0002": "SamplesperPixel",
"0028|0010": "Rows",
"0028|0011": "Columns",
"0028|0030": "PixelSpacing",
"0028|0100": "BitsAllocated",
"0028|0101": "BitsStored",
"0028|0106": "SmallestImagePixelValue",
"0028|0107": "LargestImagePixelValue",
"0028|1050": "WindowCenter",
"0028|1051": "WindowWidth",
"0040|0244": "PerformedProcedureStepStartDate",
"0040|0254": "PerformedProcedureStepDescription"
}
TABLE_DOSSIERS = "Dossiers"
TABLE_PATH = "InputPath"
ORDER_BY = "SeriesTime,StudyTime,StudyInstanceUID,SeriesInstanceUID,PatientID"
def get_pydicom_value(data: pydicom.dataset.FileDataset, key: str):
key = '0x' + key.replace('|', '')
if key in data:
result = data[key]
return result.value if not result.is_empty else None
return None
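# Illustrative usage (hypothetical file path; not part of this script's flow):
# the helper maps the "group|element" keys used in dcm_tags onto pydicom's
# hex-key lookup, returning None when the tag is absent or empty, e.g.:
#   ds = pydicom.dcmread("example.dcm")   # assumed sample file
#   get_pydicom_value(ds, "0008|0060")    # -> e.g. "MR", or None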
class Connection:
def __init__(self, path):
self._conn = sqlite3.connect(os.path.abspath(path))
self._c = self._conn.cursor()
self.name = path.name
def __del__(self):
self._conn.close()
def _refactor_result(self, results: list):
desc = self._c.description
R = []
for r in results:
item = dict()
for i, name in enumerate(desc):
item[name[0]] = r[i]
R.append(item)
return R
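    # e.g. (illustrative): with a cursor description of (('PatientID', ...), ('Modality', ...))
    # and a fetched row ('12345', 'MR'), this yields [{'PatientID': '12345', 'Modality': 'MR'}].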
def select(self, include_siblings=True, **kvp):
q = []
for key, value in kvp.items():
values = value.split(',')
values = [f"{key} LIKE '%{v}%'" for v in values]
values = ' OR '.join(values)
q.append(f"({values})")
selection = 'StudyInstanceUID,SeriesInstanceUID' if include_siblings else '*'
Q = f"SELECT {selection} FROM {TABLE_DOSSIERS} WHERE {' AND '.join(q)} ORDER BY {ORDER_BY}"
# studies can be duplicate (set), series should be unique (list)
studies, series = set(), []
for study, serie in self._c.execute(Q).fetchall():
studies.add(study)
series.append(serie)
temp = 'TEMP'
df = pd.DataFrame.from_records([{'uid': u} for u in studies])
df.to_sql(name=temp, con=self._conn, if_exists='replace')
R = self._c.execute(f"SELECT * FROM {TABLE_DOSSIERS} orig, {temp} t WHERE t.uid = orig.StudyInstanceUID").fetchall()
R = self._refactor_result(R)
self._c.execute(f"DROP TABLE {temp}")
return R, series
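    # Illustrative call (assumed values, not taken from a real database):
    #   rows, series = conn.select(Modality="MR,CT", PatientID="12345")
    # builds a WHERE clause like
    #   (Modality LIKE '%MR%' OR Modality LIKE '%CT%') AND (PatientID LIKE '%12345%')
    # and, because include_siblings defaults to True, returns every row that shares
    # a StudyInstanceUID with a match, plus the list of matching SeriesInstanceUIDs.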
def select_all(self):
Q = f"SELECT * FROM {TABLE_DOSSIERS} ORDER BY {ORDER_BY}"
return self._refactor_result(self._c.execute(Q).fetchall())
class Dossier:
def __init__(self, input_dir: Path, dcm_dir: Path, dcms: list):
self.dcm_dir = dcm_dir
self.sample = dcms[-1]
self.sample_path = str(input_dir / dcm_dir / self.sample)
self.dcms = dcms
self._headers = None
def __len__(self):
return len(self.dcms)
def is_valid(self):
try:
pydicom.dcmread(self.sample_path, specific_tags=['0x00080005'])
except:
try:
ifr.SetFileName(self.sample_path)
ifr.ReadImageInformation()
except:
return False
return True
@property
def headers(self):
if not self._headers:
self._headers = self._dossier_to_row()
return self._headers
def _dossier_to_row(self):
try:
dcm = pydicom.dcmread(self.sample_path)
get_metadata = lambda key: get_pydicom_value(dcm, key)
except:
try:
ifr.SetFileName(self.sample_path)
ifr.ReadImageInformation()
get_metadata = lambda key: ifr.GetMetaData(key)
except Exception as e:
print(f"EXCEPTION (skipping): {self.dcm_dir}")
print(e)
return None
headers = {'SeriesLength': len(self), 'Path': str(self.dcm_dir), 'Sample': self.sample}
for key, header in dcm_tags.items():
try:
header = header.replace(' ', '_').strip()
val = get_metadata(key)
                try:
                    if 'Date' in header and val:
                        val = datetime.datetime(int(val[:4]), int(val[4:6]), int(val[6:]))
                    # if 'Time' in header:
                    #     return f'{val[0:2]}:{val[2:4]}:{val[4:6]}.{int(val[7:])}'
                finally:
                    if isinstance(val, str):
                        val = val.strip()
headers[header] = val
except:
headers[header] = None
return headers
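    # The returned mapping looks roughly like (values are illustrative only):
    #   {'SeriesLength': 24, 'Path': 'patient01/study01/series01', 'Sample': 'IM0024.dcm',
    #    'Modality': 'MR', 'StudyDate': datetime.datetime(2014, 1, 1), ..., 'WindowWidth': None}
    # i.e. one key per entry in dcm_tags plus the three bookkeeping fields added above,
    # with None wherever a tag could not be read.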
# def header_to_date(header):
# header = str(header)
# return datetime.datetime(int(header[:4]), int(header[4:6]), int(header[6:]))
def create(input: Path, output: Path):
try:
sqlite3.connect(output).close()
click.echo(f"Gathering DICOMs from {input} and its subdirectories")
dcms = dict()
dirs = os.listdir(input)
def walk_input(dir: Path):
for dirpath, dirnames, filenames in os.walk(input / dir):
for filename in [f for f in filenames if f.endswith(".dcm")]:
dpath = str(Path(dirpath).relative_to(input))
dcms[dpath] = dcms.get(dpath, []) + [filename]
with concurrent.futures.ThreadPoolExecutor(min(32, (os.cpu_count() or 1) + 4)) as executor:
list(tqdm(executor.map(walk_input, dirs), total=len(dirs)))
# Create Dossier items
dossiers = []
for subpath, filenames in dcms.items():
dossiers.append(Dossier(input, subpath, filenames))
click.echo(f"Creating database from {len(dossiers)} DICOM directories")
rows = []
def process_dossier(dossier):
headers = dossier.headers
if headers:
                rows.append(headers)
with concurrent.futures.ThreadPoolExecutor(min(32, (os.cpu_count() or 1) + 4)) as executor:
list(tqdm(executor.map(process_dossier, dossiers), total=len(dossiers)))
click.echo(f"Writing {len(rows)} rows to SQL database.")
        df_dossiers = pd.DataFrame.from_dict(rows, orient='columns')
import sys
# importing user defined modules
import field_extraction
import lib
import resume_sectioning
import inspect
import logging
import os
import pandas as pd
import numpy as np
import re
import spacy
#import en_core_web_sm
# hide settingwithcopywarning
pd.options.mode.chained_assignment = None
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) # get location of main.py
parentdir = os.path.dirname(currentdir) # get parent directory of main.py (where repository is on local)
sys.path.insert(0, parentdir) # sys.path is the module search path
def main(root_file_path, job_id):
logging.getLogger().setLevel(logging.WARNING) # essentially does print statements to help debug (WARNING)
# logging explained https://appdividend.com/2019/06/08/python-logging-tutorial-with-example-logging-in-python/
observations = extract(root_file_path) # get text from pdf resumes
# to make it like Kraft's
observations['ReqID'] = np.repeat(job_id, len(observations))
observations.dropna(inplace=True)
# to compare it to already parsed info to not have to reparse
    current_observations = pd.read_csv(root_file_path + "Resume-Parser-master-new/data/output/resume_summary.csv")
import pandas as pd
import numpy as np
def build_raw():
df = pd.DataFrame()
# Create columns to store data
    df.insert(0, "Iteracion", pd.Series([], dtype=int))
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
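                # illustrative arithmetic (not part of the test): Timestamp values are
                # stored as nanoseconds since the epoch, so floor-dividing by 10**6
                # yields the millisecond figure the writer emits, e.g.
                #   pd.Timestamp("20130101").value // 1000000 == 1356998400000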
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
# index columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
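        # e.g. (illustrative, plain json behaviour): json.dumps({1: "a"}) == '{"1": "a"}',
        # so the numeric key 1 comes back as the string "1" unless axes are converted.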
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=values, index=index)
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = df.copy()
expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
[
('{"key":b:a:d}', "Expected object or value", "columns"),
# too few indices
(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
"split",
),
# too many columns
(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
"3 columns passed, passed data had 2 columns",
"split",
),
# bad key
(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"unexpected key\(s\): badkey",
"split",
),
],
)
def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
read_json(StringIO(data), orient=orient)
@pytest.mark.parametrize("dtype", [True, False])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
num_df = DataFrame([[1, 2], [4, 5, 6]])
result = read_json(
num_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
assert np.isnan(result.iloc[0, 2])
        obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
import numpy as np
import pandas as pd
import os
def launch_Weighting(directory, out_directory = None, reporting = False):
print("WEIGHTING STARTING")
if out_directory == None:
out_directory=directory
iaaFiles = []
for root, dir, files in os.walk(directory):
for file in files:
if file.endswith('.csv') and 'Dep' in file:
iaaFiles.append(directory+'/'+file)
print("IAA files found:", iaaFiles)
weight_list = []
#get holistic so different weight keys can be used for different types of articles
weight_col = 'Point_Recommendation'
for f in iaaFiles:
if 'olistic' in f:
holistic = pd.read_csv(f, encoding='utf-8')
q1 = holistic[holistic['question_Number'] == 1]
if len(q1) > 0 and int(q1.iloc[0]['agreed_Answer']) == 3:
weight_col = 'Op-Ed'
break
dirname = os.path.dirname(__file__)
# can't use os.path.join, probably because windows uses \ as a separator instead of /
weight_key_path = dirname + os.sep + 'config' + os.sep + 'weight_key.csv'
weight_scaling_path = dirname + os.sep + 'config' + os.sep + 'weight_key_scaling_guide.csv'
for f in iaaFiles:
weight = weighting_alg(f, weight_key_path, weight_scaling_path, out_directory,reporting=reporting,
weight_col = weight_col)
if weight is not None and not weight.empty:
weight_list.append(weight)
if len(weight_list) == 0:
print("No weights")
file = pd.read_csv(iaaFiles[0], encoding = 'utf-8')
columns = file.columns.tolist()
weight_key_cols = pd.read_csv(weight_key_path, encoding= 'utf-8').columns.tolist()
columns = columns + weight_key_cols + ['agreement_adjusted_points', 'Schema']
weights = pd.DataFrame(columns = columns)
weights = weights.loc[:, ~weights.columns.duplicated()]
else:
weights = pd.concat(weight_list)
return weights
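# Illustrative call (hypothetical directory layout, not shipped with this module):
#   weights = launch_Weighting('./scoring_output', out_directory='./weighted', reporting=True)
# picks up every '*Dep*.csv' IAA file in the directory, weights each one, and returns
# the concatenated weight table (or an empty frame with the expected columns).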
def weighting_alg(IAA_csv_file, credibility_weights_csv_file, weight_scale_csv, directory = './', reporting = False,
weight_col = 'Point_Recommendation'):
IAA_csv = pd.read_csv(IAA_csv_file)
#IndexError when the csv is empty
try:
IAA_csv_schema_name = IAA_csv.namespace.iloc[0]
except IndexError:
if IAA_csv.shape[0]<1:
return
else:
print(len(IAA_csv))
print(IAA_csv)
raise Exception('Index Error')
if "uage" in IAA_csv_schema_name:
IAA_csv_schema_type = "Language"
elif "Reason" in IAA_csv_schema_name:
IAA_csv_schema_type = "Reasoning"
elif "Evidence" in IAA_csv_schema_name:
IAA_csv_schema_type = "Evidence"
elif "Probability" in IAA_csv_schema_name:
IAA_csv_schema_type = "Probability"
elif 'olistic' in IAA_csv_schema_name:
IAA_csv_schema_type = "Holistic"
elif 'ource' in IAA_csv_schema_name:
IAA_csv_schema_type = "Sourcing"
else:
print("unweighted IAA", IAA_csv_file, "aborting")
return
IAA_csv = IAA_csv.rename(columns={ "question_Number": "Question_Number", 'agreed_Answer': 'Answer_Number'})
IAA_csv['Schema'] = IAA_csv_schema_type
credibility_weights_csv = pd.read_csv(credibility_weights_csv_file)
if IAA_csv_schema_name not in credibility_weights_csv.values:
raise Exception("Couldn't find weights for schema namespace {}".format(IAA_csv_schema_name))
    weight_scale_table = pd.read_csv(weight_scale_csv)
import argparse #This package is used to read in arguments from command line.
import configparser #This package is used to read in configuration files
import pandas as pd
import xgboost as xgb
import os
import pickle
from QUANTAXIS.QAUtil import QASETTING
from QUANTAXIS.TSFetch.fetchdata import getrawfrommongodb
from QUANTAXIS.TSSU.save_prediction import TS_SU_save_prediction
from QUANTAXIS.TSSU import save_rawdata
config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
config.read('./QUANTAXIS/TSBoosting/config.ini')
'''
get arguments from cli, run:
python3 setup.py install
python3 timeseriesfcst -c /Users/you/Desktop/timeseriesfcst_config/config.ini
# create a new argument parser
parser = argparse.ArgumentParser(description="Simple argument parser")
# add a new command line option, call it '-c' and set its destination to 'config'
parser.add_argument("-c", action="store", dest="config_file")
# get the result
result = parser.parse_args()
# read configuration from configuration file.
config = configparser.ConfigParser()
config.read(result.config_file)
'''
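# A minimal config.ini sketch for ExtendedInterpolation (section and key names here are
# illustrative only, not the project's actual settings):
#   [paths]
#   root = /data/timeseriesfcst
#   model_dir = ${root}/models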
# def readdata(path):
# '''This function reads in a data frame with index 'dtindex' from a specified path.
# Args:
# path: a path that specifies the location of the data.
# Returns:
# A data frame indexed with standard datetime index. The column name of that index must be dtindex.
# '''
#
# data = pd.read_csv(path)
# data.index = data['dtindex']
#
# return data.drop(columns='dtindex')
def fillinmissing(data, dtindex, fillin=None, indicator=False):
'''This function takes a data frame that is indexed by standard datetime index.
It completes the data frame by encoding values to missing records.
Args:
data: a data frame that is indexed by datetime index with missing records to be filled in.
dtindex: a full datetime index list as a reference to locate the missing records.
fillin: indicate what value should be filled in.
indicator: if is True. The function will add an additional column indicts which row is newly filled in.
Returns:
A data frame without missing records.
'''
    fulldata = pd.DataFrame(index=dtindex)
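    # Illustrative (assumed) call: complete an hourly frame and flag the rows that were added
    #   filled = fillinmissing(df, pd.date_range('2020-01-01', periods=24, freq='H'),
    #                          fillin=0, indicator=True)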
import numpy as np
import pytest
from pandas import Categorical, Series
import pandas._testing as tm
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, False, False, True, True, False])),
("last", Series([False, True, True, False, False, False, False])),
(False, Series([False, True, True, False, True, True, False])),
],
)
def test_drop_duplicates(any_numpy_dtype, keep, expected):
tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype))
if tc.dtype == "bool":
pytest.skip("tested separately in test_drop_duplicates_bool")
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
return_value = sc.drop_duplicates(keep=keep, inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, True, True])),
("last", Series([True, True, False, False])),
(False, Series([True, True, True, True])),
],
)
def test_drop_duplicates_bool(keep, expected):
tc = Series([True, False, True, False])
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
return_value = sc.drop_duplicates(keep=keep, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
assert return_value is None
@pytest.mark.parametrize("values", [[], list(range(5))])
def test_drop_duplicates_no_duplicates(any_numpy_dtype, keep, values):
tc = Series(values, dtype=np.dtype(any_numpy_dtype))
expected = Series([False] * len(tc), dtype="bool")
if tc.dtype == "bool":
# 0 -> False and 1-> True
# any other value would be duplicated
tc = tc[:2]
expected = expected[:2]
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
result_dropped = tc.drop_duplicates(keep=keep)
tm.assert_series_equal(result_dropped, tc)
# validate shallow copy
assert result_dropped is not tc
class TestSeriesDropDuplicates:
@pytest.fixture(
params=["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"]
)
def dtype(self, request):
return request.param
@pytest.fixture
def cat_series1(self, dtype, ordered):
# Test case 1
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
cat = Categorical(input1, categories=cat_array, ordered=ordered)
tc1 = Series(cat)
return tc1
def test_drop_duplicates_categorical_non_bool(self, cat_series1):
tc1 = cat_series1
expected = Series([False, False, False, True])
result = tc1.duplicated()
tm.assert_series_equal(result, expected)
result = tc1.drop_duplicates()
tm.assert_series_equal(result, tc1[~expected])
sc = tc1.copy()
return_value = sc.drop_duplicates(inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc1[~expected])
def test_drop_duplicates_categorical_non_bool_keeplast(self, cat_series1):
tc1 = cat_series1
expected = Series([False, False, True, False])
result = tc1.duplicated(keep="last")
tm.assert_series_equal(result, expected)
result = tc1.drop_duplicates(keep="last")
tm.assert_series_equal(result, tc1[~expected])
sc = tc1.copy()
return_value = sc.drop_duplicates(keep="last", inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc1[~expected])
def test_drop_duplicates_categorical_non_bool_keepfalse(self, cat_series1):
tc1 = cat_series1
expected = Series([False, False, True, True])
result = tc1.duplicated(keep=False)
tm.assert_series_equal(result, expected)
result = tc1.drop_duplicates(keep=False)
tm.assert_series_equal(result, tc1[~expected])
sc = tc1.copy()
return_value = sc.drop_duplicates(keep=False, inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc1[~expected])
@pytest.fixture
def cat_series2(self, dtype, ordered):
# Test case 2; TODO: better name
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
cat = Categorical(input2, categories=cat_array, ordered=ordered)
tc2 = Series(cat)
return tc2
def test_drop_duplicates_categorical_non_bool2(self, cat_series2):
# Test case 2; TODO: better name
tc2 = cat_series2
expected = Series([False, False, False, False, True, True, False])
result = tc2.duplicated()
tm.assert_series_equal(result, expected)
result = tc2.drop_duplicates()
tm.assert_series_equal(result, tc2[~expected])
sc = tc2.copy()
return_value = sc.drop_duplicates(inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc2[~expected])
def test_drop_duplicates_categorical_non_bool2_keeplast(self, cat_series2):
tc2 = cat_series2
expected = Series([False, True, True, False, False, False, False])
result = tc2.duplicated(keep="last")
tm.assert_series_equal(result, expected)
result = tc2.drop_duplicates(keep="last")
tm.assert_series_equal(result, tc2[~expected])
sc = tc2.copy()
return_value = sc.drop_duplicates(keep="last", inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc2[~expected])
def test_drop_duplicates_categorical_non_bool2_keepfalse(self, cat_series2):
tc2 = cat_series2
expected = Series([False, True, True, False, True, True, False])
result = tc2.duplicated(keep=False)
        tm.assert_series_equal(result, expected)
'''
@author: <NAME>
Replace the filenames of the best models you found in networks
'''
import tensorflow as tf
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf.keras.backend.set_session(tf.Session(config=tf_config))
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.models import load_model
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, cohen_kappa_score
import configparser, argparse, datetime, visualize, json, os
import pandas as pd
networks = { 'resnet50': ['crop_resnet50' + x + '.h5' for x in ['2019-12-26 06:02:10']],
'densenet':['crop_densenet' + x + '.h5' for x in ['2020-01-01 01:57:07']],
'vgg16':['crop_vgg16'+x+'.h5' for x in ['2019-12-14 17:59:47']]}
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--configPath', help="""path to config file""", default='./config.ini')
parser.add_argument('-t', '--task', help="""task you are performing - refers to the header for each section in the config file""", default='crop-vgg16')
parser.add_argument('-n', '--network', help="""name of the network you are predicting for""", default='vgg16')
parser_args = parser.parse_args()
config = configparser.ConfigParser()
config.read(parser_args.configPath)
te_path = str(config[parser_args.task]['TEST_FOLDER'])
input_size = 224
all_test_accuracies = []
all_kappa_scores = []
for timestamp in networks[parser_args.network]:
model_name = os.path.join('/home/kgadira/multi-modal-crop-classification/8_models/', timestamp)
predicted_probabilities_csv_name = './{}-probs.csv'.format(config[parser_args.task])
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(te_path, target_size = (input_size, input_size), class_mode='categorical', shuffle = False, batch_size=1)
model = load_model(model_name)
print(model.summary())
data_paths = test_generator.filenames
results = model.predict_generator(test_generator,
steps=int(test_generator.samples/1.),
verbose=1)
predictions = np.argmax(results, axis = 1)
actual_labels = test_generator.classes
cm = confusion_matrix(actual_labels, predictions)
print(cm)
classes_lst = ['Corn', 'Cotton', 'Soy', 'Spring Wheat', 'Winter Wheat', 'Barley']
creport = classification_report(y_true = actual_labels, y_pred=predictions, target_names = classes_lst, digits = 4, output_dict = True)
creport_df = pd.DataFrame(creport).transpose()
acc = accuracy_score(actual_labels, predictions)
all_test_accuracies.append(acc)
kappa_score = cohen_kappa_score(actual_labels, predictions)
all_kappa_scores.append(kappa_score)
print(creport_df)
print('Accuracy for {} is {}, kappa score is {}'.format(timestamp, acc, kappa_score))
    predict_df = pd.DataFrame(data=results, columns=['SP0', 'SP1', 'SP2', 'SP3', 'SP4', 'SP5'])
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
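# build expected by hand: keep values where cond is True within the first three labels;
# labels missing from cond[:3] align as False and therefore become NaN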
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these: they would force a change to the itemsize of the input
# dtype, which is not supported
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected)
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float64).max,
np.finfo(np.float64).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float64).max,
np.finfo(np.float64).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
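# setting 'chained_assignment' to None silences the SettingWithCopy warning so the
# chained assignments below can run; it is restored to 'raise' afterwards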
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
self.assertRaises(ValueError, s.drop, 'bc')
self.assertRaises(ValueError, s.drop, ('a', ))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.ix[1:]
assert_series_equal(result, expected)
# bad axis
self.assertRaises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
self.assertTrue(s.index.is_object())
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
self.assertTrue((aa.reindex(diff_a) == fill).all())
if len(diff_b) > 0:
self.assertTrue((ab.reindex(diff_b) == fill).all())
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
self.assertEqual(aa.name, 'ts')
self.assertEqual(ea.name, 'ts')
self.assertEqual(ab.name, 'ts')
self.assertEqual(eb.name, 'ts')
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
_check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
_check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
_check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
self.assertFalse((a[:5] == 5).any())
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
self.assertTrue((a[:5] == 5).all())
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
self.assertFalse((b[:3] == 5).any())
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
self.assertTrue((b[:2] == 5).all())
def test_align_sameindex(self):
a, b = self.ts.align(self.ts, copy=False)
self.assertIs(a.index, self.ts.index)
self.assertIs(b.index, self.ts.index)
# a, b = self.ts.align(self.ts, copy=True)
# self.assertIsNot(a.index, self.ts.index)
# self.assertIsNot(b.index, self.ts.index)
def test_align_multiindex(self):
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(self):
identity = self.series.reindex(self.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
self.assertTrue(np.may_share_memory(self.series.index,
identity.index))
except AttributeError:
pass
self.assertTrue(identity.index.is_(self.series.index))
self.assertTrue(identity.index.identical(self.series.index))
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
self.assertEqual(val, self.series[idx])
"""Simulate a rollout and plot the time series result"""
from copy import copy
import random
from typing import List, Tuple, Optional, TYPE_CHECKING
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import pandas as pd
import seaborn as sns
import torch
import tqdm
from neural_clbf.experiments import Experiment
from neural_clbf.systems.utils import ScenarioList
if TYPE_CHECKING:
from neural_clbf.controllers import Controller # noqa
class RolloutTimeSeriesExperiment(Experiment):
"""An experiment for plotting rollout performance of controllers.
Plots trajectories as a function of time.
"""
def __init__(
self,
name: str,
start_x: torch.Tensor,
plot_x_indices: List[int],
plot_x_labels: List[str],
plot_u_indices: List[int],
plot_u_labels: List[str],
scenarios: Optional[ScenarioList] = None,
n_sims_per_start: int = 5,
t_sim: float = 5.0,
):
"""Initialize an experiment for simulating controller performance.
args:
name: the name of this experiment
start_x: a tensor of initial states to roll out from (one start state per row)
plot_x_indices: a list of the indices of the state variables to plot
plot_x_labels: a list of the labels for each state variable trace
plot_u_indices: a list of the indices of the control inputs to plot
plot_u_labels: a list of the labels for each control trace
scenarios: a list of parameter scenarios to sample from. If None, use the
nominal parameters of the controller's dynamical system
n_sims_per_start: the number of simulations to run (with random parameters),
per row in start_x
t_sim: the amount of time to simulate for
"""
super(RolloutTimeSeriesExperiment, self).__init__(name)
# Save parameters
self.start_x = start_x
self.plot_x_indices = plot_x_indices
self.plot_x_labels = plot_x_labels
self.plot_u_indices = plot_u_indices
self.plot_u_labels = plot_u_labels
self.scenarios = scenarios
self.n_sims_per_start = n_sims_per_start
self.t_sim = t_sim
@torch.no_grad()
def run(self, controller_under_test: "Controller") -> pd.DataFrame:
"""
Run the experiment. This typically evaluates the controller, but the experiment
is free to call other controller methods as needed; if those methods are not
supported by every controller, the experiment is responsible for checking
compatibility with the controller it is given.
args:
controller_under_test: the controller with which to run the experiment
returns:
a pandas DataFrame containing the results of the experiment, in tidy data
format (i.e. each row should correspond to a single observation from the
experiment).
"""
# Deal with optional parameters
if self.scenarios is None:
scenarios = [controller_under_test.dynamics_model.nominal_params]
else:
scenarios = self.scenarios
# Set up a dataframe to store the results
results_df = pd.DataFrame()
# Created by woochanghwang at 28/06/2020
# Modified by woochang at 21/07/2021
'''
Count the ATC codes of candidate drugs
and draw a bar graph of the counts
'''
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def count_ATC_of_candidate_drugs():
virus = "SARS-CoV-2"
# candidate_drug_atc_df = pd.read_excel(f"../result/{virus}/Drug/{virus}_candidate_drug_info_target.SIP.CT.xlsx")
candidate_drug_atc_df = pd.read_excel(f"../result/{virus}/Drug/{virus}_candidate_drug_info_target.SIP.xlsx")
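# A hedged sketch of the remaining steps (assumptions: the spreadsheet exposes an
# "ATC" column holding full ATC codes; the original counting and plotting code is
# not shown here):
#
#   atc_level1_counts = (
#       candidate_drug_atc_df["ATC"]
#       .dropna()
#       .str[:1]              # first character = ATC anatomical main group
#       .value_counts()
#       .sort_index()
#   )
#   atc_level1_counts.plot(kind="bar")
#   plt.xlabel("ATC level-1 code")
#   plt.ylabel("Number of candidate drugs")
#   plt.tight_layout()
#   plt.show()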
"""Module for representing a CastorStudy in Python."""
import itertools
import math
import pathlib
import re
import sys
from datetime import datetime
from operator import attrgetter
from typing import List, Optional, Any, Union, Dict
import pandas as pd
from tqdm import tqdm
from castoredc_api import CastorClient, CastorException
from castoredc_api.study.castor_objects.castor_data_point import CastorDataPoint
from castoredc_api.study.castor_objects import (
CastorField,
CastorFormInstance,
CastorRecord,
CastorStep,
CastorForm,
CastorStudyFormInstance,
CastorSurveyFormInstance,
CastorReportFormInstance,
)
class CastorStudy:
"""Object representing a study in Castor.
Functions as the head of a tree for all interrelations.
Needs an authenticated api_client that is linked to the same study_id to call data."""
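# A hedged usage sketch (the credentials, study id and server URL below are
# placeholders, not values from this module):
#
#   study = CastorStudy("my-client-id", "my-client-secret", "my-study-id",
#                       "data.castoredc.com")
#   dataframes = study.export_to_dataframe()
#   study_df = dataframes["Study"]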
# pylint: disable=too-many-instance-attributes
# Necessary number of attributes to allow caching of information
# pylint: disable=too-many-public-methods
# Necessary number of public methods to interact with study
# pylint: disable=too-many-arguments
# Necessary number of arguments to setup study
def __init__(
self,
client_id: str,
client_secret: str,
study_id: str,
url: str,
test=False,
format_options=None,
) -> None:
"""Create a CastorStudy object."""
self.study_id = study_id
# Set configuration settings
self.configuration = {
"date": "%d-%m-%Y",
"datetime": "%d-%m-%Y %H:%M",
"datetime_seconds": "%d-%m-%Y %H:%M:%S",
"time": "%H:%M",
}
if format_options:
self.configuration.update(format_options)
# Create the client to interact with the study
if test is False:
self.client = CastorClient(client_id, client_secret, url)
self.client.link_study(study_id)
# List of all forms in the study - structure
self.forms_on_id = {}
self.forms_on_name = {}
# Dictionary to store the relationship between a form instance and its form ID
self.form_links = {}
# List of all records in the study - data
self.records = {}
# List of dictionaries of optiongroups
self.optiongroups = {}
# Container variables to save time querying the database
self.__all_report_instances = {}
# STRUCTURE MAPPING
def map_structure(self) -> None:
"""Maps the structure for the study."""
# Reset structure & data
self.forms_on_id = {}
self.forms_on_name = {}
self.form_links = {}
self.records = {}
self.optiongroups = {}
self.__all_report_instances = {}
# Get the structure from the API
print("Downloading Study Structure.", flush=True, file=sys.stderr)
data = self.client.export_study_structure()
# Loop over all fields
for field in tqdm(data, desc="Mapping Study Structure"):
# Check if the form for the field exists, if not, create it
form = self.get_single_form(field["Form Collection ID"])
if form is None:
form = CastorForm(
form_collection_type=field["Form Type"],
form_collection_id=field["Form Collection ID"],
form_collection_name=field["Form Collection Name"],
form_collection_order=field["Form Collection Order"],
)
self.add_form(form)
# Check if the step for the field exists, if not, create it
step = form.get_single_step(field["Form ID"])
if step is None:
step = CastorStep(
step_id=field["Form ID"],
step_name=field["Form Name"],
step_order=field["Form Order"],
)
form.add_step(step)
# Create the field
new_field = CastorField(
field_id=field["Field ID"],
field_name=field["Field Variable Name"],
field_label=field["Field Label"],
field_type=field["Field Type"],
field_required=field["Field Required"],
field_option_group=field["Field Option Group"],
field_order=field["Field Order"],
)
step.add_field(new_field)
# Augment field data
self.__load_field_information()
# Map the field dependencies and optiongroups
self.__map_field_dependencies()
self.__load_optiongroups()
# DATA MAPPING
def map_data(self) -> None:
"""Maps the data for the study."""
self.map_structure()
self.update_links()
self.__link_data()
self.__load_record_information()
self.__load_survey_information()
self.__load_report_information()
def update_links(self) -> None:
"""Creates the links between form and form instances."""
# Reset form links
self.form_links = {}
# Get the name of the survey forms, as the export data can only be linked on name, not on id
print("Downloading Surveys.", flush=True, file=sys.stderr)
surveys = self.client.all_surveys()
self.form_links["Survey"] = {survey["name"]: survey["id"] for survey in surveys}
# Get all report instances that need to be linked
print("Downloading Report Instances.", flush=True, file=sys.stderr)
# Save this data from the database to save time later
report_instances = self.client.all_report_instances(archived=0)
archived_report_instances = self.client.all_report_instances(archived=1)
# Create dict with link id: object
self.__all_report_instances = {
report_instance["id"]: report_instance
for report_instance in report_instances + archived_report_instances
}
# Create dict with link instance_id: form_id
self.form_links["Report"] = {
instance_id: self.__all_report_instances[instance_id]["_embedded"][
"report"
]["id"]
for instance_id in self.__all_report_instances
}
# OPTIONGROUPS
def __load_optiongroups(self) -> None:
"""Loads all optiongroups through the client"""
# Get the optiongroups
print("Downloading Optiongroups", flush=True, file=sys.stderr)
optiongroups = self.client.all_field_optiongroups()
self.optiongroups = {
optiongroup["id"]: optiongroup for optiongroup in optiongroups
}
# AUXILIARY DATA
def __load_record_information(self) -> None:
"""Adds auxiliary data to records."""
print("Downloading Record Information.", flush=True, file=sys.stderr)
record_data = self.client.all_records()
for record_api in tqdm(record_data, desc="Augmenting Record Data"):
record = self.get_single_record(record_api["id"])
record.institute = record_api["_embedded"]["institute"]["name"]
record.randomisation_group = record_api["randomization_group_name"]
record.randomisation_datetime = self.__get_date_or_none(
record_api["randomized_on"]
)
record.archived = record_api["archived"]
def __load_survey_information(self) -> None:
"""Adds auxiliary data to survey forms."""
print("Downloading Survey Information.", flush=True, file=sys.stderr)
survey_package_data = self.client.all_survey_package_instances()
# Create mapping {survey_instance_id: survey_package}
survey_data = {
survey["id"]: {
"package": package,
"record": package["record_id"],
"survey": survey,
}
for package in survey_package_data
for survey in package["_embedded"]["survey_instances"]
}
for survey_instance, values in tqdm(
survey_data.items(), desc="Augmenting Survey Data"
):
# Test if instance in study
local_instance = self.get_single_form_instance_on_id(
instance_id=survey_instance, record_id=values["record"]
)
local_instance.created_on = self.__get_date_or_none(
values["package"]["created_on"]
)
local_instance.sent_on = self.__get_date_or_none(
values["package"]["sent_on"]
)
local_instance.progress = values["survey"]["progress"]
local_instance.completed_on = self.__get_date_or_none(
values["package"]["finished_on"]
)
local_instance.archived = values["package"]["archived"]
local_instance.survey_package_id = values["package"]["id"]
local_instance.survey_package_name = values["package"][
"survey_package_name"
]
def __load_report_information(self) -> None:
"""Adds auxiliary data to report forms."""
for instance_id, report_instance in tqdm(
self.__all_report_instances.items(),
"Augmenting Report Data",
):
# Test if instance in study
local_instance = self.get_single_form_instance_on_id(
instance_id=instance_id, record_id=report_instance["record_id"]
)
local_instance.created_on = datetime.strptime(
report_instance["created_on"], "%Y-%m-%d %H:%M:%S"
).strftime(self.configuration["datetime_seconds"])
if report_instance["parent_type"] == "phase":
local_instance.parent = self.get_single_form(
report_instance["parent_id"]
).form_name
elif report_instance["parent_type"] == "reportInstance":
local_instance.parent = self.get_single_form_instance_on_id(
report_instance["record_id"], report_instance["parent_id"]
).name_of_form
else:
local_instance.parent = "No parent"
local_instance.archived = report_instance["archived"]
def __load_field_information(self):
"""Adds auxillary information to fields."""
all_fields = self.client.all_fields()
for api_field in all_fields:
field = self.get_single_field(api_field["id"])
# Use -inf and inf for easy numeric comparison
field.field_min = (
-math.inf if api_field["field_min"] is None else api_field["field_min"]
)
field.field_max = (
math.inf if api_field["field_max"] is None else api_field["field_max"]
)
def __get_date_or_none(self, dictionary: Optional[dict]) -> Optional[datetime]:
"""Returns the date or None when no date found."""
date = (
None
if dictionary is None
else datetime.strptime(dictionary["date"], "%Y-%m-%d %H:%M:%S.%f").strftime(
self.configuration["datetime_seconds"]
)
)
return date
# FIELD DEPENDENCIES
def __map_field_dependencies(self) -> None:
"""Retrieves all field_dependencies and links them to the right field."""
print("Downloading Field Dependencies", flush=True, file=sys.stderr)
dependencies = self.client.all_field_dependencies()
# Format to dict of {child_id: {"parent_field": parent_field, "parent_value": value}
dependencies = {
dep["child_id"]: {
"parent_field": self.get_single_field(dep["parent_id"]),
"parent_value": dep["value"],
}
for dep in dependencies
}
for child_id in dependencies:
self.get_single_field(child_id).field_dependency = dependencies[child_id]
# DATA ANALYSIS
def export_to_dataframe(self, archived=False) -> dict:
"""Exports all data from a study into a dict of dataframes for statistical analysis."""
self.map_data()
dataframes = {
"Study": self.__export_study_data(archived),
"Surveys": self.__export_survey_data(archived),
"Reports": self.__export_report_data(archived),
}
return dataframes
def export_to_csv(self, archived=False) -> dict:
"""Exports all data to csv files.
Returns dict with file locations."""
now = f"{datetime.now().strftime('%Y%m%d %H%M%S.%f')[:-3]}"
dataframes = self.export_to_dataframe(archived)
# Instantiate output folder
pathlib.Path(pathlib.Path.cwd(), "output").mkdir(parents=True, exist_ok=True)
# Export dataframes
dataframes["Study"] = self.export_dataframe_to_csv(
dataframes["Study"], "Study", now
)
for survey in dataframes["Surveys"]:
dataframes["Surveys"][survey] = self.export_dataframe_to_csv(
dataframes["Surveys"][survey], survey, now
)
for report in dataframes["Reports"]:
dataframes["Reports"][report] = self.export_dataframe_to_csv(
dataframes["Reports"][report], report, now
)
return dataframes
def export_to_feather(self, archived=False) -> dict:
"""Exports all data to feather files.
Returns dict of file locations for export into R."""
now = f"{datetime.now().strftime('%Y%m%d %H%M%S.%f')[:-3]}"
dataframes = self.export_to_dataframe(archived)
# Instantiate output folder
pathlib.Path(pathlib.Path.cwd(), "output").mkdir(parents=True, exist_ok=True)
print("Writing data to feather files...", flush=True, file=sys.stderr)
dataframes["Study"] = self.export_dataframe_to_feather(
dataframes["Study"], "Study", now
)
for report in dataframes["Reports"]:
dataframes["Reports"][report] = self.export_dataframe_to_feather(
dataframes["Reports"][report], report, now
)
for survey in dataframes["Surveys"]:
dataframes["Surveys"][survey] = self.export_dataframe_to_feather(
dataframes["Surveys"][survey], survey, now
)
return dataframes
def export_dataframe_to_csv(
self, dataframe: pd.DataFrame, name: str, now: str
) -> str:
"""Exports a single dataframe to csv and returns the destination path."""
filename = re.sub(r"[^\w\-_\. ]", "_", name)
path = pathlib.Path(
pathlib.Path.cwd(), "output", f"{now} {self.study_id} {filename}.csv"
)
dataframe.to_csv(
path_or_buf=path,
sep=";",
index=False,
)
return str(path)
def export_dataframe_to_feather(
self, dataframe: pd.DataFrame, name: str, now: str
) -> str:
"""Exports a single dataframe to feather and returns the destination path."""
filename = re.sub(r"[^\w\-_\. ]", "_", name)
path = pathlib.Path(
pathlib.Path.cwd(), "output", f"{now} {self.study_id} {filename}.feather"
)
if dataframe.empty:
# If dataframe is empty, set all dtypes to object
# See also https://github.com/reiniervlinschoten/castoredc_api/issues/44
dataframe = dataframe.astype("object")
dataframe.reset_index(drop=True).to_feather(
path,
compression="uncompressed",
)
return str(path)
# HELPERS
def get_single_optiongroup(self, optiongroup_id: str) -> Optional[Dict]:
"""Get a single optiongroup based on id."""
return self.optiongroups.get(optiongroup_id)
def add_form(self, form: CastorForm) -> None:
"""Add a CastorForm to the study."""
self.forms_on_id[form.form_id] = form
self.forms_on_name[form.form_name] = form
form.study = self
def get_all_forms(self) -> List[CastorForm]:
"""Get all linked CastorForms."""
return list(self.forms_on_id.values())
def get_all_survey_forms(self) -> List[CastorForm]:
"""Gets all survey CastorForms."""
return self.get_all_form_type_forms("Survey")
def get_all_report_forms(self) -> List[CastorForm]:
"""Gets all report CastorForms."""
return self.get_all_form_type_forms("Report")
def get_all_form_type_forms(self, form_type: str) -> List[CastorForm]:
"""Gets all CastorForms of form_type."""
forms = self.get_all_forms()
return [form for form in forms if form.form_type == form_type]
def get_all_form_type_form_instances(
self, form_type: str
) -> List[CastorFormInstance]:
"""Gets all CastorForms of form_type."""
instances = self.get_all_form_instances()
return [
instance for instance in instances if instance.instance_type == form_type
]
def get_form_instances_by_form(self, form: CastorForm) -> List:
"""Gets all CastorFormInstances that are an instance of the given Form"""
instances = self.get_all_form_instances()
return [instance for instance in instances if instance.instance_of == form]
def get_single_form(self, form_id: str) -> Optional[CastorForm]:
"""Get a single CastorForm based on id."""
return self.forms_on_id.get(form_id)
def get_single_form_name(self, form_name: str) -> Optional[CastorForm]:
"""Get a single CastorForm based on id."""
return self.forms_on_name.get(form_name)
def add_record(self, record: CastorRecord) -> None:
"""Add a CastorRecord to the study."""
self.records[record.record_id] = record
record.study = self
def get_all_records(self) -> List[CastorRecord]:
"""Get all linked CastorRecords."""
return list(self.records.values())
def get_single_record(self, record_id: str) -> Optional[CastorRecord]:
"""Get a single CastorRecord based on id."""
return self.records.get(record_id)
def get_all_steps(self) -> List[CastorStep]:
"""Get all linked CastorSteps."""
steps = list(
itertools.chain.from_iterable(
[value.get_all_steps() for key, value in self.forms_on_id.items()]
)
)
return steps
def get_single_step(self, step_id_or_name: str) -> Optional[CastorStep]:
"""Get a single CastorStep based on id or name."""
for form in self.get_all_forms():
# Search for step in each form
step = form.get_single_step(step_id_or_name)
# If step found (id and name are both unique)
if step is not None:
return step
# If step not found
return None
def get_all_fields(self) -> List[CastorField]:
"""Get all linked CastorFields."""
fields = list(
itertools.chain.from_iterable(
[value.get_all_fields() for key, value in self.forms_on_id.items()]
)
)
return fields
def get_single_field(self, field_id_or_name: str) -> Optional[CastorField]:
"""Get a single CastorField based on id or name."""
if field_id_or_name == "":
# Some Castor studies have fields for which the name can be empty
# These are nonsensical identifiers, so we can't search on these
return None
for form in self.get_all_forms():
for step in form.get_all_steps():
# Search for field in each step in each form
field = step.get_single_field(field_id_or_name)
# If field found (id and name are both unique)
if field is not None:
return field
# If field not found
return None
def get_all_study_fields(self) -> List[CastorField]:
"""Gets all linked study CastorFields."""
return self.__get_all_form_type_fields("Study")
def get_all_survey_fields(self) -> List[CastorField]:
"""Gets all linked survey CastorFields."""
return self.__get_all_form_type_fields("Survey")
def get_all_report_fields(self) -> List[CastorField]:
"""Gets all linked report CastorFields."""
return self.__get_all_form_type_fields("Report")
def __get_all_form_type_fields(self, form_type: str) -> List[CastorField]:
"""Gets all linked CastorFields belonging to form of form_type."""
fields = self.get_all_fields()
return [field for field in fields if field.step.form.form_type == form_type]
def get_all_form_instances(self) -> List["CastorFormInstance"]:
"""Returns all form instances"""
form_instances = list(
itertools.chain.from_iterable(
[
list(value.form_instances_ids.values())
for key, value in self.records.items()
]
)
)
return form_instances
def get_single_form_instance_on_id(
self,
record_id: str,
instance_id: str,
) -> Optional["CastorFormInstance"]:
"""Returns a single form instance based on id."""
return self.get_single_record(record_id).get_single_form_instance_on_id(
instance_id
)
def get_all_data_points(self) -> List["CastorDataPoint"]:
"""Returns all data_points of the study"""
data_points = list(
itertools.chain.from_iterable(
[value.get_all_data_points() for key, value in self.records.items()]
)
)
return data_points
def get_single_data_point(
self, record_id: str, form_instance_id: str, field_id_or_name: str
) -> Optional["CastorDataPoint"]:
"""Returns a single data_point based on id."""
form_instance = self.get_single_form_instance_on_id(record_id, form_instance_id)
return form_instance.get_single_data_point(field_id_or_name)
def instance_of_form(
self, instance_id: str, instance_type: str
) -> Optional[CastorForm]:
"""Returns the form of which the given id is an instance.
instance_id is id for type: Report, name for type: Survey, or id for type: Study"""
if instance_type == "Study":
form = self.get_single_form(instance_id)
elif instance_type in ("Report", "Survey"):
form_id = self.form_links[instance_type][instance_id]
form = self.get_single_form(form_id)
else:
raise CastorException(f"{instance_type} is not a form type.")
return form
# PRIVATE HELPER FUNCTIONS
def __link_data(self) -> None:
"""Links the study data"""
# Get the data from the API
print("Downloading Study Data.", flush=True, file=sys.stderr)
data = self.client.export_study_data()
# Loop over all fields
for field in tqdm(data, desc="Mapping Data"):
self.__handle_row(field)
def __handle_row(self, field):
"""Handles a row from the export data."""
# Check if the record for the field exists, if not, create it
record = self.get_single_record(field["Record ID"])
if record is None:
record = CastorRecord(record_id=field["Record ID"])
self.add_record(record)
if field["Form Type"] == "":
# If the Form Type is empty, the line indicates a record
pass
else:
# If it is not empty, the line indicates data
self.__handle_data(field, record)
def __handle_data(self, field, record):
"""Handles data from a row from the export data"""
# First check what type of form and check if it exists else create it
if field["Form Type"] == "Study":
form_instance = self.__handle_study_form(field, record)
elif field["Form Type"] == "Report":
form_instance = self.__handle_report_form(field, record)
elif field["Form Type"] == "Survey":
form_instance = self.__handle_survey_form(field, record)
else:
raise CastorException(f"Form Type: {field['Form Type']} does not exist.")
# Check if the field exists, if not, create it
if field["Field ID"] == "":
# No field ID means that the row indicates an empty report or survey
# Empty is a report or survey without any datapoints
pass
else:
self.__handle_data_point(field, form_instance)
def __handle_data_point(self, field, form_instance):
"""Handles the data point from the export data"""
# Check if the data point already exists
# Should not happen, but just in case
data_point = form_instance.get_single_data_point(field["Field ID"])
if data_point is None:
data_point = CastorDataPoint(
field_id=field["Field ID"],
raw_value=field["Value"],
study=self,
filled_in=field["Date"],
)
form_instance.add_data_point(data_point)
else:
raise CastorException("Duplicated data point found!")
def __handle_survey_form(self, field, record):
form_instance = record.get_single_form_instance_on_id(field["Form Instance ID"])
if form_instance is None:
form_instance = CastorSurveyFormInstance(
instance_id=field["Form Instance ID"],
name_of_form=field["Form Instance Name"],
study=self,
)
record.add_form_instance(form_instance)
return form_instance
def __handle_report_form(self, field, record):
form_instance = record.get_single_form_instance_on_id(field["Form Instance ID"])
if form_instance is None:
form_instance = CastorReportFormInstance(
instance_id=field["Form Instance ID"],
name_of_form=field["Form Instance Name"],
study=self,
)
record.add_form_instance(form_instance)
return form_instance
def __handle_study_form(self, field, record):
instance_of_field = self.get_single_field(field["Field ID"])
instance_of_form = instance_of_field.step.form.form_id
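# Study forms have exactly one instance per record, so the form id doubles as the instance id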
form_instance_id = instance_of_form
form_instance = record.get_single_form_instance_on_id(form_instance_id)
if form_instance is None:
form_instance = CastorStudyFormInstance(
instance_id=form_instance_id,
name_of_form=field["Form Instance Name"],
study=self,
)
record.add_form_instance(form_instance)
return form_instance
def __export_study_data(self, archived) -> pd.DataFrame:
"""Returns a dataframe containing all study data."""
# Get study forms
forms = self.get_all_form_type_forms("Study")
df_study = self.__export_data(
forms,
[
"record_id",
"archived",
"institute",
"randomisation_group",
"randomisation_datetime",
],
"Study",
archived,
)
return df_study
def __export_survey_data(self, archived) -> Dict[str, pd.DataFrame]:
"""Returns a dict of dataframes containing all survey data."""
dataframes = {}
# Get survey forms
forms = self.get_all_form_type_forms("Survey")
# For each survey form, create a distinct dataframe
for form in forms:
dataframe = self.__export_data(
[form],
[
"record_id",
"institute",
"survey_name",
"survey_instance_id",
"created_on",
"sent_on",
"progress",
"completed_on",
"package_id",
"package_name",
"archived",
],
"Survey",
archived,
)
# Add to return dict
dataframes[form.form_name] = dataframe
return dataframes
def __export_report_data(self, archived):
"""Returns a dict of dataframes containing all report data."""
dataframes = {}
# Get survey forms
forms = self.get_all_form_type_forms("Report")
# For each survey form, create a distinct dataframe
for form in forms:
dataframe = self.__export_data(
[form],
[
"record_id",
"institute",
"created_on",
"custom_name",
"parent",
"archived",
],
"Report",
archived,
)
# Add to return dict
dataframes[form.form_name] = dataframe
return dataframes
def __export_data(
self,
forms: List["CastorForm"],
extra_columns: List[str],
form_type: str,
archived: bool,
) -> pd.DataFrame:
"""Exports given type of data and returns a dataframe."""
# Get all study fields
fields = self.__filtered_fields_forms(forms)
# Get all data points
if form_type == "Study":
data = self.__get_all_data_points_study(fields, archived)
elif form_type == "Survey":
data = self.__get_all_data_points_survey(forms[0], archived)
elif form_type == "Report":
data = self.__get_all_data_points_report(forms[0], archived)
else:
raise CastorException(
f"{form_type} is not a valid type. Use Study/Survey/Report."
)
# Order fields
sorted_fields = sorted(
fields,
key=attrgetter("step.form.form_order", "step.step_order", "field_order"),
)
# Define columns from study + auxiliary record columns
column_order = extra_columns + [field.field_name for field in sorted_fields]
# Convert study data points to data frame
dataframe = pd.DataFrame.from_records(data, columns=column_order)
# Split up checkbox and numberdate fields (multiple values in one column)
dataframe, column_order = self.__split_up_checkbox_data(
dataframe, fields, column_order
)
dataframe, column_order = self.__split_up_numberdate_data(
dataframe, fields, column_order
)
dataframe = self.__format_year(dataframe, fields)
dataframe = self.__format_categorical_fields(dataframe, fields)
# Order the dataframe
dataframe = dataframe[column_order]
return dataframe
def __format_categorical_fields(
self, dataframe: pd.DataFrame, fields: List[CastorField]
) -> pd.DataFrame:
"""Sets categorical fields to use categorical dtype."""
cat_fields = [
field for field in fields if field.field_type in ["dropdown", "radio"]
]
for field in cat_fields:
# Get options + missings
options = self.get_single_optiongroup(field.field_option_group)["options"]
# Remove duplicates
option_names = list(
set(
[option["name"] for option in options]
+ [
"measurement failed",
"not applicable",
"not asked",
"asked but unknown",
"not done",
]
)
)
# Set columns to categorical
cat_type = pd.CategoricalDtype(categories=option_names, ordered=False)
dataframe[field.field_name] = dataframe[field.field_name].astype(cat_type)
return dataframe
@staticmethod
def __format_year(
dataframe: pd.DataFrame, fields: List[CastorField]
) -> pd.DataFrame:
"""Casts year fields to the correct format."""
# Year fields to Ints
year_fields = [field for field in fields if field.field_type == "year"]
for year in year_fields:
dataframe[year.field_name] = dataframe[year.field_name].astype("Int64")
return dataframe
@staticmethod
def __split_up_numberdate_data(
dataframe: pd.DataFrame, fields: List[CastorField], column_order: List[str]
) -> (pd.DataFrame, List[str]):
"""Splits up the numberdate data in dummies and returns a new dataframe + column order."""
# Select numberdate fields
numberdate_fields = [
field for field in fields if field.field_type == "numberdate"
]
for numberdate in numberdate_fields:
# Create dummies
dummies = [
numberdate.field_name + "_number",
numberdate.field_name + "_date",
]
# Get the data as a list; a missing numberdate is a single NaN, so expand it to
# [NaN, NaN] to keep the two dummy columns aligned
temp_list = dataframe[numberdate.field_name].tolist()
temp_list = [
[item, item] if not pd.Series(item).any() else item
for item in temp_list
]
# Add the dummies to the old data frame
dataframe[dummies] = pd.DataFrame(temp_list, index=dataframe.index)
import copy
from datetime import datetime
import warnings
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series, isna, notna
import pandas._testing as tm
from pandas.core.window.common import _flex_binary_moment
from pandas.tests.window.common import (
Base,
check_pairwise_moment,
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
moments_consistency_series_data,
moments_consistency_std_data,
moments_consistency_var_data,
moments_consistency_var_debiasing_factors,
)
import pandas.tseries.offsets as offsets
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestMoments(Base):
def setup_method(self, method):
self._create_data()
def test_centered_axis_validation(self):
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
with pytest.raises(ValueError):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean()
# bad axis
with pytest.raises(ValueError):
(DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean())
def test_rolling_sum(self, raw):
self._check_moment_func(
np.nansum, name="sum", zero_min_periods_equal=False, raw=raw
)
def test_rolling_count(self, raw):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(
counter, name="count", has_min_periods=False, fill_value=0, raw=raw
)
def test_rolling_mean(self, raw):
self._check_moment_func(np.mean, name="mean", raw=raw)
@td.skip_if_no_scipy
def test_cmov_mean(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, win_type="boxcar", center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window_corner(self):
# GH 8238
# all nan
vals = pd.Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert np.isnan(result).all()
# empty
vals = pd.Series([], dtype=object)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert len(result) == 0
# shorter than window
vals = pd.Series(np.random.randn(5))
result = vals.rolling(10, win_type="boxcar").mean()
assert np.isnan(result).all()
assert len(result) == 5
@td.skip_if_no_scipy
@pytest.mark.parametrize(
"f,xp",
[
(
"mean",
[
[np.nan, np.nan],
[np.nan, np.nan],
[9.252, 9.392],
[8.644, 9.906],
[8.87, 10.208],
[6.81, 8.588],
[7.792, 8.644],
[9.05, 7.824],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"std",
[
[np.nan, np.nan],
[np.nan, np.nan],
[3.789706, 4.068313],
[3.429232, 3.237411],
[3.589269, 3.220810],
[3.405195, 2.380655],
[3.281839, 2.369869],
[3.676846, 1.801799],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"var",
[
[np.nan, np.nan],
[np.nan, np.nan],
[14.36187, 16.55117],
[11.75963, 10.48083],
[12.88285, 10.37362],
[11.59535, 5.66752],
[10.77047, 5.61628],
[13.51920, 3.24648],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"sum",
[
[np.nan, np.nan],
[np.nan, np.nan],
[46.26, 46.96],
[43.22, 49.53],
[44.35, 51.04],
[34.05, 42.94],
[38.96, 43.22],
[45.25, 39.12],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
],
)
def test_cmov_window_frame(self, f, xp):
# GH 8238
df = DataFrame(
np.array(
[
[12.18, 3.64],
[10.18, 9.16],
[13.24, 14.61],
[4.51, 8.11],
[6.15, 11.44],
[9.14, 6.21],
[11.31, 10.67],
[2.94, 6.51],
[9.42, 8.39],
[12.44, 7.34],
]
)
)
xp = DataFrame(np.array(xp))
roll = df.rolling(5, win_type="boxcar", center=True)
rs = getattr(roll, f)()
tm.assert_frame_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_na_min_periods(self):
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = vals.rolling(5, min_periods=4, center=True).mean()
rs = vals.rolling(5, win_type="boxcar", min_periods=4, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular(self, win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
xps = {
"hamming": [
np.nan,
np.nan,
8.71384,
9.56348,
12.38009,
14.03687,
13.8567,
11.81473,
np.nan,
np.nan,
],
"triang": [
np.nan,
np.nan,
9.28667,
10.34667,
12.00556,
13.33889,
13.38,
12.33667,
np.nan,
np.nan,
],
"barthann": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
"bohman": [
np.nan,
np.nan,
7.61599,
9.1764,
12.83559,
14.17267,
14.65923,
11.10401,
np.nan,
np.nan,
],
"blackmanharris": [
np.nan,
np.nan,
6.97691,
9.16438,
13.05052,
14.02156,
15.10512,
10.74574,
np.nan,
np.nan,
],
"nuttall": [
np.nan,
np.nan,
7.04618,
9.16786,
13.02671,
14.03559,
15.05657,
10.78514,
np.nan,
np.nan,
],
"blackman": [
np.nan,
np.nan,
7.73345,
9.17869,
12.79607,
14.20036,
14.57726,
11.16988,
np.nan,
np.nan,
],
"bartlett": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_linear_range(self, win_types):
# GH 8238
vals = np.array(range(10), dtype=np.float64)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_missing_data(self, win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
)
xps = {
"bartlett": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"blackman": [
np.nan,
np.nan,
9.04582,
11.41536,
7.73345,
9.17869,
12.79607,
14.20036,
15.8706,
13.655,
],
"barthann": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"bohman": [
np.nan,
np.nan,
8.9444,
11.56327,
7.61599,
9.1764,
12.83559,
14.17267,
15.90976,
13.655,
],
"hamming": [
np.nan,
np.nan,
9.59321,
10.29694,
8.71384,
9.56348,
12.38009,
14.20565,
15.24694,
13.69758,
],
"nuttall": [
np.nan,
np.nan,
8.47693,
12.2821,
7.04618,
9.16786,
13.02671,
14.03673,
16.08759,
13.65553,
],
"triang": [
np.nan,
np.nan,
9.33167,
9.76125,
9.28667,
10.34667,
12.00556,
13.82125,
14.49429,
13.765,
],
"blackmanharris": [
np.nan,
np.nan,
8.42526,
12.36824,
6.97691,
9.16438,
13.05052,
14.02175,
16.1098,
13.65509,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special(self, win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"power": 2.0, "width": 2.0},
"exponential": {"tau": 10},
}
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
xps = {
"gaussian": [
np.nan,
np.nan,
8.97297,
9.76077,
12.24763,
13.89053,
13.65671,
12.01002,
np.nan,
np.nan,
],
"general_gaussian": [
np.nan,
np.nan,
9.85011,
10.71589,
11.73161,
13.08516,
12.95111,
12.74577,
np.nan,
np.nan,
],
"kaiser": [
np.nan,
np.nan,
9.86851,
11.02969,
11.65161,
12.75129,
12.90702,
12.83757,
np.nan,
np.nan,
],
"exponential": [
np.nan,
np.nan,
9.83364,
11.10472,
11.64551,
12.66138,
12.92379,
12.83770,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types_special])
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special_linear_range(self, win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"power": 2.0, "width": 2.0},
"slepian": {"width": 0.5},
"exponential": {"tau": 10},
}
        vals = np.array(range(10), dtype=np.float64)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
def test_rolling_median(self, raw):
self._check_moment_func(np.median, name="median", raw=raw)
def test_rolling_min(self, raw):
self._check_moment_func(np.min, name="min", raw=raw)
a = pd.Series([1, 2, 3, 4, 5])
result = a.rolling(window=100, min_periods=1).min()
expected = pd.Series(np.ones(len(a)))
tm.assert_series_equal(result, expected)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
def test_rolling_max(self, raw):
self._check_moment_func(np.max, name="max", raw=raw)
a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64)
b = a.rolling(window=100, min_periods=1).max()
tm.assert_almost_equal(a, b)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_rolling_quantile(self, q, raw):
def scoreatpercentile(a, per):
values = np.sort(a, axis=0)
idx = int(per / 1.0 * (values.shape[0] - 1))
if idx == values.shape[0] - 1:
retval = values[-1]
else:
qlow = float(idx) / float(values.shape[0] - 1)
qhig = float(idx + 1) / float(values.shape[0] - 1)
vlow = values[idx]
vhig = values[idx + 1]
retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)
return retval
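        # Worked example (added for clarity): scoreatpercentile([1, 2, 3, 4, 5], 0.1)
        # gives idx=0, qlow=0.0, qhig=0.25, so retval = 1 + (2 - 1) * (0.1 - 0.0) / 0.25
        # = 1.4, matching np.percentile([1, 2, 3, 4, 5], 10).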
def quantile_func(x):
return scoreatpercentile(x, q)
self._check_moment_func(quantile_func, name="quantile", quantile=q, raw=raw)
def test_rolling_quantile_np_percentile(self):
# #9413: Tests that rolling window's quantile default behavior
# is analogous to Numpy's percentile
row = 10
col = 5
idx = pd.date_range("20100101", periods=row, freq="B")
df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
np_percentile = np.percentile(df, [25, 50, 75], axis=0)
tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
@pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1])
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[8.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 7.0],
[0.0, np.nan, 0.2, np.nan, 0.4],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 0.1, np.nan, 0.3, 0.4, 0.5],
[0.5],
[np.nan, 0.7, 0.6],
],
)
def test_rolling_quantile_interpolation_options(
self, quantile, interpolation, data
):
# Tests that rolling window's quantile behavior is analogous to
# Series' quantile for each interpolation option
s = Series(data)
q1 = s.quantile(quantile, interpolation)
q2 = s.expanding(min_periods=1).quantile(quantile, interpolation).iloc[-1]
if np.isnan(q1):
assert np.isnan(q2)
else:
assert q1 == q2
def test_invalid_quantile_value(self):
data = np.arange(5)
s = Series(data)
msg = "Interpolation 'invalid' is not supported"
with pytest.raises(ValueError, match=msg):
s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid")
def test_rolling_quantile_param(self):
ser = Series([0.0, 0.1, 0.5, 0.9, 1.0])
with pytest.raises(ValueError):
ser.rolling(3).quantile(-0.1)
with pytest.raises(ValueError):
ser.rolling(3).quantile(10.0)
with pytest.raises(TypeError):
ser.rolling(3).quantile("foo")
def test_rolling_apply(self, raw):
# suppress warnings about empty slices, as we are deliberately testing
# with a 0-length Series
def f(x):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning,
)
return x[np.isfinite(x)].mean()
self._check_moment_func(np.mean, name="apply", func=f, raw=raw)
def test_rolling_std(self, raw):
self._check_moment_func(lambda x: np.std(x, ddof=1), name="std", raw=raw)
self._check_moment_func(
lambda x: np.std(x, ddof=0), name="std", ddof=0, raw=raw
)
def test_rolling_std_1obs(self):
vals = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
result = vals.rolling(1, min_periods=1).std()
expected = pd.Series([np.nan] * 5)
tm.assert_series_equal(result, expected)
result = vals.rolling(1, min_periods=1).std(ddof=0)
expected = pd.Series([0.0] * 5)
tm.assert_series_equal(result, expected)
result = pd.Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std()
assert np.isnan(result[2])
def test_rolling_std_neg_sqrt(self):
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = pd.Series(
[
0.0011448196318903589,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
]
)
b = a.rolling(window=3).std()
assert np.isfinite(b[2:]).all()
b = a.ewm(span=3).std()
assert np.isfinite(b[2:]).all()
def test_rolling_var(self, raw):
self._check_moment_func(lambda x: np.var(x, ddof=1), name="var", raw=raw)
self._check_moment_func(
lambda x: np.var(x, ddof=0), name="var", ddof=0, raw=raw
)
@td.skip_if_no_scipy
def test_rolling_skew(self, raw):
from scipy.stats import skew
self._check_moment_func(lambda x: skew(x, bias=False), name="skew", raw=raw)
@td.skip_if_no_scipy
def test_rolling_kurt(self, raw):
from scipy.stats import kurtosis
self._check_moment_func(lambda x: kurtosis(x, bias=False), name="kurt", raw=raw)
def _check_moment_func(
self,
static_comp,
name,
raw,
has_min_periods=True,
has_center=True,
has_time_rule=True,
fill_value=None,
zero_min_periods_equal=True,
**kwargs,
):
# inject raw
if name == "apply":
kwargs = copy.copy(kwargs)
kwargs["raw"] = raw
def get_result(obj, window, min_periods=None, center=False):
r = obj.rolling(window=window, min_periods=min_periods, center=center)
return getattr(r, name)(**kwargs)
series_result = get_result(self.series, window=50)
assert isinstance(series_result, Series)
tm.assert_almost_equal(series_result.iloc[-1], static_comp(self.series[-50:]))
frame_result = get_result(self.frame, window=50)
assert isinstance(frame_result, DataFrame)
tm.assert_series_equal(
frame_result.iloc[-1, :],
self.frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw),
check_names=False,
)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
series = self.series[::2].resample("B").mean()
frame = self.frame[::2].resample("B").mean()
if has_min_periods:
series_result = get_result(series, window=win, min_periods=minp)
frame_result = get_result(frame, window=win, min_periods=minp)
else:
series_result = get_result(series, window=win, min_periods=0)
frame_result = get_result(frame, window=win, min_periods=0)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = self.series[::2].truncate(prev_date, last_date)
trunc_frame = self.frame[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], static_comp(trunc_series))
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(static_comp, raw=raw),
check_names=False,
)
# excluding NaNs correctly
obj = Series(randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
if has_min_periods:
result = get_result(obj, 50, min_periods=30)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# min_periods is working correctly
result = get_result(obj, 20, min_periods=15)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(randn(20))
result = get_result(obj2, 10, min_periods=5)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if zero_min_periods_equal:
# min_periods=0 may be equivalent to min_periods=1
result0 = get_result(obj, 20, min_periods=0)
result1 = get_result(obj, 20, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = get_result(obj, 50)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# window larger than series length (#7297)
if has_min_periods:
for minp in (0, len(self.series) - 1, len(self.series)):
result = get_result(self.series, len(self.series) + 1, min_periods=minp)
expected = get_result(self.series, len(self.series), min_periods=minp)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
else:
result = get_result(self.series, len(self.series) + 1, min_periods=0)
expected = get_result(self.series, len(self.series), min_periods=0)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
# check center=True
if has_center:
if has_min_periods:
result = get_result(obj, 20, min_periods=15, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=15
)[9:].reset_index(drop=True)
else:
result = get_result(obj, 20, min_periods=0, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=0
)[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
# shifter index
s = [f"x{x:d}" for x in range(12)]
if has_min_periods:
minp = 10
series_xp = (
get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=minp,
)
.shift(-12)
.reindex(self.series.index)
)
frame_xp = (
get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=minp,
)
.shift(-12)
.reindex(self.frame.index)
)
series_rs = get_result(
self.series, window=25, min_periods=minp, center=True
)
frame_rs = get_result(
self.frame, window=25, min_periods=minp, center=True
)
else:
series_xp = (
get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=0,
)
.shift(-12)
.reindex(self.series.index)
)
frame_xp = (
get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=0,
)
.shift(-12)
.reindex(self.frame.index)
)
series_rs = get_result(
self.series, window=25, min_periods=0, center=True
)
frame_rs = get_result(self.frame, window=25, min_periods=0, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
tm.assert_series_equal(series_xp, series_rs)
tm.assert_frame_equal(frame_xp, frame_rs)
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
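# Added note: the generator above yields (window, min_periods, center) combinations such as
# (1, 0, False) or (3, 2, True); pairs where min_periods exceeds the window are skipped.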
class TestRollingMomentsConsistency(Base):
def setup_method(self, method):
self._create_data()
# binary moments
def test_rolling_cov(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(self, func):
check_pairwise_moment(self.frame, "rolling", func, window=10, min_periods=5)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(self, method):
series = self.frame[1]
res = getattr(series.rolling(window=10), method)(self.frame)
res2 = getattr(self.frame.rolling(window=10), method)(series)
exp = self.frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = self.frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(self.frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{
k: getattr(self.frame[k].rolling(window=10), method)(frame2[k])
for k in self.frame
}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.slow
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
def test_rolling_apply_consistency(
consistency_data, base_functions, no_nan_functions, window, min_periods, center
):
x, is_constant, no_nans = consistency_data
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning,
)
# test consistency between rolling_xyz() and either (a)
# rolling_apply of Series.xyz(), or (b) rolling_apply of
# np.nanxyz()
functions = base_functions
# GH 8269
if no_nans:
functions = no_nan_functions + base_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(
x.rolling(window=window, center=center, min_periods=min_periods), name,
)
if (
require_min_periods
and (min_periods is not None)
and (min_periods < require_min_periods)
):
continue
if name == "count":
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
else:
if name in ["cov", "corr"]:
rolling_f_result = rolling_f(pairwise=False)
else:
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
# GH 9422
if name in ["sum", "prod"]:
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
s = pd.Series(np.zeros(20))
other = pd.Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def test_flex_binary_moment():
# GH3155
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
_flex_binary_moment(5, 6, None)
def test_corr_sanity():
# GH 3155
df = DataFrame(
np.array(
[
[0.87024726, 0.18505595],
[0.64355431, 0.3091617],
[0.92372966, 0.50552513],
[0.00203756, 0.04520709],
[0.84780328, 0.33394331],
[0.78369152, 0.63919667],
]
)
)
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
# and some fuzzing
for _ in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
try:
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
except AssertionError:
print(res)
def test_rolling_cov_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
# Copyright 2021 Research Institute of Systems Planning, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from copy import deepcopy
from typing import List, Optional, Callable, Dict, Set
from enum import IntEnum
import pandas as pd
import sys
from abc import abstractmethod
class RecordInterface: # To avoid conflicts with the pybind metaclass, ABC is not used.
@abstractmethod
def equals(self, other: RecordInterface) -> bool:
pass
@abstractmethod
def merge(self, other: RecordInterface, inplace=False) -> Optional[Record]:
pass
@abstractmethod
def drop_columns(self, columns: List[str], inplace: bool = False) -> Optional[Record]:
pass
@abstractmethod
def add(self, key: str, stamp: int) -> None:
pass
@abstractmethod
def change_dict_key(self, old_key: str, new_key: str) -> None:
pass
@abstractmethod
def get(self, key: str) -> int:
pass
@property
@abstractmethod
def data(self) -> Dict[str, int]:
pass
@property
@abstractmethod
def columns(self) -> Set[str]:
pass
class RecordsInterface: # To avoid conflicts with the pybind metaclass, ABC is not used.
@abstractmethod
def equals(self, other: RecordsInterface) -> bool:
pass
@abstractmethod
def append(self, other: RecordInterface) -> None:
pass
@abstractmethod
def concat(self, other: RecordsInterface, inplace=False) -> Optional[RecordsInterface]:
pass
@abstractmethod
def sort(
self, key: str, sub_key: Optional[str] = None, inplace=False, ascending=True
) -> Optional[RecordsInterface]:
pass
@abstractmethod
def filter(
self, f: Callable[[RecordInterface], bool], inplace: bool = False
) -> Optional[RecordsInterface]:
pass
@abstractmethod
def copy(self) -> RecordsInterface:
pass
@property
@abstractmethod
def data(self) -> List[RecordInterface]:
pass
@abstractmethod
def drop_columns(
self, columns: List[str], inplace: bool = False
) -> Optional[RecordsInterface]:
pass
@abstractmethod
def rename_columns(
self, columns: Dict[str, str], inplace: bool = False
) -> Optional[RecordsInterface]:
pass
@property
@abstractmethod
def columns(self) -> Set[str]:
pass
@abstractmethod
def to_dataframe(self) -> pd.DataFrame:
pass
@abstractmethod
def to_string(self) -> str:
pass
@abstractmethod
def merge(
self,
right_records: Records,
join_key: str,
how: str = "inner",
left_record_sort_key: Optional[str] = None,
right_record_sort_key: Optional[str] = None,
*,
progress_label: Optional[str] = None
) -> Records:
pass
@abstractmethod
def merge_sequencial(
self,
right_records: Records,
left_stamp_key: str,
right_stamp_key: str,
join_key: Optional[str],
how: str = "inner",
*,
progress_label: Optional[str] = None
) -> Records:
pass
@abstractmethod
def merge_sequencial_for_addr_track(
self,
source_stamp_key: str,
source_key: str,
copy_records: RecordsInterface,
copy_stamp_key: str,
copy_from_key: str,
copy_to_key: str,
sink_records: RecordsInterface,
sink_stamp_key: str,
sink_from_key: str,
*,
progress_label: Optional[str] = None
) -> RecordsInterface:
pass
@abstractmethod
def clone(self):
pass
class MergeSideInfo(IntEnum):
LEFT = 0
RIGHT = 1
# class Record(collections.UserDict, RecordInterface):
class Record(RecordInterface):
def __init__(self, init: Optional[Dict] = None):
init = init or {}
self._data = init or {}
self._columns = set(init.keys())
def get(self, key: str) -> int:
return self._data[key]
@property
def data(self) -> Dict[str, int]:
return self._data
@property
def columns(self) -> Set[str]:
return self._columns
def drop_columns(self, columns: List[str], inplace=False) -> Optional[Record]:
data: Dict[str, int]
if inplace:
data = self._data
else:
data = deepcopy(self)._data
for column in columns:
if column not in self.columns:
continue
del data[column]
if inplace:
self._columns -= set(columns)
return None
else:
return Record(data)
def equals(self, other: Record) -> bool: # type: ignore
is_columns_equal = self.columns == other.columns
if is_columns_equal is False:
return False
return self.data == other.data
def add(self, key: str, stamp: int):
self.columns.add(key)
self._data[key] = stamp
def merge(self, other: Record, inplace=False) -> Optional[Record]: # type: ignore
if inplace:
self._data.update(other.data)
self._columns |= other.columns
return None
else:
d = deepcopy(self.data)
d.update(deepcopy(other.data))
return Record(d)
def change_dict_key(self, old_key: str, new_key: str) -> None:
self._data[new_key] = self._data.pop(old_key, None)
self._columns -= set([old_key])
self._columns |= set([new_key])
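# Illustrative usage sketch (added; not part of the original module). The key names and
# timestamp values below are made up purely for demonstration.
def _example_record_usage():
    r1 = Record({'callback_start': 3, 'callback_end': 7})
    r2 = Record({'callback_end': 9, 'latency': 2})
    merged = r1.merge(r2)  # non-inplace merge; right-hand values win on overlapping keys
    merged.change_dict_key('latency', 'callback_latency')
    merged.drop_columns(['callback_start'], inplace=True)
    return merged.data  # {'callback_end': 9, 'callback_latency': 2}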
# class Records(collections.UserList, RecordsInterface):
class Records(RecordsInterface):
def __init__(self, init: Optional[List[Record]] = None):
self._columns: Set[str] = set()
for record in init or []:
self._columns |= record.columns
self._data: List[Record] = init or []
@property
def columns(self) -> Set[str]:
return self._columns
def sort(
self, key: str, sub_key: Optional[str] = None, inplace=False, ascending=True
) -> Optional[Records]:
if inplace:
data = self.data
else:
data = deepcopy(self.data)
if ascending:
if sub_key is None:
data.sort(key=lambda record: record.get(key))
else:
data.sort(
key=lambda record: (record.get(key), record.get(sub_key)) # type: ignore
)
else:
if sub_key is None:
data.sort(key=lambda record: -record.get(key))
else:
data.sort(
key=lambda record: (-record.get(key), -record.get(sub_key)) # type: ignore
)
if inplace:
return None
else:
return Records(data)
def copy(self) -> Records:
return deepcopy(self)
@property
def data(self) -> List[Record]: # type: ignore
return self._data
def append(self, other: Record): # type: ignore
assert isinstance(other, Record)
self._data.append(other)
self._columns |= other.columns
def concat(self, other: Records, inplace=False) -> Optional[Records]: # type: ignore
if inplace:
self._data += other._data
self._columns |= other.columns
return None
else:
d = deepcopy(self._data)
d += deepcopy(other._data)
return Records(d)
def drop_columns(self, columns: List[str], inplace: bool = False) -> Optional[Records]:
data: List[Record]
if inplace:
data = self._data
else:
data = deepcopy(self._data)
for record in data:
record.drop_columns(columns, inplace=True)
if not inplace:
return Records(data)
else:
self._columns -= set(columns)
return None
    def rename_columns(self, columns: Dict[str, str], inplace: bool = False) -> Optional[Records]:
        data: List[Record]
        if inplace:
            data = self._data
        else:
            data = deepcopy(self._data)
        for record in data:
            for key_from, key_to in columns.items():
                if key_from not in record.columns:
                    continue
                record.change_dict_key(key_from, key_to)
        if not inplace:
            # Records(data) recomputes its column set from the renamed records
            return Records(data)
        else:
            self._columns -= set(columns.keys())
            self._columns |= set(columns.values())
            return None
def filter(self, f: Callable[[Record], bool], inplace: bool = False) -> Optional[Records]:
records = Records()
init_columns = self.columns
for record in self._data: # type: Record
if f(record):
records.append(record)
if not inplace:
records._columns = init_columns
records._data = deepcopy(records._data)
return records
else:
self._data = records._data
return None
def equals(self, records: Records) -> bool: # type: ignore
for r, r_ in zip(self.data, records.data):
if r.equals(r_) is False:
return False
if self._columns != records._columns:
return False
return True
def to_dataframe(self) -> pd.DataFrame:
pd_dict = [record.data for record in self.data]
df = | pd.DataFrame.from_dict(pd_dict) | pandas.DataFrame.from_dict |
"""
Test the creation and utilization of a SparkContext.
"""
import pandas as pd
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession, SQLContext
if __name__ == "__main__":
sc = SparkContext.getOrCreate()
sqlContext = SQLContext(sc)
spark = sqlContext.sparkSession
df = | pd.DataFrame([1, 2, 3, 4, 5], columns=['a']) | pandas.DataFrame |
'''
Created on 13.04.2018
@author: malte
'''
import gc
import pickle
import random
import time
from skopt import gbrt_minimize
from skopt import gp_minimize
from algorithms.baseline.Solution import Solution
from algorithms.hybrid.Weighted import Weighted
from helper import inout
from helper.eval import evaluate
import numpy as np
import pandas as pd
NUM_RECOMMENDATIONS=500
# data folder
FOLDER_TRAIN = 'data/data_formatted_50k/'
FOLDER_TEST = 'data/sample_50k_similar/'
DKNN = FOLDER_TEST + 'results_knn_disk_tf-ratio-s50_idf-log10_cosine-s0_k1000.csv-fill-sol.csv'
IKNN = FOLDER_TEST + 'results_iknn-100-a.75-idfw.csv'
SKNN = FOLDER_TEST + 'results_sknn-1000-0-idfw.csv'
IMPLICIT = FOLDER_TEST + 'results_implicitopt-300f-10e-filter20.5.csv'
best = 0
bestr = 0
bestp = 0
bestn = 0
best_key = ""
def main():
train, test = inout.load_dataset( FOLDER_TRAIN, FOLDER_TEST, feather=True )
export_csv_base = 'opt_hybrid/test'
space = [(0.05,1), # nickweight
(0.05,1), # iknnweight
(0.05,1), # sknn
(0.05,1) # implicit
]
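    # Added note: each (low, high) tuple above is one search dimension, so gbrt_minimize
    # below draws 4-element weight vectors from this box and hands them to objective_sv
    # as popt.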
def objective_sv( popt ):
algs = {}
nicknn = Solution( DKNN )
iknn = Solution( IKNN )
sknn = Solution( SKNN )
implicit = Solution( IMPLICIT )
nickw, iknnw, sknnw, implicitw = popt
key = 'weighted-nicknn-iknn-sknn-implicit-{}-{}-{}-{}'.format( nickw, iknnw, sknnw, implicitw )
algs[key] = Weighted( [nicknn, iknn, sknn, implicit], [nickw, iknnw, sknnw, implicitw], training=True )
print( ' -- current run for {}:'.format(key) )
for k, v in algs.items():
tstart = time.time()
v.train( train, test=test )
print( ' -- trained {} in {}s'.format( k, (time.time() - tstart) ) )
results = {}
results_time = {}
for k, v in algs.items():
results[k] = {}
results[k]['playlist_id'] = []
results[k]['track_id'] = []
results[k]['confidence'] = []
results_time[k] = 0
tstart = time.time()
test['actions'].sort_values( 'playlist_id', inplace=True )
plidmap = pd.Series( index=list(test['actions'].playlist_id.unique()), data=range(len(test['actions'].playlist_id.unique())) )
start = np.r_[ 0, test['actions'].groupby('playlist_id').size().cumsum().values ]
tracks = test['actions'].track_id.values
artists = test['actions'].artist_id.values
done = 0
for row in list( zip( test['playlists']['playlist_id'], test['playlists']['name'], test['playlists']['num_tracks'], test['playlists']['num_samples'] ) ):
pid, name, ntracks, nsamples = row
num_hidden = ntracks - nsamples
if pid in plidmap:
sidx = plidmap[pid]
s = start[sidx]
e = start[sidx+1]
actions = tracks[s:e]
artist_ids = artists[s:e]
else:
actions = None
artist_ids = None
for k, v in algs.items():
tpredict = time.time()
res = v.predict( name, actions, playlist_id=pid, artists=artist_ids, num_hidden=num_hidden )
pt = time.time() - tpredict
results[k]['playlist_id'] += [pid]*len(res)
results[k]['track_id'] += list(res.track_id.values)
results[k]['confidence'] += list(res.confidence.values)
results_time[k] += pt
done += 1
if done % 1000 == 0:
print( ' -- finished {} of {} test lists in {}s'.format( done, len(test['playlists']), (time.time() - tstart) ) )
key = ""
for k, v in algs.items():
results[k] = pd.DataFrame.from_dict( results[k] )
results[k].to_csv( FOLDER_TEST + export_csv_base + k +'.csv' )
print( 'prediction time for {}: {}'.format( k, (results_time[k] / len( test['playlists'] ) ) ) )
key=k
rp, page, ndcg = eval( algs.keys(), export_csv_base )
page_norm = 1 - (page / 51)
sum = rp + page_norm + ndcg
global best, bestr, bestp, bestn, best_key
if sum > best: # new best found
best = sum
bestr = rp
bestp = page
bestn = ndcg
best_key = key
print('CURRENT BEST: ' + best_key)
print('WITH SUM: ' + str(best))
print('WITH RP@500: ' + str(bestr))
print('WITH PAGE@500: ' + str(bestp))
print('WITH NDCG@500: ' + str(bestn))
return -sum
res = gbrt_minimize(objective_sv, space)
def eval(list, basepath):
preloaded = inout.load_validation(FOLDER_TEST)
preloaded[0].sort_values( ['num_samples','name'], inplace=True )
all = pd.DataFrame()
all_parts = | pd.DataFrame() | pandas.DataFrame |
import sys
import numpy as np
import os
import pandas as pd
from sklearn import preprocessing
import re
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score , roc_auc_score , log_loss
import sklearn.linear_model as lm
from sklearn.model_selection import GridSearchCV
import Stemmer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import roc_auc_score, log_loss
from numpy import linalg as LA
from sklearn import neighbors
from sklearn.neural_network import MLPClassifier
from bs4 import BeautifulSoup
#import xgboost as xgb
import datetime as dt
# StemmedTfidfVectorizer
english_stemmer = Stemmer.Stemmer('en')
class StemmedTfidfVectorizer(TfidfVectorizer):
def build_analyzer(self):
analyzer = super(TfidfVectorizer, self).build_analyzer()
return lambda doc: english_stemmer.stemWords(analyzer(doc))
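# Illustrative sketch (added; not in the original script): fitting the stemming vectorizer
# on two toy documents, assuming the PyStemmer 'en' stemmer above loaded successfully.
def _example_stemmed_tfidf():
    docs = ["the cats are running fast", "a cat runs slowly"]
    vec = StemmedTfidfVectorizer(min_df=1, stop_words="english")
    tfidf = vec.fit_transform(docs)
    # stemming collapses 'cats'/'cat' and 'running'/'runs' onto shared stems in the vocabulary
    return sorted(vec.vocabulary_), tfidf.shape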
def text_to_wordlist( review, remove_stopwords=False ):
# Function to convert a document to a sequence of words,
# optionally removing stop words. Returns a list of words.
#
# 1. Remove HTML
text = BeautifulSoup(review,'html.parser').get_text()
#
# 2. Remove non-letters
    text = re.sub("[^A-Za-z0-9^,?!.\/'+-=]"," ", text)
text = re.sub(r"what's", "what is ", text)
text = re.sub(r"\'s", " ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", "cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"i'm", "i am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r"\'scuse", " excuse ", text)
text = re.sub(r",", " ", text)
text = re.sub(r"\.", " ", text)
text = re.sub(r"!", " ! ", text)
text = re.sub(r"\?", " ? ", text)
#
# 3. Convert words to lower case and split them
words = text.lower().split()
#
# 4. Optionally remove stop words (false by default)
if remove_stopwords:
stops = set(stopwords.words("english"))
words = [w for w in words if not w in stops]
# 5. Return a list
return(words)
def clean_text( text ):
# Function to convert a document to a sequence of words,
# optionally removing stop words. Returns a list of words.
#
# 1. Remove HTML
#text = BeautifulSoup(review,'html.parser').get_text()
#
# 2. Remove non-letters
    text = re.sub("[^A-Za-z0-9^,?!.\/'+-=]"," ", text)
text = re.sub(r"what's", "what is ", text)
text = re.sub(r"\'s", " ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", "cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"i'm", "i am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r"\'scuse", " excuse ", text)
text = re.sub(r",", " ", text)
text = re.sub(r"\.", " ", text)
text = re.sub(r"!", " _exclamationmark_ ", text)
text = re.sub(r"\?", " _questionmark_ ", text)
#
return text
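# Illustrative example (added; not in the original script):
#   clean_text("what's up, they'll see!")
# comes out roughly as "what is up they will see _exclamationmark_ " (modulo extra spaces):
# contractions are expanded and punctuation is mapped to placeholder tokens, with no
# lowercasing or tokenization.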
def build_data_set(ngram=3,stem=False,max_features=2000,min_df=2,remove_stopwords=True):
train = | pd.read_csv('data/train.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Main data processing script, loads ChIP-exo transcription factor data and processes into interval based data.
Also creates the features used for the machine learning models.
Part of the Hyena Toolbox (see https://github.com/SysBioChalmers/Hyena)
@author: <NAME>; Chalmers University of Technology, Gothenburg Sweden
"""
import pandas as pd
import numpy as np
from datetime import datetime
def parseWigLike(selected_genes, selected_tf):
#load data
path_to_ChIPexo = 'Data_ChIPexo/'
data_Glu=[[x for x in line.rstrip('\n\r').split('\t')] for line in open(path_to_ChIPexo + selected_tf+'_Glu.wigLike')]
data_Glu = {x[0] + '_' + x[1] : float(x[2]) for x in data_Glu if x[0] in selected_genes}
data_Eth=[[x for x in line.rstrip('\n\r').split('\t')] for line in open(path_to_ChIPexo + selected_tf+'_Eth.wigLike')]
data_Eth = {x[0] + '_' + x[1] : float(x[2]) for x in data_Eth if x[0] in selected_genes}
combined_keys = list(set(list(data_Glu.keys()) + list(data_Eth.keys())))
combined_keys = [x.split('_') for x in combined_keys]
combined_keys.sort(key = lambda x : (x[0], int(x[1])))
tf_data = []
for gene, pos in combined_keys:
tf_data.append({'Gene' : gene,
'TF' : selected_tf,
'Pos' : int(pos) - 1000,
'Glu_Value' : data_Glu.get(gene + '_' + pos, 0),
'Eth_Value' : data_Eth.get(gene + '_' + pos, 0),
'Diff_Value' : data_Eth.get(gene + '_' + pos, 0) - data_Glu.get(gene + '_' + pos, 0)})
tf_data = | pd.DataFrame(tf_data) | pandas.DataFrame |
import pandas as pd
import plotly.graph_objs as go
####### STUDIES TRACES ######
def tech_indicator_plot(df, study, fig):
return eval(study)(df, fig)
def tech_indicator_subplot(df, study):
return eval(study)(df)
# Moving average
def moving_average_trace(df, fig):
MA = df["Close"].rolling(window=5).mean()
trace = go.Scatter(
x=df["Date"], y=MA, mode="lines", showlegend=False, name="MA",
line=dict(width=1)
)
fig.append_trace(trace, 1, 1) # plot in first row
return fig
# Exponential moving average
def e_moving_average_trace(df, fig):
    EMA = df["Close"].ewm(span=20).mean()  # exponential weighting, matching the "EMA" label
trace = go.Scatter(
x=df["Date"], y=EMA, mode="lines", showlegend=False, name="EMA",
line=dict(width=1)
)
fig.append_trace(trace, 1, 1) # plot in first row
return fig
# Bollinger Bands
def bollinger_trace(df, fig, window_size=10, num_of_std=5):
price = df["Close"]
rolling_mean = price.rolling(window=window_size).mean()
rolling_std = price.rolling(window=window_size).std()
upper_band = rolling_mean + (rolling_std * num_of_std)
Lower_band = rolling_mean - (rolling_std * num_of_std)
trace = go.Scatter(
x=df["Date"], y=upper_band, mode="lines", showlegend=False, name="BB_upper",
line=dict(width=1)
)
trace2 = go.Scatter(
x=df["Date"], y=rolling_mean, mode="lines", showlegend=False, name="BB_mean",
line=dict(width=1)
)
trace3 = go.Scatter(
x=df["Date"], y=Lower_band, mode="lines", showlegend=False, name="BB_Lower",
line=dict(width=1)
)
fig.append_trace(trace, 1, 1) # plot in first row
fig.append_trace(trace2, 1, 1) # plot in first row
fig.append_trace(trace3, 1, 1) # plot in first row
return fig
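# Illustrative usage sketch (added; not part of the original module). The *_trace helpers
# assume a DataFrame with Date/Open/High/Low/Close columns and a figure whose first subplot
# row holds the price chart; the candlestick styling below is an assumption.
def _example_indicator_figure(df):
    from plotly.subplots import make_subplots
    fig = make_subplots(rows=2, cols=1, shared_xaxes=True)
    fig.add_trace(
        go.Candlestick(x=df["Date"], open=df["Open"], high=df["High"],
                       low=df["Low"], close=df["Close"], name="price"),
        row=1, col=1,
    )
    fig = bollinger_trace(df, fig)  # overlays upper/mean/lower bands on the price row
    fig.add_trace(cci_trace(df), row=2, col=1)  # oscillators go in their own row
    return fig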
# Accumulation Distribution
def accumulation_trace(df):
df["Volume"] = ((df["Close"] - df["Low"]) - (df["High"] - df["Close"])) / (
df["High"] - df["Low"]
)
trace = go.Scatter(
x=df["Date"], y=df["Volume"], mode="lines", showlegend=False, name="Accumulation",
line=dict(width=1)
)
return trace
# Commodity Channel Index
def cci_trace(df, ndays=5):
TP = (df["High"] + df["Low"] + df["Close"]) / 3
CCI = pd.Series(
(TP - TP.rolling(window=10, center=False).mean())
/ (0.015 * TP.rolling(window=10, center=False).std()),
name="cci",
)
trace = go.Scatter(
x=df["Date"], y=CCI, mode="lines", showlegend=False, name="CCI",
line=dict(width=1)
)
return trace
# Price Rate of Change
def roc_trace(df, ndays=5):
N = df["Close"].diff(ndays)
D = df["Close"].shift(ndays)
ROC = pd.Series(N / D, name="roc")
trace = go.Scatter(
x=df["Date"], y=ROC, mode="lines", showlegend=False, name="ROC",
line=dict(width=1)
)
return trace
# Stochastic oscillator %K
def stoc_trace(df):
SOk = pd.Series((df["Close"] - df["Low"]) / (df["High"] - df["Low"]), name="SO%k")
trace = go.Scatter(
x=df["Date"], y=SOk, mode="lines", showlegend=False, name="SO%k",
line=dict(width=1)
)
return trace
# Momentum
def mom_trace(df, n=5):
M = pd.Series(df["Close"].diff(n), name="Momentum_" + str(n))
trace = go.Scatter(
x=df["Date"], y=M, mode="lines", showlegend=False, name="MOM",
line=dict(width=1)
)
return trace
# Pivot points
def pp_trace(df, fig):
PP = | pd.Series((df["High"] + df["Low"] + df["Close"]) / 3) | pandas.Series |
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core.sparse.api import SparseDtype
class TestSparseSeriesIndexing(object):
def setup_method(self, method):
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
self.sparse = self.orig.to_sparse()
def test_getitem(self):
orig = self.orig
sparse = self.sparse
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
def test_getitem_int_dtype(self):
# GH 8292
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6],
fill_value=0, name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
def test_getitem_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[2] == 0
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_ellipsis(self):
# GH 9467
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan])
tm.assert_sp_series_equal(s[...], s)
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0)
tm.assert_sp_series_equal(s[...], s)
def test_getitem_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse[:2],
orig[:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[4:2],
orig[4:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[::2],
orig[::2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[-5:],
orig[-5:].to_sparse(fill_value=0))
def test_loc(self):
orig = self.orig
sparse = self.sparse
assert sparse.loc[0] == 1
assert np.isnan(sparse.loc[1])
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.reindex([1, 3, 4, 5])
exp = orig.reindex([1, 3, 4, 5]).to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
assert np.isnan(result[-1])
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list('ABCDE'))
sparse = orig.to_sparse()
assert sparse.loc['A'] == 1
assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.loc['A'] == 1
assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_loc_slice_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc['C':],
orig.loc['C':].to_sparse(fill_value=0))
def test_loc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc[2:],
orig.loc[2:].to_sparse(fill_value=0))
def test_iloc(self):
orig = self.orig
sparse = self.sparse
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[2])
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
result = sparse.iloc[[1, -2, -4]]
exp = orig.iloc[[1, -2, -4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
with pytest.raises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[1])
assert sparse.iloc[4] == 0
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_iloc_slice(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_iloc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.iloc[2:],
orig.iloc[2:].to_sparse(fill_value=0))
def test_at(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
assert sparse.at[0] == orig.at[0]
assert np.isnan(sparse.at[1])
assert np.isnan(sparse.at[2])
assert sparse.at[3] == orig.at[3]
assert np.isnan(sparse.at[4])
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('abcde'))
sparse = orig.to_sparse()
assert sparse.at['a'] == orig.at['a']
assert np.isnan(sparse.at['b'])
assert np.isnan(sparse.at['c'])
assert sparse.at['d'] == orig.at['d']
assert np.isnan(sparse.at['e'])
def test_at_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('abcde'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.at['a'] == orig.at['a']
assert np.isnan(sparse.at['b'])
assert sparse.at['c'] == orig.at['c']
assert sparse.at['d'] == orig.at['d']
assert sparse.at['e'] == orig.at['e']
def test_iat(self):
orig = self.orig
sparse = self.sparse
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert np.isnan(sparse.iat[2])
assert sparse.iat[3] == orig.iat[3]
assert np.isnan(sparse.iat[4])
assert np.isnan(sparse.iat[-1])
assert sparse.iat[-5] == orig.iat[-5]
def test_iat_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse()
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert sparse.iat[2] == orig.iat[2]
assert sparse.iat[3] == orig.iat[3]
assert sparse.iat[4] == orig.iat[4]
assert sparse.iat[-1] == orig.iat[-1]
assert sparse.iat[-5] == orig.iat[-5]
def test_get(self):
s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
assert s.get(0) == 1
assert np.isnan(s.get(1))
assert s.get(5) is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'))
assert s.get('A') == 1
assert np.isnan(s.get('B'))
assert s.get('C') == 0
assert s.get('XX') is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'),
fill_value=0)
assert s.get('A') == 1
assert np.isnan(s.get('B'))
assert s.get('C') == 0
assert s.get('XX') is None
def test_take(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_series_equal(sparse.take([0, 1, 3]),
orig.take([0, 1, 3]).to_sparse())
tm.assert_sp_series_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse(fill_value=0))
exp = orig.take([0, 1, 3]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0, 1, 3]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
# all missing & fill_value
res = sparse.reindex(['B', 'E', 'C'])
exp = orig.reindex(['B', 'E', 'C']).to_sparse()
tm.assert_sp_series_equal(res, exp)
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
def test_fill_value_reindex(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# includes missing and fill_value
res = sparse.reindex(['A', 'B', 'C'])
exp = orig.reindex(['A', 'B', 'C']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all missing
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all fill_value
orig = pd.Series([0., 0., 0., 0., 0.],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
def test_fill_value_reindex_coerces_float_int(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_reindex_fill_value(self):
floats = pd.Series([1., 2., 3.]).to_sparse()
result = floats.reindex([1, 2, 3], fill_value=0)
expected = pd.Series([2., 3., 0], index=[1, 2, 3]).to_sparse()
tm.assert_sp_series_equal(result, expected)
def test_reindex_nearest(self):
s = pd.Series(np.arange(10, dtype='float64')).to_sparse()
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = pd.Series(np.around(target), target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = pd.Series([0, 1, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method='nearest',
tolerance=[0.3, 0.01, 0.4, 3])
expected = pd.Series([0, np.nan, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
def tests_indexing_with_sparse(self):
# GH 13985
for kind in ['integer', 'block']:
for fill in [True, False, np.nan]:
arr = pd.SparseArray([1, 2, 3], kind=kind)
indexer = pd.SparseArray([True, False, True], fill_value=fill,
dtype=bool)
tm.assert_sp_array_equal(pd.SparseArray([1, 3], kind=kind),
arr[indexer],)
s = pd.SparseSeries(arr, index=['a', 'b', 'c'],
dtype=np.float64)
exp = pd.SparseSeries(
[1, 3], index=['a', 'c'],
dtype=SparseDtype(np.float64, s.fill_value),
kind=kind
)
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
tm.assert_sp_series_equal(s.iloc[indexer], exp)
indexer = pd.SparseSeries(indexer, index=['a', 'b', 'c'])
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
msg = ("iLocation based boolean indexing cannot use an "
"indexable as a mask")
with tm.assert_raises_regex(ValueError, msg):
s.iloc[indexer]
class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
def setup_method(self, method):
# Mi with duplicated values
idx = pd.MultiIndex.from_tuples([('A', 0), ('A', 1), ('B', 0),
('C', 0), ('C', 1)])
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=idx)
self.sparse = self.orig.to_sparse()
def test_getitem_multi(self):
orig = self.orig
sparse = self.sparse
assert sparse[0] == orig[0]
assert np.isnan(sparse[1])
assert sparse[3] == orig[3]
tm.assert_sp_series_equal(sparse['A'], orig['A'].to_sparse())
tm.assert_sp_series_equal(sparse['B'], orig['B'].to_sparse())
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_multi_tuple(self):
orig = self.orig
sparse = self.sparse
assert sparse['C', 0] == orig['C', 0]
assert np.isnan(sparse['A', 1])
assert np.isnan(sparse['B', 0])
def test_getitems_slice_multi(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[2:], orig[2:].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B':], orig.loc['B':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['C':], orig.loc['C':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['A':'B'],
orig.loc['A':'B'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:'B'], orig.loc[:'B'].to_sparse())
def test_loc(self):
# need to be override to use different label
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc['A'],
orig.loc['A'].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B'],
orig.loc['B'].to_sparse())
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# single element list (GH 15447)
result = sparse.loc[['A']]
exp = orig.loc[['A']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
| tm.assert_sp_series_equal(result, exp) | pandas.util.testing.assert_sp_series_equal |
import numpy as np
import pandas as pd
import os
import time
import scipy.stats as stats
from numpy.linalg import svd, lstsq
from sklearn.decomposition import PCA
from scipy.stats import linregress, f_oneway
import itertools
import sys
from statsmodels.nonparametric.smoothers_lowess import lowess
from tqdm import tqdm
from sklearn.preprocessing import scale
from sklearn.neighbors import NearestNeighbors
import math
import json
from ctypes import c_int
import pickle
from multiprocess import Pool, current_process, Manager
from functools import partial
from sklearn import preprocessing
class imputable:
"""Imputes missing data with K Nearest Neighbors based on user specified parameters.
This class generates an object from raw data and allows for preprocessing, imputation and output of that data.
Parameters
----------
filename : str
This is the path to the file containing the raw data.
missingness : float
This is the maximum allowable percentage of missingness expressed as a decimal. For example a value of 0.25 would mean that all rows for which more than one in four values are missing will be rejected.
Attributes
----------
data : dataframe
This is where the raw data read from filename is stored.
miss : float
This is where the maximum allowable percentage of missingness expressed as a decimal is stored.
pats : dict
This is where the set of missingness patterns found in the dataset will be stored later, initialized here.
"""
def __init__(self, filename, missingness, neighbors=10):
"""
Constructor, takes input data and missingness threshold and initializes imputable object.
This is the initialization function for imputation. It reads the input file of raw data and sets the user specified value for the missingness threshold and number of nearest neighbors.
"""
self.data = pd.read_csv(filename,sep='\t')
self.miss = float(missingness)
self.pats = {}
self.notdone = True
self.NN = neighbors
def deduplicate(self):
"""
Removes duplicate peptides.
Groups rows by peptide, if a peptide appears in more than one row it is removed.
"""
if (self.data[self.data.columns.values[1]][0][-2] == "T") & (self.data[self.data.columns.values[1]][0][-1].isdigit()):
self.data[self.data.columns.values[1]] = self.data[self.data.columns.values[1]].apply(lambda x: x.split('T')[0])
self.data = self.data.groupby(['Peptide','Protein']).mean()
todrop = []
for name, group in tqdm(self.data.groupby(level='Peptide')):
if len(group) > 1:
todrop.append(name)
self.data = self.data.drop(todrop)
def drop_missing(self):
"""Removes rows which are missing more data than the user specified missingness threshold."""
self.miss = np.rint(len(self.data.columns)*self.miss)
self.data = self.data[self.data.isnull().sum(axis=1)<=self.miss]
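    # Illustrative workflow sketch (added; not part of the original class). The file names
    # and threshold below are hypothetical:
    #   imp = imputable('peptides.tsv', missingness=0.25, neighbors=10)
    #   imp.deduplicate()
    #   imp.drop_missing()
    #   imp.impute('peptides_imputed.tsv')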
def impute(self,outname):
"""
Imputes missing data with KNN and outputs the results to the specified file.
First all of the missingness patterns present in the dataset are identified. Then those patterns are iterated over and for each pattern, missing values are imputed. Finally the dataset is reformed with the imputed values and output.
Parameters
----------
outname : str
Path to output file.
"""
def match_pat(l,i):
"""
            Finds all missingness patterns present in the dataset.
            For each row, if the row has a new missingness pattern, that pattern is added to the pattern dictionary; in either case the row's index is appended to the entry for its pattern.
Parameters
----------
l : list
A row of data
i : int
the index of that row in the original dataset
"""
l = "".join(np.isnan(l).astype(int).astype(str))
if l not in self.pats.keys():
self.pats[l] = [i]
else:
self.pats[l].append(i)
def get_patterns(arr):
"""Calls match_pat on all rows of data"""
for ind, val in enumerate(arr):
match_pat(val,ind)
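        # Worked example (added for clarity): a row [1.2, np.nan, 3.4] maps to the pattern
        # string '010', so all rows missing exactly the middle column share one entry in
        # self.pats and are imputed together below.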
def sub_imputer(inds,pattern,origarr,comparr):
"""
single imputation process for a missingness pattern.
            Drops columns missing in a given missingness pattern. Then finds nearest neighbors. Iterates over rows matching the missingness pattern, getting the indexes of each row's nearest neighbors, averaging those neighbors and replacing
missing values with corresponding averages.
Parameters
----------
inds : list
indexes of rows sharing the missingness pattern.
pattern : str
Binary representation of missingness pattern.
origarr : arr
original array of data with missing values
comparr : arr
Complete array of only rows with no missing values (complete cases).
Returns
-------
outa : arr
Imputed array for missingness pattern.
"""
#drop missing columns given missingness pattern
newarr = comparr[:,~np.array(list(pattern)).astype(bool)]
#fit nearest neighbors
nbrs = NearestNeighbors(n_neighbors=self.NN).fit(newarr)
outa = []
#iterate over rows matching missingness pattern
for rowind, row in enumerate(origarr[inds]):
outl = []
#get indexes of given rows nearest neighbors
indexes = nbrs.kneighbors([origarr[inds[rowind],~np.array(list(pattern)).astype(bool)]],return_distance=False)
#column-wise average of the nearest neighbors (the single closest complete case is skipped)
means = np.mean(comparr[indexes[0][1:]], axis=0)
#iterate over entries in each row
for ind, v in enumerate(row):
if not np.isnan(v):
outl.append(v)
else:
outl.append(means[ind])
outa.append(outl)
return outa
def imputer(origarr, comparr):
"""
Calls sub_imputer on each missingness pattern and outputs the results to a dict.
Parameters
----------
origarr : arr
Original array with missing values.
comparr : arr
Complete array of only rows with no missing values (complete cases).
Returns
-------
outdict : dict
Dict of imputed data with index in the original dataset as the key and imputed data as the value.
"""
outdict = {}
for k in tqdm(self.pats.keys()):
temparr = sub_imputer(self.pats[k],k, origarr,comparr)
for ind, v in enumerate(temparr):
outdict[self.pats[k][ind]] = v
return outdict
datavals = self.data.values
#generate array of complete cases
comparr = datavals[~np.isnan(datavals).any(axis=1)]
#find missingness patterns
get_patterns(datavals)
#impute
out = imputer(datavals, comparr)
#reform dataframe with imputed values from outdict
meld = pd.DataFrame.from_dict(out,orient='index')
import json
import io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import plotly.express as px
from dash.dependencies import Output, Input, State
from datetime import datetime, timedelta
from server import app
import plotly.graph_objects as go
import plotly.express as px
from sqlalchemy import create_engine
from flask import send_file
import os
from joblib import Parallel, delayed
from dash.exceptions import PreventUpdate
import time
import re
# ----------------------------------------------------------------------------------------------------- Level-1 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch data for level-1 figure 1 (monthly record counts for labs, exams and temperature)
def get_first_lev_first_fig_date(engine):
res = pd.DataFrame(columns=['业务类型', 'num', 'month' ])
# Issue categories, issue record counts, and overall record counts
bus_dic = {
'生化': "select '生化' as 业务类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null group by substr(REQUESTTIME,1,7)",
'检查': " select '检查' as 业务类型 , count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null group by substr(EXAM_DATE,1,7) ",
'体温': " select '体温' as 业务类型 , count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where RECORDDATE is not null group by substr(RECORDDATE,1,7) ",
}
for bus in bus_dic:
res = res.append(pd.read_sql(bus_dic[bus],con=engine))
return res
# Update level-1 figure 1
@app.callback(
Output('rout_exam_temp_first_level_first_fig','figure'),
Output('rout_exam_temp_first_level_first_fig_data','data'),
Input('rout_exam_temp_first_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(rout_exam_temp_first_level_first_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
engine = create_engine(db_con_url['db'])
if rout_exam_temp_first_level_first_fig_data is None:
rout_exam_temp_first_level_first_fig_data = {}
rout_exam_temp_first_level_first_fig = get_first_lev_first_fig_date(engine)
rout_exam_temp_first_level_first_fig_data['rout_exam_temp_first_level_first_fig'] = rout_exam_temp_first_level_first_fig.to_json(orient='split', date_format='iso')
rout_exam_temp_first_level_first_fig_data['hosname'] = db_con_url['hosname']
rout_exam_temp_first_level_first_fig_data['btime'] = btime
rout_exam_temp_first_level_first_fig_data['etime'] = etime
rout_exam_temp_first_level_first_fig_data = json.dumps(rout_exam_temp_first_level_first_fig_data)
else:
rout_exam_temp_first_level_first_fig_data = json.loads(rout_exam_temp_first_level_first_fig_data)
if db_con_url['hosname'] != rout_exam_temp_first_level_first_fig_data['hosname']:
rout_exam_temp_first_level_first_fig = get_first_lev_first_fig_date(engine)
rout_exam_temp_first_level_first_fig_data['rout_exam_temp_first_level_first_fig'] = rout_exam_temp_first_level_first_fig.to_json(orient='split',date_format='iso')
rout_exam_temp_first_level_first_fig_data['hosname'] = db_con_url['hosname']
rout_exam_temp_first_level_first_fig_data = json.dumps(rout_exam_temp_first_level_first_fig_data)
else:
rout_exam_temp_first_level_first_fig = pd.read_json(rout_exam_temp_first_level_first_fig_data['rout_exam_temp_first_level_first_fig'], orient='split')
rout_exam_temp_first_level_first_fig_data = dash.no_update
#
rout_exam_temp_first_level_first_fig = rout_exam_temp_first_level_first_fig[(rout_exam_temp_first_level_first_fig['month']>=btime) & (rout_exam_temp_first_level_first_fig['month']<=etime)]
rout_exam_temp_first_level_first_fig = rout_exam_temp_first_level_first_fig.sort_values(['month','业务类型'])
fig1 = px.line(rout_exam_temp_first_level_first_fig, x='month', y='num', color='业务类型',
color_discrete_sequence=px.colors.qualitative.Dark24)
# Horizontal legend and its position
fig1.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
fig1.update_yaxes(title_text="业务数据量")
fig1.update_xaxes(title_text="时间")
return fig1,rout_exam_temp_first_level_first_fig_data
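# The callbacks in this module all repeat the same dcc.Store caching pattern: the query
# result is serialized with DataFrame.to_json and cached together with the hospital name
# (and, where relevant, the counting window), and the SQL is only re-run when one of those
# keys changes. A minimal illustrative sketch of that decision logic (not called below;
# the callbacks inline it themselves):
def _cached_or_query(store_json, hosname, btime, etime, query_fn):
    # Returns (dataframe, new_store_json) or (dataframe, dash.no_update) when the cache is reused.
    def _pack(df):
        return df, json.dumps({'df': df.to_json(orient='split', date_format='iso'),
                               'hosname': hosname, 'btime': btime, 'etime': etime})
    if store_json is None:
        return _pack(query_fn())
    cache = json.loads(store_json)
    if cache.get('hosname') != hosname or cache.get('btime') != btime or cache.get('etime') != etime:
        return _pack(query_fn())
    return pd.read_json(cache['df'], orient='split'), dash.no_update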
# ----------------------------------------------------------------------------------------------------- Level-1 figure 2 ----------------------------------------------------------------------------------------------------------------------
# Fetch data for level-1 figure 2
def get_first_lev_second_fig_date(engine):
res = pd.DataFrame(columns=['问题类型', 'num' ])
# Issue categories, issue record counts, and overall record counts
bus_dic = {
'体温测量时间缺失': f"select '体温测量时间缺失' as 问题类型 ,count(1) as num from TEMPERATURE where RECORDDATE is null ",
'生化检验申请时间缺失': f"select '生化检验申请时间缺失' as 问题类型 ,count(1) as num from ROUTINE2 where REQUESTTIME is null ",
'生化检验报告时间缺失': f"select '生化检验报告时间缺失' as 问题类型 ,count(1) as num from ROUTINE2 where REPORTTIME is null",
'检查时间为空': f"select '检查时间为空' as 问题类型 ,count(1) as num from exam where EXAM_DATE is null ",
}
for bus in bus_dic:
res = res.append(pd.read_sql(bus_dic[bus],con=engine))
return res
# Update level-1 figure 2
@app.callback(
Output('rout_exam_temp_first_level_second_fig','figure'),
Output('rout_exam_temp_first_level_second_fig_data','data'),
Input('rout_exam_temp_first_level_second_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(rout_exam_temp_first_level_second_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if rout_exam_temp_first_level_second_fig_data is None:
rout_exam_temp_first_level_second_fig = get_first_lev_second_fig_date(engine)
rout_exam_temp_first_level_second_fig_data = {}
rout_exam_temp_first_level_second_fig_data['rout_exam_temp_first_level_second_fig'] = rout_exam_temp_first_level_second_fig.to_json( orient='split', date_format='iso')
rout_exam_temp_first_level_second_fig_data['hosname'] = db_con_url['hosname']
rout_exam_temp_first_level_second_fig_data = json.dumps(rout_exam_temp_first_level_second_fig_data)
else:
rout_exam_temp_first_level_second_fig_data = json.loads(rout_exam_temp_first_level_second_fig_data)
if db_con_url['hosname'] != rout_exam_temp_first_level_second_fig_data['hosname']:
rout_exam_temp_first_level_second_fig = get_first_lev_second_fig_date(engine)
rout_exam_temp_first_level_second_fig_data = {}
rout_exam_temp_first_level_second_fig_data[ 'rout_exam_temp_first_level_second_fig'] = rout_exam_temp_first_level_second_fig.to_json( orient='split', date_format='iso')
rout_exam_temp_first_level_second_fig_data['hosname'] = db_con_url['hosname']
rout_exam_temp_first_level_second_fig_data = json.dumps(rout_exam_temp_first_level_second_fig_data)
else:
rout_exam_temp_first_level_second_fig = pd.read_json( rout_exam_temp_first_level_second_fig_data['rout_exam_temp_first_level_second_fig'], orient='split')
rout_exam_temp_first_level_second_fig_data = dash.no_update
fig = go.Figure()
# fig = px.bar(rout_exam_temp_first_level_second_fig,x='问题类型',y='num',color_discrete_sequence=px.colors.qualitative.Dark24 )
fig.add_trace(
go.Bar(x=rout_exam_temp_first_level_second_fig['问题类型'], y=rout_exam_temp_first_level_second_fig['num'], name="问题类型",
marker_color=px.colors.qualitative.Dark24, )
)
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
fig.update_yaxes(title_text="问题数量")
fig.update_xaxes(title_text="问题类型")
return fig, rout_exam_temp_first_level_second_fig_data
# Download level-1 figure 2 details
@app.callback(
Output('rout_exam_temp_first_level_second_fig_detail', 'data'),
Input('rout_exam_temp_first_level_second_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
bus_dic = {
'体温测量时间缺失': f"select * from TEMPERATURE where RECORDDATE is null ",
'生化检验申请时间缺失': f"select * from ROUTINE2 where REQUESTTIME is null ",
'生化检验报告时间缺失': f"select * from ROUTINE2 where REPORTTIME is null",
'检查时间为空': f"select * from exam where EXAM_DATE is null ",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}时间缺失数据明细.xlsx')
else:
return dash.no_update
# # ----------------------------------------------------------------------------------------------------- Level-2 figure 1 ----------------------------------------------------------------------------------------------------------------------
# # Fetch data for the first level-2 figure (temperature)
def get_second_lev_first_fig_date(engine,btime,etime):
res = pd.DataFrame(columns=['问题类型','num','month'])
bus_dic = {
'体温测量值异常': f"select '体温测量值异常' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where (VALUE >46 or VALUE<34) and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'体温测量值缺失': f"select '体温测量值缺失' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where VALUE is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'科室缺失': f"select '科室缺失' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where DEPT is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'体温测量时机缺失': f"select '体温测量时机缺失' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where OUTSIDE is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'体温测量时间无时间点': f"select '检验测量时间无时间点' as 问题类型 ,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE where length(RECORDDATE)<19 and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' group by substr(RECORDDATE,1,7)",
'体温测量时间在出入院时间之外': f""" select '体温测量时间在出入院时间之外' as 问题类型,count(1) as num ,substr(RECORDDATE,1,7) as month from TEMPERATURE t1,overall t2 where
( t1.RECORDDATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.RECORDDATE<t2.IN_TIME or t1.RECORDDATE > t2.OUT_TIME )
and (substr(t1.RECORDDATE,1,7)>='{btime}' and substr(t1.RECORDDATE,1,7)<='{etime}')
group by substr(RECORDDATE,1,7)
""",
}
for bus in bus_dic:
res = res.append(pd.read_sql(bus_dic[bus],con=engine))
return res
# Update level-2 figure 1
@app.callback(
Output('temp_second_level_first_fig','figure'),
Output('temp_second_level_first_fig_data','data'),
Input('temp_second_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_first_level_second_fig(temp_second_level_first_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if temp_second_level_first_fig_data is None:
temp_second_level_first_fig_data = {}
temp_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
temp_second_level_first_fig_data['temp_second_level_first_fig'] = temp_second_level_first_fig.to_json( orient='split', date_format='iso')
temp_second_level_first_fig_data['hosname'] = db_con_url['hosname']
temp_second_level_first_fig_data['btime'] = btime
temp_second_level_first_fig_data['etime'] = etime
temp_second_level_first_fig_data = json.dumps(temp_second_level_first_fig_data)
else:
temp_second_level_first_fig_data = json.loads(temp_second_level_first_fig_data)
if db_con_url['hosname'] != temp_second_level_first_fig_data['hosname']:
temp_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
temp_second_level_first_fig_data['temp_second_level_first_fig'] = temp_second_level_first_fig.to_json( orient='split', date_format='iso')
temp_second_level_first_fig_data['hosname'] = db_con_url['hosname']
temp_second_level_first_fig_data['btime'] = btime
temp_second_level_first_fig_data['etime'] = etime
temp_second_level_first_fig_data = json.dumps(temp_second_level_first_fig_data)
else:
if temp_second_level_first_fig_data['btime'] != btime or temp_second_level_first_fig_data[ 'etime'] != etime:
temp_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
temp_second_level_first_fig_data[ 'temp_second_level_first_fig'] = temp_second_level_first_fig.to_json( orient='split', date_format='iso')
temp_second_level_first_fig_data['btime'] = btime
temp_second_level_first_fig_data['etime'] = etime
temp_second_level_first_fig_data = json.dumps(temp_second_level_first_fig_data)
else:
temp_second_level_first_fig = pd.read_json( temp_second_level_first_fig_data['temp_second_level_first_fig'], orient='split')
temp_second_level_first_fig_data = dash.no_update
temp_second_level_first_fig = temp_second_level_first_fig.sort_values(['month'])
fig = px.line(temp_second_level_first_fig, x="month", y="num", color='问题类型',
color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig.update_yaxes(title_text="体温测量数量", )
fig.update_xaxes(title_text="月份", )
return fig, temp_second_level_first_fig_data
# Download level-2 figure 1 details
@app.callback(
Output('temp_second_level_first_fig_detail', 'data'),
Input('temp_second_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
engine = create_engine(db_con_url['db'])
bus_dic = {
'体温测量值异常': f"select * from TEMPERATURE where (VALUE >46 or VALUE<34) and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'体温测量值缺失': f"select * from TEMPERATURE where VALUE is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'科室缺失': f"select * from TEMPERATURE where DEPT is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'体温测量时机缺失': f"select * from TEMPERATURE where OUTSIDE is null and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'体温测量时间无时间点': f"select * from TEMPERATURE where length(RECORDDATE)<19 and substr(RECORDDATE,1,7)>='{btime}' and substr(RECORDDATE,1,7)<='{etime}' ",
'体温测量时间在出入院时间之外': f""" select t1.*,t2.in_time as 入院时间,t2.out_time as 出院时间 from TEMPERATURE t1,overall t2 where
( t1.RECORDDATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.RECORDDATE<t2.IN_TIME or t1.RECORDDATE > t2.OUT_TIME )
and (substr(t1.RECORDDATE,1,7)>='{btime}' and substr(t1.RECORDDATE,1,7)<='{etime}')
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}体温问题数据明细.xlsx')
else:
return dash.no_update
#
# # # ----------------------------------------------------------------------------------------------------- Level-3 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch data for the first level-3 figure (routine biochemistry labs)
def get_third_lev_first_fig_date(engine,btime,etime):
res_数据时间缺失及汇总 = pd.DataFrame(columns=['问题类型', 'num', 'month' ])
# Issue categories, issue record counts, and overall record counts
bus_dic = {
'标本缺失': f"select '标本缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and SPECIMEN is null group by substr(REQUESTTIME,1,7)",
'检验项目缺失': f"select '检验项目缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RTYPE is null group by substr(REQUESTTIME,1,7)",
'检验结果缺失': f"select '检验结果缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RVALUE is null group by substr(REQUESTTIME,1,7)",
'院内外标识缺失': f"select '院内外标识缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and OUTSIDE is null group by substr(REQUESTTIME,1,7)",
'检验子项缺失': f"select '检验子项缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RITEM is null group by substr(REQUESTTIME,1,7)",
'定性结果缺失': f"select '定性结果缺失' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and ABNORMAL is null group by substr(REQUESTTIME,1,7)",
'申请时间大于等于报告时间': f"select '申请时间大于等于报告时间' as 问题类型 ,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where REQUESTTIME >= REPORTTIME and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' group by substr(REQUESTTIME,1,7)",
'申请时间在出入院时间之外': f""" select '申请时间在出入院时间之外' as 问题类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 t1,overall t2 where
( t1.REQUESTTIME is not null and t1.REPORTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
group by substr(REQUESTTIME,1,7)
""",
}
for bus in bus_dic:
res_数据时间缺失及汇总 = res_数据时间缺失及汇总.append(pd.read_sql(bus_dic[bus],con=engine))
return res_数据时间缺失及汇总
# Update level-3 figure 1
@app.callback(
Output('rout_third_level_first_fig','figure'),
Output('rout_third_level_first_fig_data','data'),
Input('rout_third_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(rout_third_level_first_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if rout_third_level_first_fig_data is None:
rout_third_level_first_fig_data = {}
rout_third_level_first_fig = get_third_lev_first_fig_date(engine, btime, etime)
rout_third_level_first_fig_data['rout_third_level_first_fig'] = rout_third_level_first_fig.to_json( orient='split', date_format='iso')
rout_third_level_first_fig_data['hosname'] = db_con_url['hosname']
rout_third_level_first_fig_data['btime'] = btime
rout_third_level_first_fig_data['etime'] = etime
rout_third_level_first_fig_data = json.dumps(rout_third_level_first_fig_data)
else:
rout_third_level_first_fig_data = json.loads(rout_third_level_first_fig_data)
if db_con_url['hosname'] != rout_third_level_first_fig_data['hosname']:
rout_third_level_first_fig = get_third_lev_first_fig_date(engine, btime, etime)
rout_third_level_first_fig_data['rout_third_level_first_fig'] = rout_third_level_first_fig.to_json( orient='split', date_format='iso')
rout_third_level_first_fig_data['hosname'] = db_con_url['hosname']
rout_third_level_first_fig_data['btime'] = btime
rout_third_level_first_fig_data['etime'] = etime
rout_third_level_first_fig_data = json.dumps(rout_third_level_first_fig_data)
else:
if rout_third_level_first_fig_data['btime'] != btime or rout_third_level_first_fig_data[ 'etime'] != etime:
rout_third_level_first_fig = get_third_lev_first_fig_date(engine, btime, etime)
rout_third_level_first_fig_data[ 'rout_third_level_first_fig'] = rout_third_level_first_fig.to_json(orient='split', date_format='iso')
rout_third_level_first_fig_data['btime'] = btime
rout_third_level_first_fig_data['etime'] = etime
rout_third_level_first_fig_data = json.dumps(rout_third_level_first_fig_data)
else:
rout_third_level_first_fig = pd.read_json( rout_third_level_first_fig_data['rout_third_level_first_fig'], orient='split')
rout_third_level_first_fig_data = dash.no_update
rout_third_level_first_fig = rout_third_level_first_fig.sort_values(['month'])
fig = px.line(rout_third_level_first_fig,x='month',y='num',color='问题类型',color_discrete_sequence=px.colors.qualitative.Dark24 )
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
fig.update_yaxes(title_text="问题数量")
fig.update_xaxes(title_text="月份")
return fig, rout_third_level_first_fig_data
# Download level-3 figure 1 details
@app.callback(
Output('rout_third_level_first_fig_detail', 'data'),
Input('rout_third_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
bus_dic = {
'标本缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and SPECIMEN is null ",
'检验项目缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RTYPE is null ",
'检验结果缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RVALUE is null ",
'院内外标识缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and OUTSIDE is null ",
'检验子项缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and RITEM is null ",
'定性结果缺失': f"select * from ROUTINE2 where REQUESTTIME is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and ABNORMAL is null ",
'申请时间大于等于报告时间': f"select * from ROUTINE2 where REQUESTTIME >= REPORTTIME and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' ",
'申请时间在出入院时间之外': f""" select t1.* ,t2.in_time as 入院时间,t2.out_time as 出院时间 from ROUTINE2 t1,overall t2 where
( t1.REQUESTTIME is not null and t1.REPORTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}生化检验问题数据明细.xlsx')
else:
return dash.no_update
# # # ----------------------------------------------------------------------------------------------------- Level-3 figure 2 ----------------------------------------------------------------------------------------------------------------------
# Fetch data for the second level-3 figure (routine biochemistry labs)
def get_third_level_second_fig_date(engine,btime,etime):
res = pd.read_sql(f"select RTYPE as 生化检验类型,count(distinct CASEID||TESTNO||RTYPE) as num ,substr(REQUESTTIME,1,7) as month from ROUTINE2 where RTYPE is not null and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' group by RTYPE,substr(REQUESTTIME,1,7)",con=engine)
return res
# Update routine-lab level-3 figure 2
@app.callback(
Output('rout_third_level_second_fig','figure'),
Output('rout_third_level_second_fig_data','data'),
Input('rout_third_level_second_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_second_level_fig(rout_third_level_second_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if rout_third_level_second_fig_data is None:
rout_third_level_second_fig_data = {}
rout_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
rout_third_level_second_fig_data['rout_third_level_second_fig'] = rout_third_level_second_fig.to_json(orient='split', date_format='iso')
rout_third_level_second_fig_data['hosname'] = db_con_url['hosname']
rout_third_level_second_fig_data['btime'] = btime
rout_third_level_second_fig_data['etime'] = etime
rout_third_level_second_fig_data = json.dumps(rout_third_level_second_fig_data)
else:
rout_third_level_second_fig_data = json.loads(rout_third_level_second_fig_data)
if db_con_url['hosname'] != rout_third_level_second_fig_data['hosname']:
rout_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
rout_third_level_second_fig_data['rout_third_level_second_fig'] = rout_third_level_second_fig.to_json(orient='split',date_format='iso')
rout_third_level_second_fig_data['hosname'] = db_con_url['hosname']
rout_third_level_second_fig_data['btime'] = btime
rout_third_level_second_fig_data['etime'] = etime
rout_third_level_second_fig_data = json.dumps(rout_third_level_second_fig_data)
else:
if rout_third_level_second_fig_data['btime'] != btime or rout_third_level_second_fig_data['etime'] != etime:
rout_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
rout_third_level_second_fig_data['rout_third_level_second_fig'] = rout_third_level_second_fig.to_json(orient='split',date_format='iso')
rout_third_level_second_fig_data['btime'] = btime
rout_third_level_second_fig_data['etime'] = etime
rout_third_level_second_fig_data = json.dumps(rout_third_level_second_fig_data)
else:
rout_third_level_second_fig = pd.read_json(rout_third_level_second_fig_data['rout_third_level_second_fig'], orient='split')
rout_third_level_second_fig_data = dash.no_update
rout_third_level_second_fig = rout_third_level_second_fig.sort_values(['month'])
# fig = px.line(rout_third_level_second_fig,x='month',y='num',color='生化检验类型',color_discrete_sequence=px.colors.qualitative.Dark24)
fig = px.bar(rout_third_level_second_fig,x='month',y='num',color='生化检验类型',color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
fig.update_yaxes(title_text="生化检验数量", )
fig.update_xaxes(title_text="月份", )
return fig,rout_third_level_second_fig_data
#
# # ----------------------------------------------------------------------------------------------------- Level-4 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch data for the first level-4 figure (exams)
def get_fourth_level_first_fig_date(engine,btime,etime):
res = pd.DataFrame(columns=['问题类型', 'num', 'month'])
# Issue categories, issue record counts, and overall record counts
bus_dic = {
'检查类别缺失': f"select '检查类别缺失' as 问题类型 ,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and EXAM_CLASS is null group by substr(EXAM_DATE,1,7)",
'检查部位缺失': f"select '检验部位缺失' as 问题类型 ,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and EXAM_PARA is null group by substr(EXAM_DATE,1,7)",
'检查所见缺失': f"select '检查所见缺失' as 问题类型 ,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and DESCRIPTION is null group by substr(EXAM_DATE,1,7)",
'检查印象缺失': f"select '检查印象缺失' as 问题类型 ,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and IMPRESSION is null group by substr(EXAM_DATE,1,7)",
'检查时间在出入院时间之外': f""" select '检查时间在出入院时间之外' as 问题类型,count(1) as num ,substr(EXAM_DATE,1,7) as month from EXAM t1,overall t2 where
( t1.EXAM_DATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.EXAM_DATE<t2.IN_TIME or t1.EXAM_DATE > t2.OUT_TIME )
and (substr(t1.EXAM_DATE,1,7)>='{btime}' and substr(t1.EXAM_DATE,1,7)<='{etime}')
group by substr(EXAM_DATE,1,7)
""",
}
for bus in bus_dic:
res = res.append(pd.read_sql(bus_dic[bus], con=engine))
return res
# Update level-4 figure 1
@app.callback(
Output('exam_fourth_level_first_fig','figure'),
Output('exam_fourth_level_first_fig_data', 'data'),
Input('exam_fourth_level_first_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_third_level_first_fig(exam_fourth_level_first_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if exam_fourth_level_first_fig_data is None:
exam_fourth_level_first_fig_data = {}
exam_fourth_level_first_fig = get_fourth_level_first_fig_date(engine, btime, etime)
exam_fourth_level_first_fig_data['exam_fourth_level_first_fig'] = exam_fourth_level_first_fig.to_json( orient='split', date_format='iso')
exam_fourth_level_first_fig_data['hosname'] = db_con_url['hosname']
exam_fourth_level_first_fig_data['btime'] = btime
exam_fourth_level_first_fig_data['etime'] = etime
exam_fourth_level_first_fig_data = json.dumps(exam_fourth_level_first_fig_data)
else:
exam_fourth_level_first_fig_data = json.loads(exam_fourth_level_first_fig_data)
if db_con_url['hosname'] != exam_fourth_level_first_fig_data['hosname']:
exam_fourth_level_first_fig = get_fourth_level_first_fig_date(engine, btime, etime)
exam_fourth_level_first_fig_data['exam_fourth_level_first_fig'] = exam_fourth_level_first_fig.to_json(orient='split', date_format='iso')
exam_fourth_level_first_fig_data['hosname'] = db_con_url['hosname']
exam_fourth_level_first_fig_data['btime'] = btime
exam_fourth_level_first_fig_data['etime'] = etime
exam_fourth_level_first_fig_data = json.dumps(exam_fourth_level_first_fig_data)
else:
if exam_fourth_level_first_fig_data['btime'] != btime or exam_fourth_level_first_fig_data['etime'] != etime:
exam_fourth_level_first_fig = get_fourth_level_first_fig_date(engine, btime, etime)
exam_fourth_level_first_fig_data['exam_fourth_level_first_fig'] = exam_fourth_level_first_fig.to_json(orient='split', date_format='iso')
exam_fourth_level_first_fig_data['btime'] = btime
exam_fourth_level_first_fig_data['etime'] = etime
exam_fourth_level_first_fig_data = json.dumps(exam_fourth_level_first_fig_data)
else:
exam_fourth_level_first_fig = pd.read_json( exam_fourth_level_first_fig_data['exam_fourth_level_first_fig'], orient='split')
exam_fourth_level_first_fig_data = dash.no_update
exam_fourth_level_first_fig = exam_fourth_level_first_fig.sort_values(['month'])
fig = px.line(exam_fourth_level_first_fig, x="month", y="num", color='问题类型', color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig.update_yaxes(title_text="问题数量", )
fig.update_xaxes(title_text="月份", )
return fig,exam_fourth_level_first_fig_data
# Download level-4 figure 1 details
@app.callback(
Output('exam_fourth_level_first_fig_detail', 'data'),
Input('exam_fourth_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
bus_dic = {
'检查类别缺失': f"select * from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and EXAM_CLASS is null ",
'检查部位缺失': f"select * from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and EXAM_PARA is null ",
'检查所见缺失': f"select * from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and DESCRIPTION is null ",
'检查印象缺失': f"select * from EXAM where EXAM_DATE is not null and substr(EXAM_DATE,1,7)>='{btime}' and substr(EXAM_DATE,1,7)<='{etime}' and IMPRESSION is null ",
'检查时间在出入院时间之外': f""" select t1.* ,t2.in_time as 入院时间,t2.out_time as 出院时间 from EXAM t1,overall t2 where
( t1.EXAM_DATE is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.EXAM_DATE<t2.IN_TIME or t1.EXAM_DATE > t2.OUT_TIME )
and (substr(t1.EXAM_DATE,1,7)>='{btime}' and substr(t1.EXAM_DATE,1,7)<='{etime}')
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
#!/usr/bin/env python3
"""Post-process
"""
# ========================================================================
#
# Imports
#
# ========================================================================
import os
import argparse
import numpy as np
import yt
import glob
import pandas as pd
import time
from datetime import timedelta
# ========================================================================
#
# Function definitions
#
# ========================================================================
def parse_ic(fname):
"""
Parse the file written by PeleC to understand the initial condition
Returns a dictionary for easy access
"""
# Read into dataframe
df = pd.read_csv(fname)
df.rename(columns=lambda x: x.strip(), inplace=True)
# convert to dictionary for easier access
return df.to_dict("records")[0]
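# Hypothetical ic.txt contents (the column names are inferred from how `ics` is indexed
# further below; the values are illustrative only):
#   v0, omega_x, omega_y, omega_z, L
#   1.0, 6.2832, 6.2832, 6.2832, 1.0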
# ========================================================================
def parse_output(fname):
walltime = 0
steps = 0
cfl = 0
dt = 0
with open(fname, "r") as f:
for line in f:
if "Running " in line:
cfl = float(line.split()[6])
dt = float(line.split()[-1])
elif "STEP =" in line:
steps = max(steps, int(line.split()[2]))
elif "Run time w/o init =" in line:
walltime = float(line.split()[-1])
return walltime, steps, cfl, dt
# ========================================================================
#
# Main
#
# ========================================================================
if __name__ == "__main__":
# Timer
start = time.time()
# Parse arguments
parser = argparse.ArgumentParser(description="A simple post-processing tool")
args = parser.parse_args()
# Setup
casedir = os.path.abspath("cases")
fdirs = sorted(
[
os.path.join(casedir, f)
for f in os.listdir(casedir)
if os.path.isdir(os.path.join(casedir, f))
]
)
oname = "out"
# Get the data
lst = []
for k, fdir in enumerate(fdirs):
# Initial conditions
ics = parse_ic(os.path.join(fdir, "ic.txt"))
# Get walltime, number of steps, cfl
walltime, steps, cfl, dt = parse_output(os.path.join(fdir, oname))
# Get plt directories
pltdirs = sorted(glob.glob(os.path.join(fdir, "plt*")))
# Initial condition
ds = yt.load(pltdirs[0])
max_level = ds.index.max_level
ref = int(np.product(ds.ref_factors[0:max_level]))
low = ds.domain_left_edge
L = (ds.domain_right_edge - ds.domain_left_edge).d
N = ds.domain_dimensions * ref
cube = ds.covering_grid(
max_level, left_edge=low, dims=N, fields=["x", "density", "velocity_x"]
)
u_0 = cube["x_velocity"].d
# Exact solution at initial
xmt = cube["x"].d - ics["v0"] * ds.current_time.d
ymt = cube["y"].d - ics["v0"] * ds.current_time.d
zmt = cube["z"].d - ics["v0"] * ds.current_time.d
u_e0 = ics["v0"] + ics["v0"] * np.sin(ics["omega_x"] * xmt / ics["L"]) * np.cos(
ics["omega_y"] * ymt / ics["L"]
) * np.cos(ics["omega_z"] * zmt / ics["L"])
# Final time
ds = yt.load(pltdirs[-1])
max_level = ds.index.max_level
ref = int(np.product(ds.ref_factors[0:max_level]))
low = ds.domain_left_edge
L = (ds.domain_right_edge - ds.domain_left_edge).d
N = ds.domain_dimensions * ref
dx = ds.index.get_smallest_dx().d
cube = ds.covering_grid(
max_level,
left_edge=low,
dims=N,
fields=["x", "y", "z", "density", "velocity_x"],
)
u_f = cube["x_velocity"].d
# Exact solution at final time
xmt = cube["x"].d - ics["v0"] * ds.current_time.d
ymt = cube["y"].d - ics["v0"] * ds.current_time.d
zmt = cube["z"].d - ics["v0"] * ds.current_time.d
u_ef = ics["v0"] + ics["v0"] * np.sin(ics["omega_x"] * xmt / ics["L"]) * np.cos(
ics["omega_y"] * ymt / ics["L"]
) * np.cos(ics["omega_z"] * zmt / ics["L"])
# Calculate the L2 error norm
e0 = np.sqrt(np.mean((u_0 - u_e0) ** 2))
ef = np.sqrt(np.mean((u_f - u_ef) ** 2))
lst.append(
{
"N": N[0],
"L20": e0,
"L2": ef,
"walltime": walltime,
"steps": steps,
"dx": dx,
"dt": dt,
"cfl": cfl,
}
)
# Concatenate all errors
df = pd.DataFrame(lst)
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
from itertools import islice
import os
import pysam
import gzip
import pdb
import time
import argparse
import warnings
import sys
from tqdm.auto import tqdm
def genotype_finder(path_vcf, path_lookup_table,max_var,output_path,vcf_geno):
#looking for genotypes from vcf file
def genotypeOfHaplotypes(genotype_info):
# set 22 for nonphased genotypes
if not ("|" in genotype_info):
return "22"
genotype_info_Split = genotype_info.split("|")
genotype_hap1 = 0 if genotype_info_Split[0] == "0" else 1
genotype_hap2 = 0 if genotype_info_Split[1] == "0" else 1
return (str(genotype_hap1) + str(genotype_hap2))
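# Behaviour of the encoding above, for reference:
#   genotypeOfHaplotypes("0|1") -> "01"
#   genotypeOfHaplotypes("1|1") -> "11"
#   genotypeOfHaplotypes("./.") -> "22"   (anything unphased/missing is flagged with 2s)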
def sample_column_map(path, start_col=9, line_key="#CHR"):
stream_in = gzip.open(path, "r")
out_map = {}
for line in stream_in:
if isinstance(line, bytes) and not isinstance(line, str):
line = line.decode()
if line_key in line:
line = line.rstrip().split("\t")
for i in range(start_col,len(line)):
out_map[line[i]] = i
break
stream_in.close()
return(out_map)
vcf_map = sample_column_map(os.path.expanduser(path_vcf))
tabix_vcf = pysam.Tabixfile(os.path.expanduser(path_vcf),"r")
for var_count in range(1,max_var+1):
lookup_table= pd.read_table(path_lookup_table+'haplotype_logExpression_var_'+str(var_count)+'_sort.txt', sep=" ")
lookup_table = lookup_table.reset_index(drop= True)
prediction_matrix_haplotype1= pd.DataFrame();
prediction_matrix_haplotype2= pd.DataFrame();
hap1_individual_genotype_lst = [];
hap2_individual_genotype_lst = [];
temp = pd.DataFrame();
individual_starting_column = 9
pred_index =1 ;
individuals = vcf_map.keys()
individuals_lst = list(vcf_map.keys())
genotype_info_hap1 = dict.fromkeys(np.arange(len(lookup_table)))
genotype_info_hap2 = dict.fromkeys(np.arange(len(lookup_table)))
count = 0
for index, row in tqdm(lookup_table.iterrows(),total=lookup_table.shape[0],position=0, leave=True, desc=" "+str(var_count)+"-eQTL genes"):
# tqdm.write(str(var_count)+"-eQTL genes", end = "\r")
columns = pd.Series();
columns = columns.append(pd.Series({'gene_id':row['gene_id']}))
if ('chrX' in row['variant_id_1']):# chrX won't be found in vcf file
for chrx_count in range(1,var_count+1):
var= 'variant_id_' + str(chrx_count)
columns = columns.append(pd.Series({var:row[var]}))
temp = columns.to_frame().T;
prediction_matrix_haplotype1= prediction_matrix_haplotype1.append(temp,ignore_index=True);
prediction_matrix_haplotype2= prediction_matrix_haplotype2.append(temp,ignore_index=True);
genotype_info_hap1[index] = ['2' for i in range(len(individuals_lst))]
genotype_info_hap2[index] = ['2' for i in range(len(individuals_lst))]
continue;
hap1_individual_genotype_lst.clear();
hap2_individual_genotype_lst.clear();
count = count + 1
# print (count, end='\r')
for i in range(1,var_count+1):
var= 'variant_id_' + str(i)
columns = columns.append(pd.Series({var:row[var]}))
temp = columns.to_frame().T;
var_split = row[var].split('_')
# reading vcf file
records = tabix_vcf.fetch(var_split[0],int(var_split[1])-1,int(var_split[1]))
snp_found = 0
for record in records:
cols = record.rstrip().split("\t")
if cols[2] == row[var]:
gt_index = cols[8].split(":").index(vcf_geno)
snp_found = 1
break
if (snp_found==0): # chrX won't be found
gt_index = 1000
print("WARNING: eSNP %s not found in VCF"%(row[var]))
hap_ind = 0;
for ind in individuals:
if (gt_index ==1000):
genotype_both = './.'
else:
sample_col = cols[vcf_map[ind]]
genotype_both = sample_col.split(":")[gt_index]
genotype_both_haps= genotypeOfHaplotypes(genotype_both);
if (i>1):
hap1_individual_genotype_lst.insert(hap_ind, hap1_individual_genotype_lst[hap_ind]+str(genotype_both_haps[0]) + ", ");
del hap1_individual_genotype_lst[hap_ind + 1];
hap2_individual_genotype_lst.insert(hap_ind, hap2_individual_genotype_lst[hap_ind]+str(genotype_both_haps[1]) + ", ");
del hap2_individual_genotype_lst[hap_ind + 1];
else:
hap1_individual_genotype_lst.insert(hap_ind, str(genotype_both_haps[0]) + ", ");
hap2_individual_genotype_lst.insert(hap_ind, str(genotype_both_haps[1]) + ", ");
hap_ind = hap_ind+1;
prediction_matrix_haplotype1= prediction_matrix_haplotype1.append(temp,ignore_index=True);
prediction_matrix_haplotype2= prediction_matrix_haplotype2.append(temp,ignore_index=True);
#Create genotype array to be inserted for each gene
clean_genotype_lst1 = [genotype[:-2] for genotype in hap1_individual_genotype_lst]
clean_genotype_lst2 = [genotype[:-2] for genotype in hap2_individual_genotype_lst]
#Do the insertion one gene at a time
genotype_info_hap1[index] = clean_genotype_lst1
genotype_info_hap2[index] = clean_genotype_lst2
output_genotype_hap1 = pd.concat([prediction_matrix_haplotype1, pd.DataFrame.from_dict(genotype_info_hap1, columns=individuals, orient='index')],axis = 1, sort = False);
output_genotype_hap2 = pd.concat([prediction_matrix_haplotype2, pd.DataFrame.from_dict(genotype_info_hap2, columns=individuals, orient='index')],axis = 1, sort = False);
output_genotype_hap1.to_csv(os.path.expanduser(output_path+'/genotype_hap1_var_count_'+str(var_count)+".csv"));
output_genotype_hap2.to_csv(os.path.expanduser(output_path+'/genotype_hap2_var_count_'+str(var_count)+".csv"));
def expr_pred(lookup_Table_path, max_var, path_genotype, output_path):
# predicting ASE and total expression
def Allelic_imbalance_predicted(hap1_count,hap2_count):
return hap1_count - hap2_count;
def ASE_predicted_value(hap1_count,hap2_count):
h1 = 2**hap1_count
htotal = 2**hap1_count + 2**hap2_count
p = h1 / htotal
h1_percent = round(p,2)
h2_percent = round(1 - h1_percent,2)
s = str(h1_percent)+"|"+str(h2_percent)
return s
def total_predicted_value(hap1_count,hap2_count):
hap1_count = 2**hap1_count;
hap2_count = 2**hap2_count;
return np.log2(hap1_count + hap2_count);
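# Worked example for the two predictors above (illustrative values):
# with hap1_count = 3 and hap2_count = 1 on the log2 scale, 2**3 = 8 and 2**1 = 2, so
#   ASE_predicted_value(3, 1)   -> "0.8|0.2"          (8 / (8 + 2) = 0.8)
#   total_predicted_value(3, 1) -> log2(8 + 2) ≈ 3.3219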
for var_count in range(1,max_var+1):
lookup_Table = pd.read_table(lookup_Table_path+"haplotype_logExpression_var_"+str(var_count)+".txt", sep=" ");
lookup_Table.columns = lookup_Table.columns.str.replace(',','').str.replace(" ", "");
genotype_haplotype1=pd.read_csv(path_genotype+"/genotype_hap1_var_count_"+str(var_count)+".csv")
genotype_haplotype2= pd.read_csv(path_genotype+"/genotype_hap2_var_count_"+str(var_count)+".csv")
start_index_individuals = var_count + 2;
individuals = genotype_haplotype1.columns.tolist()[start_index_individuals :]
# make empty dict
total_exp_output = dict.fromkeys(np.arange(len(genotype_haplotype1)))
ASE_exp_output = dict.fromkeys(np.arange(len(genotype_haplotype1)))
# make gene_ids separately
genes = pd.DataFrame(np.arange(len(genotype_haplotype1)))
count = 0
total_count_lst = [];
ASE_count_lst = [];
for index, row in tqdm(genotype_haplotype1.iterrows(),total=genotype_haplotype1.shape[0],position=0, leave=True, desc=" "+str(var_count)+"-eQTL genes"):
total_count_lst.clear();
ASE_count_lst.clear();
count = count +1
# if ('chrX' in row['variant_id_1']):
# continue;
# print (var_count, " ", count, end='\r')
placeholder = list(np.where(lookup_Table['gene_id'] == row['gene_id']))[0] + 1; # index starts from 1 in lookup table
lookup_Table_index = placeholder [0];
genes.at[index,'gene_id'] = row['gene_id']
ind_index = 0
for ind in individuals:
if (not '2' in str(row[ind]) and not '2' in str(genotype_haplotype2.loc[index,ind])):
x1 = str(genotype_haplotype1.loc[index,ind]).replace(",","").replace(" ", "")
x2 = str(genotype_haplotype2.loc[index,ind]).replace(",","").replace(" ", "")
if (var_count == 1):
x1 = x1.split('.')[0]
x2 = x2.split('.')[0]
# read predicted value from lookup table
total_prediction = total_predicted_value(lookup_Table.loc[lookup_Table_index,x1],lookup_Table.loc[lookup_Table_index,x2])
total_count_lst.insert(ind_index,round(total_prediction,4))
ASE_prediction = ASE_predicted_value(lookup_Table.loc[lookup_Table_index,x1],lookup_Table.loc[lookup_Table_index,x2])
ASE_count_lst.insert(ind_index,ASE_prediction)
else:
total_count_lst.insert(ind_index,np.nan)
ASE_count_lst.insert(ind_index,np.nan)
ind_index = ind_index + 1
total_count_lst1 = [expr for expr in total_count_lst]
ASE_count_lst1 = [expr for expr in ASE_count_lst]
total_exp_output[index] = total_count_lst1
ASE_exp_output[index] = ASE_count_lst1
# make different dicts for different number of eQTLs
globals()['total_exp_output_df_%s'%var_count] = pd.concat([genes, pd.DataFrame.from_dict(total_exp_output, columns=individuals, orient='index')], axis = 1, sort = False)
# -*- coding: utf-8 -*-
"""
This file is part of the Shotgun Lipidomics Assistant (SLA) project.
Copyright 2020 <NAME> (UCLA), <NAME> (UCLA)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
#os.environ["MODIN_ENGINE"] = "dask"
import pandas as pd
#import modin.pandas as pd
import numpy as np
from pyopenms import *
import glob
import re
import uuid
import tkinter as tk
from tkinter import ttk
from pandastable import Table, TableModel
from tkinter import filedialog
pd.set_option('display.float_format', '{:,.5E}'.format)
class App:
def __init__(self):
self.root = tk.Tk()
self.root.title("IntensityViewer")
self.dirloc = tk.Text(width=30, height=2, state='disabled')
self.dirloc.grid(column=1, row=0, columnspan=1, sticky='nw', padx=2)
self.button = ttk.Button(text='Set Directory', command=lambda: self.set_dir_read())
self.button.grid(column=0, row=0, sticky='nw')
self.stdkey = tk.Text(width=30, height=2, state='disabled')
self.stdkey.grid(column=1, row=1, columnspan=1, sticky='nw', padx=2)
self.button2 = ttk.Button(text='StandardKey', command=lambda: self.stdkey_read())
self.button2.grid(column=0, row=1, sticky='nw')
self.spname = tk.Text(width=30, height=2, state='disabled')
self.spname.grid(column=1, row=2, columnspan=1, sticky='nw', padx=2)
self.button3 = ttk.Button(text='Spname', command=lambda: self.spname_read())
self.button3.grid(column=0, row=2, sticky='nw')
self.button4 = ttk.Button(text='Read',
command=lambda: self.readmzml())
self.button4.grid(column=0, row=3, sticky='nw')
self.tree = ttk.Treeview(columns=('Outliers', 'Min', 'Mean', 'Max', 'Coef.Var'))
self.tree.heading('#0', text='ID')
self.tree.heading('Outliers', text='Outliers')
self.tree.heading('Min', text='Min')
self.tree.heading('Mean', text='Mean')
self.tree.heading('Max', text='Max')
self.tree.heading('Coef.Var', text='Coef.Var')
self.tree.column('#0', width=50, anchor='center', stretch=False)
self.tree.column('Outliers', width=50, anchor='center', stretch=False)
self.tree.column('Min', width=50, anchor='center', stretch=False)
self.tree.column('Mean', width=50, anchor='center', stretch=False)
self.tree.column('Max', width=50, anchor='center', stretch=False)
self.tree.column('Coef.Var', width=60, anchor='center', stretch=False)
self.tree.grid(column=0, row=4, columnspan=2,
sticky='nwe', pady=3, padx=3)
self.tree.bind("<Double-1>", self.OnDoubleClick)
# Constructing vertical scrollbar
# with treeview
self.verscrlbar = ttk.Scrollbar(self.root,
orient="vertical", command=self.tree.yview)
# config bar
self.verscrlbar.grid(column=2, row=4, sticky='nws', pady=3, padx=0)
# self.tree.config(yscrollcommand = self.verscrlbar.set)
# self.verscrlbar.config(command=self.tree.yview)
# Constructing horizontal scrollbar
# with treeview
self.horscrlbar = ttk.Scrollbar(self.root,
orient="horizontal", command=self.tree.xview)
# config bar
self.horscrlbar.grid(column=0, row=5, columnspan=2, sticky='enw', pady=0, padx=5)
self.tree.config(yscrollcommand=self.verscrlbar.set)
self.tree.config(xscrollcommand=self.horscrlbar.set)
self.f = tk.Frame()
self.f.grid(column=3, row=0, rowspan=10, columnspan=10, sticky='nsew')
# self.f.columnconfigure(10, weight=1)
# self.f.rowconfigure(10, weight=1)
self.root.columnconfigure(3, weight=1)
self.root.rowconfigure(5, weight=1)
self.root.mainloop()
def OnDoubleClick(self, event):
item = self.tree.selection()[0]
print("you clicked on", self.tree.item(item, 'text'))
method = self.tree.item(item, 'values')[-1]
samp = self.tree.item(item, 'text')
df = self.data[method][samp].copy() # self.data['1']['54']
df.iloc[:, 4:24] = df.iloc[:, 4:24].applymap('{:,.5E}'.format) # scientific notation
pt = Table(self.f, dataframe=df, model=TableModel(df),
showtoolbar=True, showstatusbar=True, enable=True)
pt.show()
pt.redraw()
def set_dir_read(self):
self.dirloc.configure(state="normal")
self.dirloc.delete(1.0, 'end')
self.setdir = filedialog.askdirectory()
self.dirloc.insert('insert', self.setdir)
self.dirloc.configure(state="disabled")
def stdkey_read(self):
self.stdkey.configure(state="normal")
self.stdkey.delete(1.0, 'end')
self.setdir = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"),
("all files", "*.*")))
self.stdkey.insert('insert', self.setdir)
self.stdkey.configure(state="disabled")
def spname_read(self):
self.spname.configure(state="normal")
self.spname.delete(1.0, 'end')
self.setdir = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"),
("all files", "*.*")))
self.spname.insert('insert', self.setdir)
self.spname.configure(state="disabled")
def readmzml(self):
os.chdir(self.dirloc.get('1.0', 'end-1c'))
list_of_files = glob.glob('./*.mzML')
##def functions
def centered_average(row):
mesures = row # [6:26]
mesures = mesures[mesures != 0] # drop all 0s
if len(mesures) == 0:
mesures = [0]
mesures = np.nanmean(mesures)
return (mesures)
def stdNorm(row):
return (sp_df2['AvgIntensity'][sp_df2['Species'] == std_dict[method]['StdName'][row['Species']]].iloc[0])
# return(sp_df2['AvgIntensity'][sp_df2['Species'] == std_dict[method]['StdName'][row['Species']]].item())
def conCoef(row):
return (std_dict[method]['Coef'][row['Species']])
# def spName(row):
# return(sp_dict[method].loc[(pd.Series(sp_dict[method]['Q1'] == row['Q1']) & pd.Series(sp_dict[method]['Q3'] == row['Q3']))].index[0])
##create all variable
all_df_dict = {'1': {}, '2': {}}
# out_df2 = pd.DataFrame()
out_df2 = {'1': pd.DataFrame(), '2': pd.DataFrame()}
# out_df2_con = pd.DataFrame()
out_df2_con = {'1': pd.DataFrame(), '2': pd.DataFrame()}
from __future__ import print_function
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import to_datetime
def group_survival_table_from_events(groups, durations, censorship, limit=-1):
"""
Joins multiple event series together into dataframes. A generalization of
`survival_table_from_events` to data with groups. Previously called `group_event_series` pre 0.2.3.
Parameters:
groups: a (n,) array of individuals' group ids.
durations: a (n,) array of durations of each individual
censorship: a (n,) array of censorship, 1 if observed, 0 else.
Output:
- np.array of unique groups
- dataframe of removal count data at event_times for each group, column names are 'removed:<group name>'
- dataframe of observed count data at event_times for each group, column names are 'observed:<group name>'
- dataframe of censored count data at event_times for each group, column names are 'censored:<group name>'
Example:
#input
group_survival_table_from_events(waltonG, waltonT, np.ones_like(waltonT)) #data available in test_suite.py
#output
[
array(['control', 'miR-137'], dtype=object),
removed:control removed:miR-137
event_at
6 0 1
7 2 0
9 0 3
13 0 3
15 0 2
...,
observed:control observed:miR-137
event_at
6 0 1
7 2 0
9 0 3
13 0 3
15 0 2
...,
censored:control censored:miR-137
event_at
6 0 0
7 0 0
9 0 0
...,
]
"""
unique_groups = np.unique(groups)
# set first group
g = unique_groups[0]
ix = groups == g
T = durations[ix]
C = censorship[ix]
g_name = str(g)
data = survival_table_from_events(T, C, columns=['removed:' + g_name, "observed:" + g_name, 'censored:' + g_name])
for g in unique_groups[1:]:
ix = groups == g
T = durations[ix]
C = censorship[ix]
g_name = str(g)
data = data.join(survival_table_from_events(T, C, columns=['removed:' + g_name, "observed:" + g_name, 'censored:' + g_name]), how='outer')
data = data.fillna(0)
# hmmm pandas...its too bad I can't do data.ix[:limit] and leave out the if.
if int(limit) != -1:
data = data.ix[:limit]
return unique_groups, data.filter(like='removed:'), data.filter(like='observed:'), data.filter(like='censored:')
def survival_table_from_events(event_times, censorship, columns=["removed", "observed", "censored"], weights=None):
"""
Parameters:
event_times: (n,1) array of event times
censorship: if not None, (n,1) boolean array, 1 if observed event, 0 is censored
columns: a 3-length array to call the, in order, removed individuals, observed deaths
and censorships.
Returns:
Pandas DataFrame with index as the unique times in event_times. The columns named
'removed' refers to the number of individuals who were removed from the population
by the end of the period. The column 'observed' refers to the number of removed
individuals who were observed to have died (i.e. not censored.) The column
'censored' is defined as 'removed' - 'observed' (the number of individuals who
left the population due to censorship)
Example:
#input
survival_table_from_events( waltonT, np.ones_like(waltonT)) #available in test suite
#output
removed observed censored
event_at
6 1 1 0
7 2 2 0
9 3 3 0
13 3 3 0
15 2 2 0
"""
event_times = np.array(event_times)
df = pd.DataFrame(event_times.astype(float), columns=["event_at"])
df[columns[0]] = 1 if weights is None else weights
df[columns[1]] = censorship
event_table = df.groupby("event_at").sum().sort_index()
event_table[columns[2]] = event_table[columns[0]] - event_table[columns[1]]
return event_table
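# Note on the optional `weights` argument: when given, it replaces the per-individual
# count of 1 in the 'removed' column, so weighted populations can be tabulated; when
# omitted each individual contributes exactly one removal at its event time.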
def survival_events_from_table(event_table, observed_deaths_col="observed", censored_col="censored"):
"""
This is the inverse of the function ``survival_table_from_events``.
Parameters
event_table: a pandas DataFrame with index as the durations (!!) and columns "observed" and "censored", referring to
the number of individuals that died and were censored at time t.
Returns
T: a np.array of durations of observation -- one element for each individual in the population.
C: a np.array of censorships -- one element for each individual in the population. 1 if observed, 0 else.
Ex: The survival table, as a pandas DataFrame:
observed censored
index
1 1 0
2 0 1
3 1 0
4 1 1
5 0 1
would return
T = np.array([ 1., 2., 3., 4., 4., 5.]),
C = np.array([ 1., 0., 1., 1., 0., 0.])
"""
columns = [observed_deaths_col, censored_col]
N = event_table[columns].sum().sum()
T = np.empty(N)
C = np.empty(N)
i = 0
for event_time, row in event_table.iterrows():
n = row[columns].sum()
T[i:i + n] = event_time
C[i:i + n] = np.r_[np.ones(row[columns[0]]), np.zeros(row[columns[1]])]
i += n
return T, C
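# Round-trip sketch: survival_events_from_table(survival_table_from_events(T, C)) recovers
# the durations and censorship flags up to ordering, since every table row is expanded back
# into `observed` ones followed by `censored` zeros at that event time.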
def datetimes_to_durations(start_times, end_times, fill_date=datetime.today(), freq='D', dayfirst=False, na_values=None):
"""
This is a very flexible function for transforming arrays of start_times and end_times
to the proper format for lifelines: duration and censorship arrays.
Parameters:
start_times: an array, series or dataframe of start times. These can be strings, or datetimes.
end_times: an array, series or dataframe of end times. These can be strings, or datetimes.
These values can be None, or an empty string, which corresponds to censorship.
fill_date: the date to use if end_times is a None or empty string. This corresponds to last date
of observation. Anything above this date is also censored. Default: datetime.today()
freq: the units of time to use. See pandas 'freq'. Default 'D' for days.
    dayfirst: convert assuming European-style dates, i.e. day/month/year.
na_values : Additional string to recognize as NA/NaN. Ex: ['']
Returns:
T: a array of floats representing the durations with time units given by freq.
C: a boolean array of censorship: 1 if death observed, 0 else.
"""
freq_string = 'timedelta64[%s]' % freq
start_times = pd.Series(start_times).copy()
end_times = pd.Series(end_times).copy()
start_times_ = to_datetime(start_times, dayfirst=dayfirst)
C = ~(pd.isnull(end_times).values + (end_times == "") + (end_times == na_values))
end_times[~C] = fill_date
"""
c = (to_datetime(end_times, dayfirst=dayfirst, coerce=True) > fill_date)
end_times[c] = fill_date
C += c
"""
end_times_ = | to_datetime(end_times, dayfirst=dayfirst, coerce=True) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 16:11:46 2020
@author: <NAME> and <NAME>
Input file: gff file
Output files: tsv file
Description: Used to extract annotations from gff file for each gene/transcript (transcript_id, transcript_name, gene_id, gene_name, gene_type)
"""
import argparse
import pandas as pd
import os
# function to extract annotations from host gff file
def extract_gene_types_host(gff,gene_feature,gene_attribute):
gene_type_host = [] # initialize list of dictionaries with genes/transcripts and their annotations
for line in open(gff): #looping through GFF file
d = line.rstrip() #remove '\n'
if ((d[0] != '#') and (d != '')): # skip both commented and empty lines
d = d.split('\t') # separating tabbed words into a list
if d[2] in gene_feature: # check if gene feature (3rd column of gff file) is present in list of features used in quantification
d1 = d[8].split(';') # split column 8 by ;
# extract ID of gene attribute
ID_pos_host = [pos.split('=') for pos in d1 if pos.startswith(gene_attribute)][0][1]
# extract gene type
gt_host = [pos.split('=') for pos in d1 if pos.startswith('gene_type')][0][1]
if d[0] == 'chrM': # if gene is located on Mitochondrial chromosome, set gene type as mitoRNA
g_type = 'mitoRNA'
elif gt_host.startswith('Pseudo_'): # if gene type contains Pseudo_ prefix, set gene type as pseudogene
g_type = 'pseudogene'
elif 'tRNA' in gt_host: # if gene type contains tRNA, set gene type as tRNA
g_type = 'tRNA'
else:
g_type = gt_host # set original gene type
# extract gene_id
ID_gene_pos_host = [pos.split(' ') for pos in d1 if pos.startswith('gene_id')][0][0].split("=")[1]
# extract gene_name
gene_name_host = [pos.split(' ') for pos in d1 if pos.startswith('gene_name')][0][0].split("=")[1]
# if quantification is performed at gene-level, store results for genes (in case of HTSeq, attribute is set to 'gene_id')
if gene_attribute.startswith('gene'):
gene_type_host.append({'gene_id':ID_gene_pos_host,'gene_name':gene_name_host, 'gene_type':g_type})
                # store results for transcripts - in case of Salmon, attribute is set to 'parent'
else:
# extract transcript_name
transcript_name_host = [pos.split('=') for pos in d1 if pos.startswith('transcript_name')][0][1]
gene_type_host.append({'transcript_id' :ID_pos_host, 'transcript_name':transcript_name_host, 'gene_id':ID_gene_pos_host,'gene_name':gene_name_host, 'gene_type':g_type})
return gene_type_host
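# Depending on gene_attribute, each dict in the returned list carries either gene-level keys
# (gene_id, gene_name, gene_type) or transcript-level keys
# (transcript_id, transcript_name, gene_id, gene_name, gene_type).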
# function to extract annotations from pathogen gff file
def extract_gene_types_pathogen(gff,gene_feature,gene_attribute):
gene_type_pathogen = [] #create new list
for line in open(gff): #looping through GFF file
d = line.rstrip() #remove '\n'
if (not d.startswith('#') and (d != '')): #ignoring comments and blank lines
d = d.split('\t') #separating tabbed words into a list
if d[2] in gene_feature: #if values from 3rd col of GFF are in gene_feature
d1 = d[8].split(';') #split column 8 by ;
#Error handler to ignore rows that don't contain the same format as other rows
#This is a common issue in bacterial GTFs/GFFs which are often composed with non-uniform rows
try:
#further split contents from col 8 by '=', taking first occurance and 2nd value.
#ie, ID=1234 becomes 'ID', '1234', and '1234' is stored
ID_pos_pathogen = [pos.split('=') for pos in d1 if pos.startswith(gene_attribute)][0][1]
except Exception:
continue
                #Search for field 'Name/name' and store contents
                feature_name = [pos.split('=') for pos in d1 if pos.startswith(tuple(['name','Name']))]
                if feature_name:
                    feature_name_pathogen = feature_name[0][1]
                else:
                    feature_name_pathogen = ''
#Capture biotypes
if d[2] == 'sRNA':
g_type = 'sRNA'
elif d[2] == 'ncRNA':
g_type = 'ncRNA'
elif d[2] == 'tRNA':
g_type = 'tRNA'
elif d[2] == 'rRNA':
g_type = 'rRNA'
elif ([True for st in d1 if 'gene_biotype' in st]):
r_pos_host = [pos.split('=') for pos in d1 if pos.startswith('gene_biotype')]
g_type = r_pos_host[0][1]
else:
g_type = d[2]
gene_type_pathogen.append({gene_attribute: ID_pos_pathogen, 'name':feature_name_pathogen,'gene_type':g_type})
return gene_type_pathogen
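# Features without an explicit biotype annotation fall back to the raw feature type from
# column 3 of the GFF (e.g. 'CDS' or 'gene'), so every returned entry carries some
# gene_type value.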
parser = argparse.ArgumentParser(description="""extract annotations from gff""")
parser.add_argument("-gff", "--gff", metavar='<gff_annotations>', help="Path to gff file")
parser.add_argument("-f", "--gene_feature", nargs='+', help="gene features from 3rd column of gff file used for quantification")
parser.add_argument("-a", "--gene_attribute", help="gene attribute from 9th column of gff file")
parser.add_argument("-q_tool", "--quantifier", metavar='<quantifier>', help="name of quantifier")
parser.add_argument("-org", "--organism", help = 'host or pathogen')
parser.add_argument("-o", "--output", metavar='<output>', help="output file name")
args = parser.parse_args()
# create list of gene features used in quantification
gene_features = [feature.replace('[' , '').replace(']','').replace(',','') for feature in args.gene_feature]
if args.organism == 'pathogen':
# create dictionary of annotations for each gff entry with desired gene feature
gene_types = extract_gene_types_pathogen(args.gff,gene_features, args.gene_attribute)
# create data frame of quantified genes/transcripts with annotations (e.g. gene type and gene name)
gene_annotations_pathogen_df = | pd.DataFrame.from_dict(gene_types) | pandas.DataFrame.from_dict |
# coding: utf-8
# In[11]:
# First of all, we import all the necessary libs
import nltk
import re
import unicodedata
import string
from nltk.corpus import stopwords
from nltk.stem import LancasterStemmer, WordNetLemmatizer
import pandas as pd
import inflect
import pickle
import math
from scipy.spatial import distance
import heapq
from geopy import geocoders
import numpy as np
from geopy import distance as geodis
from IPython.display import clear_output
from termcolor import colored
from IPython.display import Markdown
import matplotlib.pyplot as plt
import folium
# This strings open a connection to GeoPy Database in order to get cities and addresses coordinates knowing their name
gn = geocoders.GeoNames(username = "clabat9")
gl = geocoders.Nominatim( user_agent = "<PASSWORD>")
# ---------- SECTION 1 : DOCUMENTS PREPROCESSING ----------
# F1 : This function removes stop words from list of tokenized words
def remove_stopwords(wrd):
new_wrd = [] #List of updated words
for word in wrd:
if word not in stopwords.words('english'): # If the current word is not a stopword (ckeck using nltk)
new_wrd.append(word) #appends it to the list
return new_wrd
# F2 : This function removes punctuation from list of tokenized words
def remove_punctuation(wrd):
new_wrds = [] #List of updated words
for word in wrd:
new_wrd = re.sub(r'[^\w\s]', '', word) # Replaces all punctuation word with "" using RegEx
if new_wrd != '':
new_wrds.append(new_wrd) #And then appends all words different from "" to the list
return new_wrds
# F3 : This function stems words in a list of tokenized words
def stem_words(wrd):
stemmer = LancasterStemmer() # Selects the stemmmer from nltk
stems = [] # List of updated words
for word in wrd:
stem = stemmer.stem(word) # Stems the word
stems.append(stem) # and appends it to the list
return stems
# F4 : This functions removes non ascii chars from a list of tokenized words
def remove_non_ascii(wrd):
new_wrds = [] # List of updated words
for word in wrd:
new_wrd = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore') # Filters non ascii chars
new_wrds.append(new_wrd) # Appends the word to the list
return new_wrds
# F5 : This function converts all characters to lowercase from a list of tokenized words
def to_lowercase(wrd):
new_wrds = [] # List of updated words
for word in wrd:
new_wrd = word.lower() # Converts the current word to lower case
new_wrds.append(new_wrd) # And append it to the list
return new_wrds
# F5 : This function replaces all integers occurences in list of tokenized words with textual representation
def replace_numbers(wrd):
d = inflect.engine() # Libs inflect contains what we need
new_wrds = [] # List of updated words
for word in wrd:
if word.isdigit(): # If the current word is a number
new_wrd = d.number_to_words(word) # Converts it to its textual representation
new_wrds.append(new_wrd) # And appends it to the list
else:
new_wrds.append(word) # If the current word is not a number appends it to the list
return new_wrds
# The following function takes a record of a dataFrame containing our docs and preprocesses its title and description
# with all the previous functions
def preProcessing (x):
x.fillna("*", inplace = True) # fills NA with "*"
xt = x["title"] # Takes title and description
xd = x["description"]
if xt != "*":
xt = nltk.word_tokenize(xt) # Tokenizes title using nltk
if xd != "*":
xd = nltk.word_tokenize(xd) # Tokenizes description using nltk
# Uses previous functions
xt = replace_numbers(xt)
xd = replace_numbers(xd)
xt = remove_stopwords(xt)
xd = remove_stopwords(xd)
xt = remove_punctuation(xt)
xd = remove_punctuation(xd)
xt = stem_words(xt)
xd = stem_words(xd)
xt = remove_non_ascii(xt)
xd = remove_non_ascii(xd)
xt = to_lowercase(xt)
xd = to_lowercase(xd)
x["title"] = xt
x["description"] = xd
return x # Returns the preprocessed doc
# This function takes the query and preprocesses it with all the previous methods
def query_preProcessing (x):
xt = nltk.word_tokenize(x) # Tokenizes query using nltk
# Uses previous functions
xt = replace_numbers(xt)
xt = remove_stopwords(xt)
xt = remove_punctuation(xt)
xt = stem_words(xt)
xt = remove_non_ascii(xt)
xt = to_lowercase(xt)
return xt
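# Illustrative sketch (exact tokens depend on the installed nltk data): for a query such as
# "2 bedrooms near the beach", digits are first expanded to words ('2' -> 'two'), stopwords
# and punctuation are removed, and the remaining words are stemmed, ASCII-normalized and
# lower-cased before matching against the index.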
# ---------- SECTION 2 : SPLITTING ----------
# F1 : This function takes a path of a DataFrame or the DataFrame itself and exports each one of its rows as a .tsv file
# Important : if the function receives both the path and the DataFrame, it will use the DataFrame option.
# For our purposes it is not fundamental to guarantee that the file is in the specified path or that the df is consistent,
# but it's clear that in a more general context will be useful to insert some simple code to catch errors.
def csv_to_tsv(path_of_the_doc_to_convert = None ,pdata = pd.DataFrame()):
if not pdata.empty : # If it receives a DataFrame
pdata.to_csv("processed_data.tsv",encoding = "utf-8", sep = "\t") # Saves it as a .tsv
f = open("processed_data.tsv","r", encoding = "utf-8") # Loads it
leng = 0 # Counter of the number of documents
for line in f: # For each record (document)
with open(r"D:\Claudio\Uni\M 1° anno Sapienza\AMDS\Homeworks\Hw3\ptsv\doc_"+str(leng)+".tsv", "w", encoding = "utf-8" ) as ftmp:
ftmp.write(line) # Saves the record as .tsv
leng += 1 # Update counter
return leng # Returns the number of documents
else: # If it receives a path
data = open(path_of_the_doc_to_convert,"r", encoding = "utf-8") # Opens the data in the specified path
leng = 0 # And makes the same procedure above
for line in data:
with open(r"D:\Claudio\Uni\M 1° anno Sapienza\AMDS\Homeworks\Hw3\tsv\doc_"+str(leng)+".tsv", "w", encoding = "utf-8" ) as ftmp:
ftmp.write(re.sub(r",","\t",line))
leng += 1
return leng
# ----------SECTION 3 : CREATION OF VOCABULARY, INVERTED INDECES AND SCORE FUNCTIONS----------
# This function takes the path where (preprocessed) documents are saved and their total number
# and returns the vocabulary of the indicated corpus
def create_vocabulary(number_of_docs, path):
vocabulary = {} # The vocabulary is a dictionary of the form "Word : word_id"
wid = 0 # word_id
for idx in range(1,number_of_docs): # for every document..
with open(path+"doc_"+str(idx)+".tsv", "r", encoding = "utf-8" ) as ftmp:
            first_line = ftmp.readline() # It opens the doc and reads the first line (in our case each doc consists of a single line)
desc = (first_line.split(sep = "\t"))[6] # Takes in account only title and description of the record
title = (first_line.split(sep = "\t"))[9]
# Following lines clean up some unuseful chars
desc = desc.split("'")
title = title.split("'")
foo = ["]","[",", "]
desc = list(filter(lambda x: not x in foo, desc))
title = list(filter(lambda x: not x in foo, title))
for word in title+desc: # For every word in title + description
if not word in list(vocabulary.keys()) : # if the word is not in the dic
vocabulary[word] = wid # adds it
wid += 1 # Update word_id
with open("vocabulary", "wb") as f :
pickle.dump(vocabulary, f) # Saves the vocabulary as a pickle
return vocabulary # Returns the vocabulary
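# The pickled vocabulary maps each distinct (stemmed, lower-cased) word to an integer id;
# these ids are later used to index the per-term IDF array when computing TFIDF weights.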
# This function creates the first inverted index we need in the form "word (key) : [list of docs that contain word] (value)".
# It takes the number of (preprocessed) docs and the path where they are saved and returns the reverted index as a dictionary.
def create_inverted_index(number_of_docs, path):
inverted_index = {} # Initializes the inverted index, in our case a dic
for idx in range(1,number_of_docs+1): # for every document
# Opens the doc, cleans it and extracts title and description as the previous function
with open(path+"doc_"+str(idx)+".tsv", "r", encoding = "utf-8" ) as ftmp:
first_line = ftmp.readline()
desc = (first_line.split(sep = "\t"))[6]
title = (first_line.split(sep = "\t"))[9]
desc = desc.split("'")
title = title.split("'")
foo = ["]","[",", "]
desc = list(filter(lambda x: not x in foo, desc))
title = list(filter(lambda x: not x in foo, title))
for word in title+desc: # for every word in title + description
if word in list(inverted_index.keys()) : # if the word is in the inverted index
inverted_index[word] = inverted_index[word] + ["doc_"+str(idx)] # adds the current doc to the list of docs that contain the word
else :
inverted_index[word] = ["doc_"+str(idx)] # else creates a record in the dic for the current word and doc
with open("inverted_index", "wb") as f :
pickle.dump(inverted_index, f) # Saves the inverted index as a pickle
return inverted_index # returns the inverted index
# This function takes a term, an inverted index and the total number of docs in the corpus to compute the IDF of the term
def IDFi(term, reverted_index, number_of_docs):
return math.log10(number_of_docs/len(reverted_index[term]))
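# Worked example: with 10,000 documents and a term appearing in 100 of them,
# IDF = log10(10000 / 100) = 2; a term present in every document gets IDF = 0
# and therefore contributes nothing to the TFIDF scores.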
# This function creates the second inverted index we need in the form "word (key) : [(doc that contains the word, TFIDF of the term in the doc),....]"
# It takes the number of (preprocessed) docs, the path where they are saved, the vocabulary and a list containig all the idfs and returns the reverted index as a dictionary.
def create_inverted_index_with_TFIDF(number_of_docs, path, vocabulary, idfi):
inverted_index2 = {} # Initializes the inverted index, in our case a dic
for idx in range(1, number_of_docs+1): # for every document
# Opens the doc, cleans it and extracts title and description as the previous function
with open(path+"doc_"+str(idx)+".tsv", "r", encoding = "utf-8" ) as ftmp:
first_line = ftmp.readline()
desc = (first_line.split(sep = "\t"))[6]
title = (first_line.split(sep = "\t"))[9]
desc = desc.split("'")
title = title.split("'")
foo = ["]","[",", "]
desc = list(filter(lambda x: not x in foo, desc))
title = list(filter(lambda x: not x in foo, title))
for word in title+desc: # for every word in title + description
                if word in list(inverted_index2.keys()) : # if the word is in the inverted index
# adds to the index line of the current word a tuple that contains the current doc and its TFID for the current word. It uses the vocabulary to get the index of the word
# in the IDF list.
inverted_index2[word] = inverted_index2[word] + [("doc_"+str(idx),((title+desc).count(word)/len(title+desc))*idfi[vocabulary[word]])] # Just applying the def
else :
# Makes the same initializing the index line of the current word
inverted_index2[word] = [("doc_"+str(idx),((title+desc).count(word)/len(title+desc))*idfi[vocabulary[word]])]
with open("inverted_index2", "wb") as f : # Saves the inverted index as a pickle
pickle.dump(inverted_index2, f)
# This function takes the two inverted indices , the (processed) query, the document the query has to be compared to and the vocabulary
# and returns the cosine similarity between them
def score(pquery, document, inverted_index, inverted_index_with_TFIDF, vocabulary, idfi):
    # The first vector is made of the TFIDF values of all the words in the query. To build it we use a simple list comprehension
    # that computes the TFIDF for every word in set(query), so the same word is not processed more than once
v1 = [((pquery.count(word)/len(pquery))*idfi[vocabulary[word]]) if word in vocabulary.keys() else 0 for word in set(pquery)]
v2 = []
    # We don't need to work on vectors in R^(number of distinct words in query+document) because, in that case, all elements that
    # are not simultaneously non zero contribute 0 to the computation of the similarity,
    # so we just need to work in R^(number of distinct words in query).
    # (the optimal solution would be to work in R^(dim of intersection of distinct words in query and distinct words in document))
# In the end, to build the vector associated to the doc:
    for word in set(pquery) : # for every distinct word in the query
if word in vocabulary.keys(): # if the word is in the corpus vocabulary
if document in inverted_index[word]: # if the document contains the word
idx = inverted_index[word].index(document) # gets the index of the doc in the second inverted index using the first inverted index
# order will be the same
v2.append(inverted_index_with_TFIDF[word][idx][1]) # appends the tfid of the current word for the selected doc
                                                                    # getting it from the second inverted index
            else: # if the doc doesn't contain the word the associated component is 0
v2.append(0)
        else: # if the word is not in the vocabulary the associated component of the doc vector is 0
v2.append(0)
if not all(v == 0 for v in v2): # if at least one word is in common
return (1 - distance.cosine(v1, v2)) # returns the cosine similarity
    else: # if the query and the doc have nothing in common, their similarity is 0
return 0
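# The value returned above is 1 - cosine distance, i.e. the cosine similarity; with
# non-negative TFIDF components it lies in [0, 1], and the no-overlap case is handled
# explicitly by returning 0.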
# This function implements our score function explained in the notebook. It takes the maximum rate the user prefers to spend, the number of
# beds the user prefers to have in their holiday house, the city they prefer to stay in and one of the docs that match their query, and returns its score.
def score_function(max_cash, pref_bed, pref_city, coords, result):
score = 0
    max_distance = 1298 # Normalization factor: distance in km between the two farthest points of Texas
cash = float(result["average_rate_per_night"].split("$")[1])
try :
bed = int(result["bedrooms_count"])
except :
bed = 0.5
if (cash < max_cash) & (cash > 0) :
score += (5)*math.exp(-cash/max_cash)
score += (4)*min(bed/pref_bed, pref_bed/bed)
coord = (result.loc["latitude"], result.loc["longitude"])
    score += 3*(1 - geodis.distance(coords, coord).km/max_distance)
return (100/12)*score
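# Rough worked example (hypothetical listing): with max_cash=100, a $50/night listing having
# exactly pref_bed bedrooms and located at the preferred coordinates scores about
# (100/12) * (5*exp(-0.5) + 4*1 + 3*1) ≈ 84 out of 100.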
# ----------SECTION 4: SEARCH ENGINES----------
# This function implements a search engine that returns the docs containing ALL the words in the query.
# It takes the path where (preprocessed) docs are saved and the inverted index above
# and returns the list of the names of the docs containing all the words of the query, a df containing all features of these docs
# (useful later) and a df containing only the requested features.
# We tried to have some fun inserting code that allows the user to retry the search if it returns no results.
def first_search_engine(inverted_index, path):
check = "y" # This var controls the logic of the multiple attempts
while check == "y": # while it is "y"
print(colored("Insert your query:", "blue", attrs =["bold","underline"]))#(Markdown('<span style="color: #800080">Insert your query: </span>')) # Get users query (asking in a nice colored way :) )
query = input()
pq = query_preProcessing(query) #Preprocesses the query
l = set() # our results are in a set
not_first_word = 0 # Var to know if it's the first word of the query
for el in pq: # for every word in the query
if el in list(inverted_index.keys()): # if the word is in at least one document
if not_first_word == 0: # If it's the first word of the query
l = set(inverted_index[el]) # Adds all the docs that contain the word to results
not_first_word += 1 # The next word is not the first
else : # If it isn't the first word
l = l.intersection(set(inverted_index[el])) # Takes the docs that contain the word in a set and intersect it with
# the set of the results
else: # if a word is not in the corpus there will be no results for this kind of search.
l = [] # empty list
break #exit the loop
if len(l) == 0: # If there are no results
print(colored("Your search did not bring results. Do you wanna try again? [y/n]", "red", attrs = ["underline"])) # Get users query (asking in a nice colored way :) )
            check = input() # asks the user whether they want to make another search
while (check != "y")&(check !="n") : # force the user to give an acceptable answer
print(colored("You can choose [y/n] !","red", attrs = ["underline"])) # Get users query (asking in a nice colored way :) )
check = input()
# If the user wants to retry, the while statement loops again
if check == "n": # If the user prefers to not retry
return [],l,pd.DataFrame(), pd.DataFrame() # returns empty data structures
else: # If there are results
res = []
for doc in l : # for every doc of the results creates a df ( a column for each feature, as in the original one)
res.append(pd.read_csv(path +doc+ ".tsv", sep = "\t", engine = "python", names = ["id","average_rate_per_night","bedrooms_count","city","date_of_listing","description","latitude","longitude","title","url"]))
complete_results = | pd.concat(res) | pandas.concat |
import types
from functools import wraps
import numpy as np
import datetime
import collections
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly, Appender
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'irow', 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'value_counts', 'unique', 'nunique',
'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
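# _groupby_function builds the simple whitelisted reductions below (sum, prod, min, max,
# first, last, _count) as thin wrappers: each tries the fast cython aggregation first and
# falls back to a python-level aggregate over every group if that path raises.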
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the target object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
    freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
    >>> df.groupby(Grouper(key='A')) : syntactic sugar for df.groupby('A')
>>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
>>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
specify a resample on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
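    # Passing freq makes __new__ swap in the TimeGrouper subclass, so
    # Grouper(key='date', freq='60s') acts as a resample specification inside groupby.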
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
        given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key],name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.get_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
return len(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
def convert(key, s):
            # possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif isinstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample = next(iter(self.indices))
if isinstance(sample, tuple):
if not isinstance(name, tuple):
raise ValueError("must supply a tuple to get_group with multiple grouping keys")
if not len(name) == len(sample):
raise ValueError("must supply a a same-length tuple to get_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample) ])
else:
name = convert(name, sample)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if len(groupers):
self._group_selection = (ax-Index(groupers)).tolist()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._apply_whitelist)))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __getitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to setup the selection
# as are not passed directly but in the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so need to raise ValueError if
# we don't have this method to indicated to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function ((f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment',None):
return self._python_apply_general(f)
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def mean(self):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def std(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.std(ddof=ddof)/np.sqrt(self.count())
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
_count = _groupby_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().astype('int64')
def ohlc(self):
"""
Compute sum of values, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
"""
Take the nth row from each group.
If dropna, will not show nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
>>> g.nth(1, dropna='any') # NaNs denote group exhausted when using dropna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not dropna: # good choice
m = self.grouper._max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._get_axis(self.axis)[is_nth]
result = result.sort_index()
return result
if (isinstance(self._selected_obj, DataFrame)
and dropna not in ['any', 'all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the dropped object
grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = dropped.groupby(grouper).size()
result = dropped.groupby(grouper).nth(n)
mask = (sizes<max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Example
-------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).tail(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
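    # _cumcount_array is the shared helper behind cumcount, head, tail and nth: it scatters a
    # per-group position (or the values of the template array `arr`) back onto the original
    # row order using self.indices.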
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
we may have roundtripped thru object in the mean-time
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
result = concat(values, axis=self.axis)
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = False
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except (Exception) as e:
# raise this error to the caller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.value_counts(labels, sort=False)
bin_counts = bin_counts.reindex(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _max_groupsize(self):
'''
Compute size of largest group
'''
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(all_labels)
labs, uniques = algos.factorize(tups)
if self.sort:
uniques, labs = _reorder_by_uniques(uniques, labs)
return labs, uniques
else:
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(len(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
recons = self.get_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def get_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _get_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def get_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
f = getattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
return getattr(_algos, fname, None)
ftype = self._cython_functions[how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this "
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.astype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._get_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _algos.groupsort_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bins[0]] and the last is values[bins[-1]:]
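Examples
--------
Illustrative only (values chosen for this example); with closed='left',
values[0:2] fall into the first bin and values[2:5] into the second:

    values = np.array([1, 2, 3, 4, 5, 6])
    binner = np.array([0, 3, 6])
    generate_bins_generic(values, binner, 'left')  # -> array([2, 5])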
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start,None)
def apply(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.get_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.append(key)
result_values.append(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def ngroups(self):
return len(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Series(np.zeros(len(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = len(v)
bin_counts = Series(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.astype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'mean': 'group_mean_bin',
'min': 'group_min_bin',
'max': 'group_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._get_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
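
Example (illustrative; the values below are made up, not part of this module):

    grouping = Grouping(Index(['a', 'b', 'a', 'b']),
                        grouper=np.array([0, 1, 0, 1]), name='key')
    grouping.ngroups      # -> 2
    grouping.labels       # -> array([0, 1, 0, 1])
    grouping.group_index  # -> Index([0, 1], name='key')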
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
# dependent on the passing in level
#
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
self._was_factor = True
# all levels may not be observed
labels, uniques = algos.factorize(inds, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, uniques = algos.factorize(inds[mask], sort=True)
labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif isinstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there any way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.levels
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not isinstance(self.grouper, (Series, Index, np.ndarray)):
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamps like
if getattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
return _groupby_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._was_factor: # pragma: no cover
raise Exception('Should not call this method when grouping by level')
else:
labels, uniques = algos.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
_groups = None
@property
def groups(self):
if self._groups is None:
self._groups = self.index.groupby(self.grouper)
return self._groups
def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis, level, and sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passed in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
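
For example (illustrative), df.groupby('A') on a DataFrame with a column 'A'
arrives here roughly as:

    grouper, exclusions, obj = _get_grouper(df, key='A')
    # grouper is a BaseGrouper holding a single Grouping on column 'A',
    # and exclusions == ['A']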
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_length = len(keys) == len(group_axis)
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if (not any_callable and not all_in_columns
and not any_arraylike and match_axis_length
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
for i, (gpr, level) in enumerate(zip(keys, levels)):
name = None
try:
obj._data.items.get_loc(gpr)
in_axis = True
except Exception:
in_axis = False
if _is_label_like(gpr) or in_axis:
exclusions.append(gpr)
name = gpr
gpr = obj[gpr]
if isinstance(gpr, Categorical) and len(gpr) != len(obj):
errmsg = "Categorical grouper must have len(grouper) == len(data)"
raise AssertionError(errmsg)
ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
def _is_label_like(val):
return isinstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper.values
else:
return grouper.reindex(axis).values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
class SeriesGroupBy(GroupBy):
_apply_whitelist = _series_apply_whitelist
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply aggregation function or functions to groups, yielding most likely
Series but in some cases DataFrame depending on the output of the
aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce DataFrame with column names
determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> series
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mapper = lambda x: x[0] # first letter
>>> grouped = series.groupby(mapper)
>>> grouped.aggregate(np.sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.sum, np.mean, np.std])
mean std sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
... 'total' : np.sum})
result total
b 2.121 3
q 4.95 7
See also
--------
apply, transform
Returns
-------
Series or DataFrame
"""
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
return ret
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return DataFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
# sort of a kludge
output = output[self.name]
index = self.grouper.result_index
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self.name)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
return DataFrame(values, index=index).stack()
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
# if string function
if isinstance(func, compat.string_types):
return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs))
# do we have a cython function
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
return self._transform_fast(cyfunc)
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to astype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.astype(common_type)
except:
pass
indexer = self._get_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self,func)
values = func().values
counts = self.count().values
values = np.repeat(values, com._ensure_platform_int(counts))
# the values/counts are repeated according to the group index
indices = self.indices
# shortcut if we have an already ordered grouper
if Index(self.grouper.group_info[0]).is_monotonic:
result = Series(values, index=self.obj.index)
else:
index = Index(np.concatenate([ indices[v] for v in self.grouper.result_index ]))
result = Series(values, index=index).sort_index()
result.index = self.obj.index
return result
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Example
-------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) if true_and_notnull(group) else []
for name, group in self]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._get_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.get_numeric_data(copy=False)
for block in data.blocks:
values = block._try_operate(block.values)
if block.is_numeric:
values = com.ensure_float(values)
result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None:
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
result = OrderedDict()
if isinstance(arg, dict):
if self.axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
if any(isinstance(x, (list, tuple, dict)) for x in arg.values()):
new_arg = OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
arg = new_arg
keys = []
if self._selection is not None:
subset = obj
if isinstance(subset, DataFrame):
raise NotImplementedError
for fname, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(subset, selection=self._selection,
grouper=self.grouper)
result[fname] = colg.aggregate(agg_how)
keys.append(fname)
else:
for col, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
result[col] = colg.aggregate(agg_how)
keys.append(col)
if isinstance(list(result.values())[0], DataFrame):
from pandas.tools.merge import concat
result = concat([result[k] for k in keys], keys=keys, axis=1)
else:
result = DataFrame(result)
elif isinstance(arg, list):
return self._aggregate_multiple_funcs(arg)
else:
cyfunc = _intercept_cython(arg)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs([arg])
result.columns = Index(result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
if isinstance(result.index, MultiIndex):
zipped = zip(result.index.levels, result.index.labels,
result.index.names)
for i, (lev, lab, name) in enumerate(zipped):
result.insert(i, name,
com.take_nd(lev.values, lab,
allow_fill=False))
result = result.consolidate()
else:
values = result.index.values
name = self.grouper.groupings[0].name
result.insert(0, name, values)
result.index = np.arange(len(result))
return result.convert_objects()
def _aggregate_multiple_funcs(self, arg):
from pandas.tools.merge import concat
if self.axis != 0:
raise NotImplementedError
obj = self._obj_with_exclusions
results = []
keys = []
for col in obj:
try:
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except SpecificationError:
raise
result = concat(results, keys=keys, axis=1)
return result
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
# for name in self.indices:
# data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors=None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors=e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
if len(keys) == 0:
# XXX
return DataFrame({})
key_names = self.grouper.names
if isinstance(values[0], DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
if com._count_not_none(*values) != len(values):
v = next(v for v in values if v is not None)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.tools.merge import concat
return concat(values)
if not all_indexed_same:
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as its faster than concat
# and if we have mi-columns
if not _np_version_under1p7 or isinstance(v.index,MultiIndex) or key_index is None:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = DataFrame(stacked_values,index=key_index,columns=index)
else:
# GH5788 instead of stacking; concat gets the dtypes correct
from pandas.tools.merge import concat
result = concat(values,keys=key_index,names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = DataFrame(stacked_values.T,index=v.index,columns=key_index)
except (ValueError, AttributeError):
# GH1738: values is a list of arrays of unequal lengths; fall
# through to the outer else clause
return Series(values, index=key_index)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
if (self._selected_obj.ndim == 2
and self._selected_obj.dtypes.isin(_DATELIKE_DTYPES).any()):
cd = 'coerce'
else:
cd = True
return result.convert_objects(convert_dates=cd)
else:
# only coerce dates if we find at least 1 datetime
cd = 'coerce' if any([ isinstance(v,Timestamp) for v in values ]) else False
return Series(values, index=key_index).convert_objects(convert_dates=cd)
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
from pandas.tools.merge import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except Exception: # pragma: no cover
res = fast_path(group)
path = fast_path
else:
res = path(group)
# broadcasting
if isinstance(res, Series):
if res.index.is_(obj.index):
group.T.values[:] = res
else:
group.values[:] = res
applied.append(group)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
concatenated.sort_index(inplace=True)
return concatenated
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed DataFrame on each group and
return a DataFrame having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each subframe
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
"""
# try to do a fast transform via merge if possible
try:
obj = self._obj_with_exclusions
if isinstance(func, compat.string_types):
result = getattr(self, func)(*args, **kwargs)
else:
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
result = getattr(self, cyfunc)()
else:
return self._transform_general(func, *args, **kwargs)
except:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
# nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
# a grouped that doesn't preserve the index, remap index based on the grouper
# and broadcast it
if ((not isinstance(obj.index,MultiIndex) and
type(result.index) != type(obj.index)) or
len(result.index) != len(obj.index)):
results = obj.values.copy()
for (name, group), (i, row) in zip(self, result.iterrows()):
indexer = self._get_index(name)
results[indexer] = np.tile(row.values,len(indexer)).reshape(len(indexer),-1)
return DataFrame(results,columns=result.columns,index=obj.index).convert_objects()
# we can merge the result in
# GH 7383
names = result.columns
result = obj.merge(result, how='outer', left_index=True, right_index=True).iloc[:,-result.shape[1]:]
result.columns = names
return result
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, compat.string_types):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we get the same results
if res.shape == res_fast.shape:
res_r = res.values.ravel()
res_fast_r = res_fast.values.ravel()
mask = notnull(res_r)
if (res_r[mask] == res_fast_r[mask]).all():
path = fast_path
except:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.append(i)
except Exception:
pass
if len(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return DataFrame(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
f : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Example
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, 'name', name)
res = func(group)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if (isinstance(res, (bool, np.bool_)) or
np.isscalar(res) and isnull(res)):
if res and | notnull(res) | pandas.core.common.notnull |
# -*- coding:utf-8 -*-
import math
from datetime import datetime
PI = math.pi
def delta_days(time_one, time_two):
if isinstance(time_one, str):
time_one = datetime.strptime(time_one, '%Y-%m-%d')
if isinstance(time_two, str):
time_two = datetime.strptime(time_two, '%Y-%m-%d')
return (time_one - time_two).days
def get_date_of_max_temp(temperature_dataframe):
ind = temperature_dataframe['ave_air_temp'].idxmax()
# idxmax returns an index label, so look it up with .loc rather than .iloc
date = temperature_dataframe['date'].loc[ind]
return date
def simulate_bottom_temperature(temperature_dataframe, kwargs, depth=100):
"""
Simulate the temperature of the bottom soil at the given depth.

temperature_dataframe : temperature data for one year
depth : depth in cm (default 100)
kwargs['average_thermal_conductivity'] : λa
kwargs['average_heat_capacity'] : Ca
"""
average_thermal_conductivity = kwargs.get('average_thermal_conductivity')
average_heat_capacity = kwargs.get('average_heat_capacity')
today = kwargs.get('today')
demp = math.sqrt((2 * average_thermal_conductivity / average_heat_capacity) / (2 * PI / 365))
tmp_var = depth / demp
ave_year_air_temp = temperature_dataframe['ave_air_temp'].mean()
air_temp_ampl = temperature_dataframe['ave_air_temp'].max() - temperature_dataframe['ave_air_temp'].min()
max_temp_date = get_date_of_max_temp(temperature_dataframe)
days = delta_days(today, max_temp_date)
# import pdb
# pdb.set_trace()
# Damped annual wave: T(z) = T_mean + A * exp(-z/d) * cos(2*pi*t/365 - z/d),
# where d is the damping depth computed above (demp) and z/d is tmp_var.
# Note: air_temp_ampl is the peak-to-peak range (max - min), not the half amplitude.
return ave_year_air_temp + air_temp_ampl * math.exp(-tmp_var) * math.cos(2 * PI * days / 365 - tmp_var)
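# Illustrative call (the values below are placeholders, not taken from the original
# project); year_df holds one year of data with 'date' and 'ave_air_temp' columns:
#
#     kwargs = {
#         'average_thermal_conductivity': 1.2,
#         'average_heat_capacity': 2.5e6,
#         'today': '2020-09-07',
#     }
#     bottom_temp = simulate_bottom_temperature(year_df, kwargs, depth=100)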
if __name__ == '__main__':
import pandas as pd
import os
filename = 'summary_report_2016-01-01_2020-09-07.txt'
origin_data = | pd.read_csv(filename, header=[0,1], sep=" ") | pandas.read_csv |
"""
This code is not for good model to ,
but to share with my teammate for another competitions under the rule of this competition.
In this script, I gave you a suggestion for extending or wrapping transformers, pipelines and estimator.
As you know, sklearn api having estimators, transformers and pipelines is very nice,
but lacks a flexibility to be used in a competition.
For example,
- can't update pandas or dask DataFrames.
- can't change row numbers (i.e. filter, reduce etc.)
- input validation (Of course, it is the good feature in writing production models.)
-
I recommend
- wrap
- extend
-
Caution!
- This code doesn't run to its end on this kernel because of the exceed of the memory limit.
- Only a tutorial implementation
- This code requires more refactoring
"""
import gc
import itertools
import logging
import os
import re
import sys
from abc import abstractmethod, ABCMeta, ABC
from multiprocessing.pool import Pool
from pathlib import Path
from time import perf_counter
from typing import Union
import keras
import lightgbm as lgb
import numpy as np
import pandas as pd
import pandas.tseries.offsets as offsets
import torch
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.utils import Sequence
from scipy import sparse
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.impute import SimpleImputer
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer, StandardScaler, OneHotEncoder
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate
from tqdm import tqdm
RANDOM_SEED = 10
NEXT_MKTRES_10 = "returnsOpenNextMktres10"
CATEGORY_START_END_PATTERN = r"[\{\}\']"
SPLIT_PATTERN = r"[{}']"
logger = logging.getLogger(__name__)
# logger.addHandler(logging.StreamHandler(sys.stdout))
# logger.addHandler(logging.FileHandler("main.log"))
try:
TEST_MARKET_DATA = Path(__file__).parent.joinpath("data/test/marketdata_sample.csv")
TEST_NEWS_DATA = Path(__file__).parent.joinpath("data/test/news_sample.csv")
except NameError as e:
TEST_MARKET_DATA = "data/test/marketdata_sample.csv"
TEST_NEWS_DATA = "data/test/news_sample.csv"
# MODEL_TYPE = "mlp"
# MODEL_TYPE = "lgb"
MODEL_TYPE = "sparse_mlp"
MARKET_ID = "market_id"
NEWS_ID = "news_id"
np.random.seed(10)
class FeatureSetting(object):
"""
Gathers the notable settings in one place.
Outside a kernel competition, these could be loaded from a configuration file.
"""
# remove_news_features = ["headline", "subjects", "headlineTag", "provider"]
remove_news_features = []
should_use_news_feature = True
remove_raw_for_lag = True
scale = True
scale_type = "standard"
# max_shift_date = 14
max_shift_date = 10
# since = date(2010, 1, 1)
since = None
should_use_prev_news = False
def main():
"""
Don't
:return:
"""
logger.info("This model type is {}".format(MODEL_TYPE))
# I don't recommend loading inside helper functions: if you want to gc the dataframes
# yourself, you would have to return them all the way back to main().
# Prefer a single object that stores the dfs.
env, market_train_df, news_train_df = load_train_dfs()
market_preprocess = MarketPreprocess()
market_train_df = market_preprocess.fit_transform(market_train_df)
news_preprocess = NewsPreprocess()
news_train_df = news_preprocess.fit_transform(news_train_df)
features = Features()
market_train_df, news_train_df = features.fit_transform(market_train_df, news_train_df)
logger.info("First feature extraction has done")
max_day_diff = 3
gc.collect()
if FeatureSetting.should_use_news_feature:
linker = MarketNewsLinker(max_day_diff)
linker.link(market_train_df, news_train_df)
del news_train_df
del market_train_df
gc.collect()
market_train_df = linker.create_new_market_df()
linker.clear()
gc.collect()
else:
linker = None
model = ModelWrapper.generate(MODEL_TYPE)
market_train_df, _ = model.create_dataset(market_train_df, features, train_batch_size=1024,
valid_batch_size=1024)
gc.collect()
model.train(sparse_input=True)
model.clear()
days = env.get_prediction_days()
predictor = Predictor(linker, model, features, market_preprocess, news_preprocess)
predictor.predict_all(days, env)
logger.info('Done!')
env.write_submission_file()
logger.info([filename for filename in os.listdir('.') if '.csv' in filename])
def measure_time(func):
def inner(*args, **kwargs):
start = perf_counter()
result = func(*args, **kwargs)
duration = perf_counter() - start
logger.info("%s took %.6f sec", func.__name__, duration)
return result
return inner
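# Example: decorating a function with @measure_time logs how long the call took, e.g.
#
#     @measure_time
#     def build_features(df):   # hypothetical function
#         ...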
class UnionFeaturePipeline(object):
def __init__(self, *args):
if args is None:
self.transformers = []
else:
self.transformers = list(args)
def transform(self, df, include_sparse=True):
feature_columns = []
for transformer in self.transformers:
if isinstance(transformer, NullTransformer):
transformer.transform(df)
elif isinstance(transformer, DfTransformer):
df = transformer.transform(df)
else:
feature_columns.append(transformer.transform(df))
if include_sparse:
return df, sparse.hstack(feature_columns, format="csr")
if len(feature_columns) == 0:
return df, None
return df, np.hstack(feature_columns)
def add(self, transformer):
self.transformers.append(transformer)
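# Illustrative usage of UnionFeaturePipeline (the transformer instances below are
# hypothetical stand-ins, not classes defined in this script):
#
#     pipeline = UnionFeaturePipeline(categorical_encoder, text_vectorizer)
#     pipeline.add(lag_feature_builder)
#     df, feature_matrix = pipeline.transform(df, include_sparse=True)
#     # df comes back (possibly modified) and feature_matrix is a scipy csr matrix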
def is_not_empty(list_like):
if list_like is None:
return False
if isinstance(list_like, np.ndarray) or sparse.issparse(list_like):
return list_like.shape[0] > 0
return len(list_like) > 0
class MarketNewsLinker(object):
"""
For complex join of dataframes,
It would be better two dataframe transformer class
"""
def __init__(self, max_day_diff):
self.market_df = None
self.news_df = None
self.market_columns = None
self.max_day_diff = max_day_diff
self.datatypes_before_aggregation = None
# self.concatable_features = concatable_fields
self.news_columns = None
def link_market_assetCode_and_news_assetCodes(self):
assetCodes_in_markests = self.market_df.assetCode.unique().tolist()
logger.info("assetCodes pattern in markets: {}".format(len(assetCodes_in_markests)))
assetCodes_in_news = self.news_df.assetCodes.unique()
assetCodes_in_news_size = len(assetCodes_in_news)
logger.info("assetCodes pattern in news: {}".format(assetCodes_in_news_size))
parse_multiple_codes = lambda codes: re.sub(SPLIT_PATTERN, "", str(codes)).split(", ")
parsed_assetCodes_in_news = [parse_multiple_codes(str(codes)) for codes in assetCodes_in_news]
# len(max(parsed_assetCodes_in_news, key=lambda x: len(x)))
# all_assetCode_type_in_news = list(set(itertools.chain.from_iterable(assetCodes_in_news)))
# check linking
links_assetCodes = [[[raw_codes, market_assetCode] for parsed_codes, raw_codes in
zip(parsed_assetCodes_in_news, assetCodes_in_news) if
str(market_assetCode) in parsed_codes] for market_assetCode in assetCodes_in_markests]
links_assetCodes = list(itertools.chain.from_iterable(links_assetCodes))
logger.info("links for assetCodes: {}".format(len(links_assetCodes)))
links_assetCodes = pd.DataFrame(links_assetCodes, columns=["newsAssetCodes", "marketAssetCode"],
dtype='category')
logger.info(links_assetCodes.shape)
# self.market_df = self.market_df.merge(links_assetCodes, left_on="assetCode", right_on="marketAssetCode",
# copy=False, how="left", left_index=True)
self.market_df = self.market_df.merge(links_assetCodes, left_on="assetCode", right_on="marketAssetCode",
copy=False, how="left")
logger.info(self.market_df.shape)
self.market_df.drop(["marketAssetCode"], axis=1, inplace=True)
def append_working_date_on_market(self):
self.market_df["date"] = self.market_df.time.dt.date
self.news_df["firstCreatedDate"] = self.news_df.firstCreated.dt.date
self.news_df.firstCreatedDate = self.news_df.firstCreatedDate.astype(np.datetime64)
working_dates = self.news_df.firstCreatedDate.unique()
working_dates.sort()
market_dates = self.market_df.date.unique().astype(np.datetime64)
market_dates.sort()
def find_prev_date(date):
# look back up to max_day_diff days for the most recent date that has news
for diff_day in range(1, self.max_day_diff + 1):
prev_date = date - np.timedelta64(diff_day, 'D')
if (working_dates == prev_date).any():
return prev_date
return None
prev_news_days_for_market_day = np.array([find_prev_date(date) for date in market_dates])
date_df = pd.DataFrame(columns=["date", "prevDate"])
date_df.date = market_dates
date_df.prevDate = prev_news_days_for_market_day
self.market_df.date = self.market_df.date.astype(np.datetime64)
self.market_df = self.market_df.merge(date_df, left_on="date", right_on="date", how="left")
def link_market_id_and_news_id(self):
logger.info("linking ids...")
self.news_columns = self.news_df.columns.tolist()
# merge market and news
market_link_columns = [MARKET_ID, "time", "newsAssetCodes", "date", "prevDate"]
news_link_df = self.news_df[["assetCodes", "firstCreated", "firstCreatedDate", NEWS_ID]]
self.news_df.drop(["assetCodes", "firstCreated", "firstCreatedDate"], axis=1, inplace=True)
link_df = self.market_df[market_link_columns].merge(news_link_df, left_on=["newsAssetCodes", "date"],
right_on=["assetCodes", "firstCreatedDate"], how='left')
link_df = link_df[link_df["time"] > link_df["firstCreated"]]
link_df.drop(["time", "newsAssetCodes", "date", "prevDate"], axis=1, inplace=True)
if FeatureSetting.should_use_prev_news:
prev_day_link_df = self.market_df[market_link_columns].merge(
news_link_df, left_on=["newsAssetCodes", "prevDate"],
right_on=["assetCodes", "firstCreatedDate"])
prev_day_link_df = prev_day_link_df[
prev_day_link_df["time"] - pd.Timedelta(days=1) < prev_day_link_df["firstCreated"]]
prev_day_link_df = prev_day_link_df.drop(
["time", "newsAssetCodes", "date", "prevDate"], axis=1, inplace=True)
del news_link_df
gc.collect()
if FeatureSetting.should_use_prev_news:
# link_df = pd.concat([link_df, prev_day_link_df])
link_df = link_df.append(prev_day_link_df)
del prev_day_link_df
gc.collect()
self.market_df = self.market_df.merge(link_df, on=MARKET_ID, how="left", copy=False)
# self.market_df = self.market_df.merge(link_df, on=MARKET_ID, how="left")
del link_df
gc.collect()
logger.info("shape after append news" + str(self.market_df.shape))
def aggregate_day_asset_news(self):
logger.info("aggregating....")
agg_func_map = {column: "mean" for column in self.market_df.columns.tolist()
if column == "marketCommentary" or column not in self.market_columns}
agg_func_map.update({col: "first"
for col in self.market_columns})
agg_func_map[NEWS_ID] = lambda x: x.tolist()
logger.info(agg_func_map)
logger.info(self.market_df.dtypes)
self.market_df = self.market_df.groupby(MARKET_ID).agg(agg_func_map)
logger.info("the aggregation for each group has done")
self._update_inner_data()
def _update_inner_data(self):
self.market_columns = self.market_df.columns.tolist()
@measure_time
def link(self, market_df, news_df, pool=4):
self.market_df = market_df
self.news_df = news_df
self.pool = pool
self.market_columns = self.market_df.columns.tolist()
self.datatypes_before_aggregation = {col: t for col, t in zip(self.market_columns, self.market_df.dtypes)}
self.datatypes_before_aggregation.update(
{col: t for col, t in zip(self.news_df.columns, self.news_df.dtypes)}
)
self.link_market_assetCode_and_news_assetCodes()
self.append_working_date_on_market()
return self.link_market_id_and_news_id()
@measure_time
def create_new_market_df(self):
logger.info("updating market df....")
dropped_columns = ["date", "prevDate", "newsAssetCodes",
"assetCodes",
"firstCreated", "firstCreatedDate"]
logger.info(self.market_df.columns)
self.market_df.drop(dropped_columns, axis=1, inplace=True)
self.market_df.sort_values(by=MARKET_ID, inplace=True)
self.aggregate_day_asset_news()
logger.info("linking done")
return self.market_df
def clear(self):
del self.market_df
self.market_df = None
self.news_df = None
self.market_columns = None
self.datatypes_before_aggregation = None
def compress_dtypes(news_df):
for col, dtype in zip(news_df.columns, news_df.dtypes):
if dtype == np.dtype('float64'):
news_df[col] = news_df[col].astype("float32")
if dtype == np.dtype('int64'):
news_df[col] = news_df[col].astype("int32")
def load_train_dfs():
"""
Define a switchable loader so we can debug locally with the sample data.
:return:
"""
try:
from kaggle.competitions import twosigmanews
env = twosigmanews.make_env()
(market_train_df, news_train_df) = env.get_training_data()
except:
market_train_df = pd.read_csv(TEST_MARKET_DATA, encoding="utf-8", engine="python")
news_train_df = pd.read_csv(TEST_NEWS_DATA, encoding="utf-8", engine="python")
env = None
return env, market_train_df, news_train_df
class TorchDataset(Dataset):
def __init__(self, matrix, labels, transformers=None):
self._matrix = matrix
self._labels = labels
self._transformers = transformers
self.n_features = matrix.shape[-1]
def __getitem__(self, index):
item = self._matrix[index, :]
if self._transformers is None:
return item, torch.Tensor(self._labels[index:index + 1])
return self._transformers(item), torch.Tensor(self._labels[index:index + 1])
def __len__(self):
return self._matrix.shape[0]
class TorchDataLoader(DataLoader):
def __init__(self, dataset: TorchDataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
num_workers=0,
collate_fn=default_collate, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None):
super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler, num_workers, collate_fn, pin_memory,
drop_last, timeout, worker_init_fn)
def __len__(self):
return len(self.dataset)
def create_data_loader(matrix: Union[np.ndarray, sparse.coo_matrix, sparse.csr_matrix],
labels: np.ndarray, batch_size: int, shuffle: bool):
if np.isnan(labels).any():
raise ValueError("remove nan from labels")
if isinstance(matrix, np.ndarray):
if np.isnan(matrix).any():
raise ValueError("remove nan from feature matrix")
def transformers(item):
item = item.astype("float32")
if sparse.issparse(matrix):
return torch.from_numpy(item.todense().A1)
item = torch.from_numpy(item)
return item
dataset = TorchDataset(matrix, labels.astype("uint8").reshape((-1, 1)), transformers)
return TorchDataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
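# Illustrative call (shapes and values are made up):
#
#     X = sparse.random(1000, 50, density=0.1, format="csr")
#     y = np.random.randint(0, 2, size=1000)
#     train_loader = create_data_loader(X, y, batch_size=256, shuffle=True)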
class BaseMLPClassifier(nn.Module):
def __init__(self, fc_layer_params: list):
super().__init__()
layers = [
nn.Sequential(
nn.Linear(**params),
nn.BatchNorm1d(params["out_features"]),
nn.ReLU(),
nn.Dropout(0.4)
)
for i, params in enumerate(fc_layer_params[:-1])
]
for layer in layers:
layer.apply(self.init_weights)
self.fc_layers = nn.Sequential(*layers)
self.output_layer = nn.Linear(**fc_layer_params[-1])
# if self.output_layer.out_features == 1:
self.sigmoid = nn.Sigmoid()
@staticmethod
def init_weights(m):
if isinstance(m, nn.Linear):
nn.init.xavier_uniform(m.weight.data)
m.bias.data.zero_()
def forward(self, x):
out = self.fc_layers(x)
out = self.output_layer(out)
out = self.sigmoid(out)
return out
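# Illustrative sketch (layer sizes are arbitrary): fc_layer_params is a list of nn.Linear kwargs;
# every entry but the last becomes a Linear+BatchNorm+ReLU+Dropout block, and the last entry
# becomes the sigmoid output layer:
#   clf = BaseMLPClassifier([
#       {"in_features": 10, "out_features": 32, "bias": True},
#       {"in_features": 32, "out_features": 1, "bias": True},
#   ])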
class BaseMLPTrainer(object):
def __init__(self, model, loss_function, score_function, optimizer_factory):
self.model: nn.Module = model
self.loss_function = loss_function
self.score_function = score_function
self.optimiser = optimizer_factory(self.model)
self.train_data_loader = None
self.valid_data_loader = None
self.n_epoch = None
self._current_epoch = 0
self.train_losses = []
self.train_scores = []
self.valid_losses = []
self.valid_scores = []
self._current_max_valid_score = 0
self._early_stop_count = 0
self.save_name = "twosigma.model"
def train(self, train_data_loader, valid_data_loader, n_epochs):
self.clear_history()
self.train_data_loader = train_data_loader
self.valid_data_loader = valid_data_loader
self.n_epoch = n_epochs
logger.info("train with: {}".format(self.train_data_loader.dataset._matrix.shape))
logger.info("valid with: {}".format(self.valid_data_loader.dataset._matrix.shape))
iterator = tqdm(range(n_epochs))
for epoch in iterator:
self._current_epoch = epoch + 1
logger.info("training %d epoch / n_epochs", self._current_epoch)
self._train_epoch()
self._valid_epoch()
if self.valid_scores[-1] <= self._current_max_valid_score:
self._early_stop_count += 1
else:
logger.info("validation score is improved from %.3f to %.3f",
self._current_max_valid_score, self.valid_scores[-1])
self._current_max_valid_score = self.valid_scores[-1]
self._early_stop_count = 0
self.save_models()
if self._early_stop_count >= 10:
logger.info("======early stopped=====")
self.model.load_state_dict(torch.load(self.save_name))
iterator.close()
break
logger.info("train done!")
def clear_history(self):
self.n_epoch = None
self._current_epoch = 0
self.train_losses = []
self.train_scores = []
self.valid_losses = []
self.valid_scores = []
self._current_max_valid_score = 0
self._early_stop_count = 0
def _train_epoch(self):
self.model.train()
total_loss = 0.0
for i, data in enumerate(self.train_data_loader):
inputs, labels = data
# print("batch data size {}".format(inputs.size()))
self.optimiser.zero_grad()
outputs = self.model(inputs)
loss = self.loss_function(outputs, labels)
loss.backward()
self.optimiser.step()
total_loss += loss.item()
if i % 2000 == 1999:
logger.info('[%d, %5d] loss: %.7f' %
(self._current_epoch, i + 1, total_loss / (i + 1)))
avg_loss = total_loss / len(self.train_data_loader)
logger.info("******train loss at epoch %d: %.7f :" % (self._current_epoch, avg_loss))
self.train_losses.append(avg_loss)
def _valid_epoch(self):
total_loss = 0.0
all_labels = []
all_outputs = []
self.model.eval()
for i, data in enumerate(self.valid_data_loader):
inputs, labels = data
outputs = self.model(inputs)
all_labels.append(labels.detach().numpy())
all_outputs.append(outputs.detach().numpy())
loss = self.loss_function(outputs, labels)
total_loss += loss.item()
if i % 2000 == 1999:
logger.info('[%d, %5d] validation loss: %.7f' %
(self._current_epoch, i + 1, total_loss / (i + 1)))
avg_loss = total_loss / len(self.valid_data_loader)
self.valid_losses.append(avg_loss)
logger.info("******valid loss at epoch %d: %.7f :" % (self._current_epoch, avg_loss))
all_outputs = np.vstack(all_outputs).reshape((-1))
all_labels = np.vstack(all_labels).reshape((-1))
score = self.score_function(all_outputs, all_labels)
logger.info("******valid score at epoch %d: %.3f :" % (self._current_epoch, score))
self.valid_scores.append(score)
def save_models(self):
torch.save(self.model.state_dict(), self.save_name)
logger.info("Checkpoint saved")
class ModelWrapper(ABC):
"""
    Wrap models to absorb differences between the underlying libraries.
    Implementations should follow sklearn's estimator and transformer interface.
"""
def __init__(self, **kwargs):
self.model = None
super().__init__(**kwargs)
@abstractmethod
def predict(self, X: np.ndarray):
return None
@abstractmethod
def train(self, **kwargs):
return self
@staticmethod
def generate(model_type):
if model_type == "lgb":
return LgbWrapper()
elif model_type == "mlp":
return MLPWrapper()
elif model_type == "sparse_mlp":
return SparseMLPWrapper()
else:
raise ValueError("unknown model type: {}".format(model_type))
@staticmethod
def split_train_validation(train_X, train_Y, train_size):
train_size = int(len(train_X) * train_size)
valid_X, valid_Y = train_X[train_size:], train_Y[train_size:]
train_X, train_Y = train_X[:train_size], train_Y[:train_size]
return train_X, valid_X, train_Y, valid_Y
@staticmethod
def to_x_y(df):
def to_Y(df):
return np.asarray(df.confidence)
train_Y = to_Y(df=df)
df.drop(["confidence"], axis=1, inplace=True)
market_obs_ids = df[MARKET_ID]
df.drop([MARKET_ID], axis=1, inplace=True)
return train_Y, market_obs_ids
@abstractmethod
def create_dataset(self, market_train, features, train_batch_size, valid_batch_size):
return None, None
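# Hedged usage sketch (market_train, features and the batch sizes are placeholders):
# ModelWrapper.generate is the factory entry point, matching the keys handled above.
#   model = ModelWrapper.generate("lgb")
#   model.create_dataset(market_train, features, train_batch_size=1024, valid_batch_size=1024)
#   model.train()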
class MLPWrapper(ModelWrapper):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def predict(self, x: Union[np.ndarray, sparse.spmatrix]):
logger.info("predicting %d samples...".format(x.shape[0]))
self.model.eval()
if sparse.issparse(x):
x = x.todense()
x = torch.from_numpy(x.astype("float32"))
return self.model(x).detach().numpy().reshape((-1))
def train(self, **kwargs):
classes = 1
model = BaseMLPClassifier(
[{"in_features": self.train_data_loader.dataset.n_features, "out_features": 128, "bias": True},
{"in_features": 128, "out_features": 64, "bias": True},
{"in_features": 64, "out_features": 16, "bias": True},
{"in_features": 16, "out_features": classes, "bias": True},
]
)
def score_function(predicted, labels):
return roc_auc_score(labels, predicted)
optimizer_factory = lambda model: optim.Adam(model.parameters(), lr=1e-3, weight_decay=0.0001)
trainer = BaseMLPTrainer(model, loss_function=nn.BCELoss(),
score_function=score_function,
optimizer_factory=optimizer_factory)
trainer.train(self.train_data_loader, self.valid_data_loader, 50)
self.model = model
return self
def create_dataset(self, market_train, features, train_batch_size, valid_batch_size):
        # to_x_y returns (labels, market ids) and drops those columns from the frame in place
        labels, market_obs_ids = ModelWrapper.to_x_y(market_train)
logger.info("concatenating train x....")
market_train = market_train.astype("float32")
if is_not_empty(features):
features = features.astype("float32")
market_train = sparse.hstack([market_train, features], format="csr")
market_train, valid_matrix, labels, valid_labels, = ModelWrapper.split_train_validation(
market_train,
labels,
train_size=0.8)
logger.info("creating torch dataset....")
market_train = market_train.astype("float32")
valid_matrix = valid_matrix.astype("float32")
self.train_data_loader = create_data_loader(market_train, labels, batch_size=train_batch_size, shuffle=True)
self.valid_data_loader = create_data_loader(valid_matrix, valid_labels, batch_size=valid_batch_size,
shuffle=True)
logger.info("torch dataset is created!")
return None, None
class LgbWrapper(ModelWrapper):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@measure_time
def train(self, **kwargs):
hyper_params = {"objective": "binary", "boosting": "gbdt", "num_iterations": 500,
"learning_rate": 0.2, "num_leaves": 2500,
"num_threads": 2, "max_bin": 205, 'min_data_in_leaf': 210,
"seed": RANDOM_SEED, "early_stopping_round": 10
}
# ## train
model = lgb.train(params=hyper_params, train_set=self.x, valid_sets=[self.valid_X])
for feature, imp in zip(model.feature_name(), model.feature_importance()):
logger.info("{}: {}".format(feature, imp))
#
del self.x
del self.valid_X
gc.collect()
self.model = model
return self
def predict(self, X):
return self.model.predict(X)
def create_dataset(self, df, features, train_batch_size, valid_batch_size):
y, self.market_obs_ids = ModelWrapper.to_x_y(df)
train_size = 0.8
self.x, self.valid_X, y, valid_Y = ModelWrapper.split_train_validation(
features, y,
train_size
)
self.x = lgb.Dataset(self.x, label=y,
free_raw_data=False)
self.valid_X = self.x.create_valid(self.valid_X, label=valid_Y)
del valid_Y
return None, None
class Preprocess(object):
def __init__(self):
self.transformers = []
def fit_transform(self, df: pd.DataFrame):
for new_col_name, transformer, col_name in self.transformers:
if not new_col_name:
new_col_name = col_name
df[new_col_name] = transformer.fit_transform(to_2d_array(df[col_name]))
return df
def transform(self, df: pd.DataFrame):
for new_col_name, transformer, col_name in self.transformers:
if not new_col_name:
new_col_name = col_name
df[new_col_name] = transformer.transform(to_2d_array(df[col_name]))
return df
class MarketPreprocess(Preprocess):
def __init__(self):
super().__init__()
transformers = []
if FeatureSetting.since is not None:
transformers.append(DateFilterTransformer(FeatureSetting.since, "time"))
transformers.extend([
IdAppender(MARKET_ID),
ConfidenceAppender(),
LagAggregationTransformer(lags=[3, 5, 10], shift_size=1, scale=True, n_pool=3)
])
self.pipeline: UnionFeaturePipeline = UnionFeaturePipeline(
*transformers
)
def fit_transform(self, df: pd.DataFrame):
df = super().fit_transform(df)
return self.pipeline.transform(df, include_sparse=False)[0]
def transform(self, df: pd.DataFrame):
df = super().transform(df)
return self.pipeline.transform(df, include_sparse=False)[0]
class NewsPreprocess(Preprocess):
def __init__(self):
super().__init__()
transformers = []
if FeatureSetting.since is not None:
transformers.append(DateFilterTransformer(FeatureSetting.since, "firstCreated"))
transformers.append(IdAppender(NEWS_ID))
self.pipeline: UnionFeaturePipeline = UnionFeaturePipeline(
*transformers
)
def fit_transform(self, df: pd.DataFrame):
df = super().fit_transform(df)
return self.pipeline.transform(df, include_sparse=False)[0]
def transform(self, df: pd.DataFrame):
df = super().transform(df)
return self.pipeline.transform(df, include_sparse=False)[0]
class ReshapeInto2d(FunctionTransformer):
def __init__(self,
kw_args=None, inv_kw_args=None):
validate = False
inverse_func = None
accept_sparse = False
pass_y = 'deprecated'
super().__init__(to_2d_array, inverse_func, validate, accept_sparse, pass_y, kw_args, inv_kw_args)
def to_2d_array(x):
array = x
if isinstance(x, pd.Series):
array = array.values
if len(array.shape) == 1:
array = array.reshape((-1, 1))
return array
class LogTransformer(FunctionTransformer):
def __init__(self,
kw_args=None, inv_kw_args=None):
validate = False
inverse_func = None
accept_sparse = False
pass_y = 'deprecated'
super().__init__(LogTransformer.to_log, inverse_func, validate, accept_sparse, pass_y, kw_args, inv_kw_args)
@staticmethod
def to_log(x):
input_ = x
# input_ = input_
return np.log1p(input_)
class RavelTransformer(FunctionTransformer):
def __init__(self,
kw_args=None, inv_kw_args=None):
validate = False
inverse_func = None
accept_sparse = False
pass_y = 'deprecated'
super().__init__(RavelTransformer.f, inverse_func, validate, accept_sparse, pass_y, kw_args, inv_kw_args)
@staticmethod
def f(x):
return x.ravel()
class TahnEstimators(BaseEstimator, TransformerMixin):
"""
    Tanh-estimator normalization; references:
https://stats.stackexchange.com/questions/7757/data-normalization-and-standardization-in-neural-networks
https://stackoverflow.com/questions/43061120/tanh-estimator-normalization-in-python
"""
def __init__(self):
self.std_ = None
self.mean_ = None
self.n_seen_samples = None
def fit(self, X, y=None):
self.mean_ = np.mean(X)
self.std_ = np.std(X)
return self
def transform(self, X, copy=None):
return 0.5 * (np.tanh(0.01 * (to_2d_array(X) - self.mean_) / self.std_) + 1)
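# Added note: this is the tanh-estimator scaling from the links above,
# x' = 0.5 * (tanh(0.01 * (x - mean) / std) + 1), which squashes values into (0, 1):
#   scaler = TahnEstimators().fit(np.array([1.0, 2.0, 3.0]))
#   scaled = scaler.transform(np.array([1.0, 2.0, 3.0]))  # values close to 0.5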
class Features(object):
def __init__(self):
self.market_transformer = MarketFeatureTransformer()
self.news_transformer = NewsFeatureTransformer()
def fit(self, market_train_df: pd.DataFrame, news_train_df: pd.DataFrame):
self.market_transformer.fit(market_train_df)
self.news_transformer.fit(news_train_df)
logger.info("feature fitting has done")
return self
def transform(self, market_train_df: pd.DataFrame, news_train_df: pd.DataFrame):
logger.info("transforming into feature")
return self.market_transformer.transform(market_train_df), self.news_transformer.transform(news_train_df)
def fit_transform(self, market_train_df: pd.DataFrame, news_train_df: pd.DataFrame):
return self.fit(market_train_df, news_train_df).transform(market_train_df, news_train_df)
def get_linked_feature_matrix(self, link_df, market_indices=None):
# print(link_df)
link_df, news_feature_matrix = self.news_transformer.post_link_transform(link_df)
# return sparse.hstack([self.market_transformer.feature_matrix, news_feature_matrix], dtype="float32",
# format="csr")
# return sparse.hstack([self.market_transformer.feature_matrix, news_feature_matrix], dtype="uint8",
# format="csr")
if market_indices is None and isinstance(link_df, pd.DataFrame):
market_indices = link_df[MARKET_ID].tolist()
return np.hstack([self.market_transformer.feature_matrix[market_indices], news_feature_matrix])
def clear(self):
self.market_transformer.clear()
self.news_transformer.clear()
def get_feature_num(self):
return self.market_transformer.feature_matrix.shape[1] + self.news_transformer.feature_matrix.shape[1]
def log_object_sizes():
    # dir() inside a function only sees its own locals, so iterate over the module globals instead
    for name, obj in globals().items():
        logger.info("{}: {}".format(name, sys.getsizeof(obj) / 1000000000))
class FeatureTransformer(metaclass=ABCMeta):
@abstractmethod
def transform(self, df):
pass
def fit(self, df):
pass
@abstractmethod
def release_raw_field(self, df):
pass
def fit_transform(self, df):
pass
class NullTransformer(FeatureTransformer):
def transform(self, df):
pass
def release_raw_field(self, df):
pass
class DfTransformer(FeatureTransformer):
def transform(self, df):
return df
class DropColumnsTransformer(NullTransformer):
def __init__(self, columns):
self.columns = columns
def transform(self, df):
df.drop(self.columns, axis=1, inplace=True)
gc.collect()
class DateFilterTransformer(DfTransformer):
def __init__(self, since_date, column="time"):
self.since_date = since_date
self.column = column
def transform(self, df):
df = df[df[self.column].dt.date >= self.since_date]
return df
def release_raw_field(self, df):
pass
# based on https://www.kaggle.com/qqgeogor/eda-script-67
class LagAggregationTransformer(DfTransformer):
LAG_FEATURES = ['returnsClosePrevMktres10', 'returnsClosePrevRaw10', 'open', 'close']
def __init__(self, lags, shift_size, scale=True, remove_raw=False, n_pool=4):
self.lags = lags
self.shift_size = shift_size
self.scale = scale
if scale:
self.scaler = None
self.remove_raw = remove_raw
self.imputer = None
self.n_pool = n_pool
@measure_time
def transform(self, df, n_pool=None):
        # only override the configured pool size when one is explicitly passed
        if n_pool is not None:
            self.n_pool = n_pool
df.sort_values(by="time", axis=0, inplace=True)
logger.info("start extract lag...")
group_features = [MARKET_ID, "time", "assetCode"] + self.LAG_FEATURES
asset_code_groups = df[group_features].groupby("assetCode")
asset_code_groups = [asset_code_group[1][group_features]
for asset_code_group in asset_code_groups]
with Pool(self.n_pool) as pool:
group_dfs = pool.map(self.extract_lag, asset_code_groups)
group_dfs = pd.concat(group_dfs)
group_dfs.drop(["time", "assetCode"] + self.LAG_FEATURES, axis=1, inplace=True)
df = df.merge(group_dfs, how="left", copy=False, on=MARKET_ID)
new_columns = list(itertools.chain.from_iterable(
[['%s_lag_%s_mean' % (col, lag), '%s_lag_%s_max' % (col, lag), '%s_lag_%s_min' % (col, lag)]
for col, lag in itertools.product(self.LAG_FEATURES, self.lags)]))
if self.remove_raw:
df.drop(self.LAG_FEATURES, axis=1, inplace=True)
for col in new_columns:
df[col] = df[col].astype("float32")
logger.info("Lag Aggregation has done")
return df
def extract_lag(self, asset_code_group):
for col in self.LAG_FEATURES:
for lag in self.lags:
rolled = asset_code_group[col].shift(self.shift_size).rolling(window=lag)
lag_mean = rolled.mean()
lag_max = rolled.max()
lag_min = rolled.min()
# lag_std = rolled.std()
asset_code_group['%s_lag_%s_mean' % (col, lag)] = lag_mean
asset_code_group['%s_lag_%s_max' % (col, lag)] = lag_max
asset_code_group['%s_lag_%s_min' % (col, lag)] = lag_min
return asset_code_group
def release_raw_field(self, df):
pass
def fit_transform(self, df):
return self.transform(df)
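# Added note: with lags=[3, 5, 10] this emits columns such as 'close_lag_5_mean',
# 'close_lag_5_max' and 'close_lag_5_min' for every entry in LAG_FEATURES; the rolling window
# is shifted by shift_size so the current row does not leak into its own aggregate.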
class IdAppender(DfTransformer):
def __init__(self, id_name):
super().__init__()
self.id_name = id_name
def transform(self, df):
df[self.id_name] = df.index.astype("int32")
return df
def release_raw_field(self, df):
pass
def fit_transform(self, df):
return self.transform(df)
class ConfidenceAppender(DfTransformer):
def transform(self, df):
if NEXT_MKTRES_10 in df.columns:
df["confidence"] = df[NEXT_MKTRES_10] >= 0
return df
def release_raw_field(self, df):
pass
def fit_transform(self, df):
return self.transform(df)
class LimitMax(FunctionTransformer):
def __init__(self, upper_limit,
kw_args=None, inv_kw_args=None):
validate = False
inverse_func = None
accept_sparse = False
pass_y = 'deprecated'
self.upper_limit = upper_limit
super().__init__(self.f, inverse_func, validate, accept_sparse, pass_y, kw_args, inv_kw_args)
def f(self, X, y=None):
        # cap values at upper_limit
        return np.where(X >= self.upper_limit, self.upper_limit, X).reshape((-1, 1))
class NullTransformer(FunctionTransformer):
def __init__(self, kw_args=None, inv_kw_args=None):
validate = False
inverse_func = None
accept_sparse = False
pass_y = 'deprecated'
super().__init__(self.f, inverse_func, validate, accept_sparse, pass_y, kw_args, inv_kw_args)
def f(self, X, y=None):
return X
class WeekDayTransformer(FunctionTransformer):
def __init__(self, kw_args=None, inv_kw_args=None):
validate = False
inverse_func = None
accept_sparse = False
pass_y = 'deprecated'
super().__init__(self.f, inverse_func, validate, accept_sparse, pass_y, kw_args, inv_kw_args)
def f(self, X, y=None):
return np.cos(pd.Series(X).dt.dayofweek.values / 7).astype("float32").reshape((-1, 1))
class MonthTransformer(FunctionTransformer):
def __init__(self, kw_args=None, inv_kw_args=None):
validate = False
inverse_func = None
accept_sparse = False
pass_y = 'deprecated'
super().__init__(self.f, inverse_func, validate, accept_sparse, pass_y, kw_args, inv_kw_args)
def f(self, X, y=None):
return np.cos(pd.Series(X).dt.month.values / 12).astype("float32").reshape((-1, 1))
class DayTransformer(FunctionTransformer):
def __init__(self, kw_args=None, inv_kw_args=None):
validate = False
inverse_func = None
accept_sparse = False
pass_y = 'deprecated'
super().__init__(self.f, inverse_func, validate, accept_sparse, pass_y, kw_args, inv_kw_args)
def f(self, X, y=None):
return np.cos(pd.Series(X).dt.day.values / 31).astype("float32").reshape((-1, 1))
class MarketFeatureTransformer(DfTransformer):
NUMERIC_COLUMNS = ['volume', 'close', 'open',
'returnsClosePrevRaw1', 'returnsOpenPrevRaw1',
'returnsClosePrevMktres1', 'returnsOpenPrevMktres1',
'returnsClosePrevRaw10', 'returnsOpenPrevRaw10',
'returnsClosePrevMktres10', 'returnsOpenPrevMktres10']
LAG_FEATURES = new_columns = list(itertools.chain.from_iterable(
[['%s_lag_%s_mean' % (col, lag), '%s_lag_%s_max' % (col, lag), '%s_lag_%s_min' % (col, lag)]
for col, lag in itertools.product(
['returnsClosePrevMktres10', 'returnsClosePrevRaw10', 'open', 'close'], [3, 5, 10])]))
LABEL_OBJECT_FIELDS = ['assetName']
DROP_COLS = ['universe', "returnsOpenNextMktres10"]
TIME_COLS = ['time']
def __init__(self):
transformers = []
# transformers.extend([
# (col,
# Pipeline([
# ("log", LogTransformer()),
# ("normalize", StandardScaler(copy=False)),
# ("fill_missing", SimpleImputer(strategy="median"))]),
# [col]) for col in set(self.COLUMNS_SCALED) & set(self.LOG_NORMAL_FIELDS)
# ])
        scaled_columns = list(self.NUMERIC_COLUMNS)  # copy so extend() below does not mutate the class attribute
if FeatureSetting.max_shift_date > 0:
scaled_columns.extend(self.LAG_FEATURES)
transformers.extend(
[
(col,
Pipeline([
# ("normalize", StandardScaler(copy=False)),
("fill_missing", SimpleImputer(strategy="median"))]),
# ("discrete", KBinsDiscretizer(n_bins=10, encode="onehot", strategy="quantile"))]),
[col]) for col in scaled_columns
]
)
transformers.extend(
[
("time_week", WeekDayTransformer(), self.TIME_COLS[0]),
("time_month", MonthTransformer(), self.TIME_COLS[0]),
("time_day", DayTransformer(), self.TIME_COLS[0])
]
)
# transformers.extend(
# [(col + "bow",
# Pipeline([
# ("fill_missing", SimpleImputer(strategy="constant", fill_value="")),
# ('flatten', RavelTransformer()),
# ("encode", CountVectorizer(decode_error="ignore",
# stop_words=None,
# strip_accents="unicode",
# max_features=100,
# min_df=30,
# binary=True))
# ]), [col]) for col in self.LABEL_OBJECT_FIELDS]),
self.encoder: ColumnTransformer = ColumnTransformer(transformers=transformers)
self.feature_matrix = None
def transform(self, df):
self.feature_matrix = self.encoder.transform(df).astype("float32")
# self.feature_matrix = self.feature_matrix
self.release_raw_field(df)
return df
def fit(self, df):
self.encoder.fit(df)
return self
def fit_transform(self, df):
return self.fit(df).transform(df)
def release_raw_field(self, df: pd.DataFrame):
        drop_cols = list(set(self.NUMERIC_COLUMNS + self.LABEL_OBJECT_FIELDS))
for col in self.DROP_COLS:
if col in df.columns:
drop_cols.append(col)
df.drop(drop_cols, axis=1, inplace=True)
gc.collect()
def clear(self):
self.feature_matrix = None
gc.collect()
class NewsFeatureTransformer(DfTransformer):
RAW_COLS = ['relevance', 'sentimentNegative', 'sentimentNeutral', 'sentimentPositive', 'marketCommentary']
LOG_NORMAL_FIELDS = [
'bodySize',
'sentenceCount', 'wordCount',
'sentimentWordCount', 'noveltyCount12H', 'noveltyCount24H',
'noveltyCount3D', 'noveltyCount5D', 'noveltyCount7D', 'volumeCounts12H',
'volumeCounts24H', 'volumeCounts3D', 'volumeCounts5D',
'volumeCounts7D']
COLUMNS_SCALED = [
'takeSequence',
'bodySize', 'companyCount',
'sentenceCount', 'wordCount',
'sentimentWordCount', 'noveltyCount12H', 'noveltyCount24H',
'noveltyCount3D', 'noveltyCount5D', 'noveltyCount7D', 'volumeCounts12H',
'volumeCounts24H', 'volumeCounts3D', 'volumeCounts5D',
'volumeCounts7D']
LABEL_COLS = ["sentimentClass", "provider", "urgency"]
FIRST_MENTION_SENTENCE = "firstMentionSentence"
BOW_COLS = ["headline"]
MULTI_LABEL_COLS = ["subjects", "audiences"]
LABEL_OBJECT_FIELDS = ['headlineTag']
DROP_COLS = ['time', 'sourceId', 'sourceTimestamp', "assetName"]
NUMERIC_COLS = list(set(RAW_COLS + LOG_NORMAL_FIELDS + COLUMNS_SCALED))
NUMERIC_COL_INDICES = list(range(len(NUMERIC_COLS)))
N_NUMERIC_COLS = len(NUMERIC_COLS)
def __init__(self):
transformers = []
transformers.extend(
[
(
col,
"passthrough",
# KBinsDiscretizer(n_bins=10, encode="onehot", strategy="quantile"),
[col]
) for col in self.RAW_COLS
]
)
transformers.extend([
(col,
Pipeline([
("log", LogTransformer()),
("normalize", StandardScaler(copy=False)),
("fill_missing", SimpleImputer(strategy="median"))]),
# ("discrete", KBinsDiscretizer(n_bins=10, encode="onehot", strategy="quantile"))]),
[col]) for col in set(self.COLUMNS_SCALED) & set(self.LOG_NORMAL_FIELDS)
])
transformers.extend(
[
(col,
Pipeline([
("normalize", StandardScaler(copy=False)),
("fill_missing", SimpleImputer(strategy="median"))]),
# ("discrete", KBinsDiscretizer(n_bins=10, encode="onehot", strategy="quantile"))]),
[col]) for col in set(self.COLUMNS_SCALED) - set(self.LOG_NORMAL_FIELDS)
]
)
delay_transformers = []
delay_transformers.extend([
(col, OneHotEncoder(sparse=True, handle_unknown='ignore', dtype='uint8'), [col])
for col in self.LABEL_COLS])
delay_transformers.append(
(self.FIRST_MENTION_SENTENCE,
Pipeline(
[("limitMax", LimitMax(4)),
("encoder", OneHotEncoder(sparse=True, handle_unknown='ignore', dtype='uint8'))]),
[self.FIRST_MENTION_SENTENCE])
)
# transformers.extend(
# [(col + "bow",
# Pipeline([
# ("fill_missing", SimpleImputer(strategy="constant", fill_value="")),
# ('flatten', RavelTransformer()),
# ("encode", CountVectorizer(decode_error="ignore",
# stop_words="english",
# strip_accents="unicode",
# max_features=2000,
# binary=True, dtype="uint8"))
# ]), [col]) for col in self.BOW_COLS]),
delay_transformers.extend(
[(col, CountVectorizer(decode_error="ignore",
strip_accents="unicode",
min_df=5,
max_features=2000,
binary=True, dtype="uint8"), col)
for col in self.MULTI_LABEL_COLS])
delay_transformers.extend(
[(col,
Pipeline([
("fill_missing", SimpleImputer(strategy="constant", fill_value="UNKNOWN")),
("encoder", OneHotEncoder(sparse=True, handle_unknown='ignore', dtype='uint8'))
]),
[col])
for col in self.LABEL_OBJECT_FIELDS]
)
self.encoder: ColumnTransformer = ColumnTransformer(transformers=transformers)
self.delay_encoder: ColumnTransformer = ColumnTransformer(transformers=delay_transformers)
self.feature_matrix = None
self.store_df: pd.DataFrame = None
self.n_delay_features = None
def transform(self, df):
self.feature_matrix = self.encoder.transform(df).astype("float32")
# self.feature_matrix = self.feature_matrix.tocsr()
self.store_df = df[
self.LABEL_COLS + [self.FIRST_MENTION_SENTENCE] + self.MULTI_LABEL_COLS + self.LABEL_OBJECT_FIELDS]
self.release_raw_field(df)
return df
def fit(self, df):
self.encoder.fit(df)
self.delay_encoder.fit(df)
self.n_delay_features = self._get_delay_faeture_num()
return self
def fit_transform(self, df):
return self.fit(df).transform(df)
def release_raw_field(self, df):
        drop_cols = list(set(
            self.RAW_COLS + [self.FIRST_MENTION_SENTENCE] + self.LABEL_COLS + self.MULTI_LABEL_COLS
            + self.BOW_COLS + self.LOG_NORMAL_FIELDS + self.LABEL_OBJECT_FIELDS + self.COLUMNS_SCALED + self.DROP_COLS
        ))
df.drop(drop_cols, axis=1, inplace=True)
gc.collect()
def clear(self):
self.feature_matrix = None
self.store_df = None
self.n_delay_features = None
gc.collect()
# @measure_time
def aggregate(self, list_of_indices, pool_size=4, binary=True):
self.encoded_cols_indices = list(range(self.N_NUMERIC_COLS, self.feature_matrix.shape[1]))
# print(list_of_indices)
# with Pool(pool_size) as pool:
# list_of_indices = pool.map(self.get_partial_agg, list_of_indices)
list_of_indices = [self.get_partial_agg(indices) for indices in list_of_indices]
# list_of_indices = sparse.csr_matrix(np.vstack(list_of_indices), dtype="float32")
# list_of_indices = sparse.csr_matrix(np.vstack(list_of_indices), dtype="uint8")
# list_of_indices = np.vstack(list_of_indices)
list_of_indices = sparse.vstack(list_of_indices, dtype="float32", format="csr")
# if binary:
# rows[rows != 0] = 1
return list_of_indices
def _get_delay_faeture_num(self):
        # count the number of feature columns the fitted delay encoder will emit
        total = 0
        for transformer_tuple in self.delay_encoder.transformers_:
            transformer = transformer_tuple[1]
            if isinstance(transformer, Pipeline):
                transformer = transformer.named_steps["encoder"]
            if isinstance(transformer, CountVectorizer):
                total += len(transformer.vocabulary_)
            elif isinstance(transformer, OneHotEncoder):
                total += sum(len(categories) for categories in transformer.categories_)
        return total
def get_partial_agg(self, ids):
if not isinstance(ids, list) or np.isnan(ids[0]):
# empty_feature = np.zeros((1, self.feature_matrix.shape[1] + ), dtype="float32")
empty_feature = sparse.csr_matrix((1, self.feature_matrix.shape[1] + self.n_delay_features),
dtype="float32")
# empty_feature = np.zeros((1, self.feature_matrix.shape[1]), dtype="uint8")
return empty_feature
# numeric_partial = self.feature_matrix[ids][:, self.NUMERIC_COL_INDICES].mean(axis=0)
# encoded_partial = self.feature_matrix[ids][:, self.encoded_cols_indices].sum(axis=0)
# encoded_partial = self.feature_matrix[ids].sum(axis=0)
# encoded_partial[encoded_partial != 0] = 1
return sparse.hstack([self.feature_matrix[[int(id) for id in ids], :].mean(axis=0).reshape((1, -1)),
self.delay_encoder.transform(self.store_df.iloc[ids]).sum(axis=0).reshape((1, -1))],
dtype="float32")
def post_link_transform(self, links):
if isinstance(links, pd.DataFrame):
aggregate_feature = self.aggregate(links[NEWS_ID].tolist())
links.drop(NEWS_ID, axis=1, inplace=True)
else:
aggregate_feature = self.aggregate(links)
return links, aggregate_feature
class TfDataGenerator(Sequence):
def __init__(self, list_of_indices, features: Features, labels, batch_size=200):
self.list_of_indices = list_of_indices
# print(list_of_indices)
self.features = features
self.labels = labels
self.batch_size = batch_size
self.n_samples = len(self.list_of_indices)
self.n_batches = self.n_samples // self.batch_size + int(bool(self.n_samples % self.batch_size))
self._current_batch_num = 0
def __len__(self):
return self.n_batches
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
start = index * self.batch_size
if index < self.n_batches - 1:
end = (index + 1) * self.batch_size
return self.features.get_linked_feature_matrix(
self.list_of_indices[start:end], market_indices=list(range(start, end))), self.labels[start:end]
# index += 1
else:
return self.features.get_linked_feature_matrix(self.list_of_indices[start:],
market_indices=list(
range(start, len(self.list_of_indices)))), \
self.labels[start:]
# index = 0
def on_epoch_end(self):
pass
class SparseMLPWrapper(ModelWrapper):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.train_data_generator: TfDataGenerator = None
self.valid_data_generator: TfDataGenerator = None
def predict(self, x: Union[np.ndarray, sparse.spmatrix]):
self.model = keras.models.load_model("mlp.model.h5")
logger.info("predicting %d samples...".format(x.shape[0]))
return self.model.predict(x)
def train(self, sparse_input=False, **kwargs):
input_ = keras.layers.Input(shape=(self.train_data_generator.features.get_feature_num(),), sparse=sparse_input,
dtype="float32")
x = keras.layers.Dense(192, activation='relu', kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l1_l2(1e-4, 1e-3))(input_)
x = keras.layers.Dropout(0.4)(x)
x = keras.layers.Dense(64, activation='relu', kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l1_l2(1e-4, 1e-3))(x)
x = keras.layers.Dropout(0.3)(x)
x = keras.layers.Dense(64, activation='relu', kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l1_l2(1e-4, 1e-3))(x)
x = keras.layers.Dropout(0.2)(x)
        # sigmoid output for binary cross-entropy (a one-unit softmax is always 1.0)
        output_ = keras.layers.Dense(1, activation="sigmoid", kernel_initializer='lecun_normal')(x)
self.model = keras.Model(inputs=input_, outputs=output_)
self.model.summary()
checkpointer = ModelCheckpoint(filepath="mlp.model.h5",
verbose=1, save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1, mode='auto')
self.model.compile(
loss='binary_crossentropy',
optimizer=keras.optimizers.Adam(lr=2e-2, decay=0.001),
metrics=["accuracy"]
)
self.model.fit_generator(self.train_data_generator, self.train_data_generator.n_batches,
epochs=50, validation_data=self.valid_data_generator,
validation_steps=self.valid_data_generator.n_batches, verbose=0,
callbacks=[checkpointer, early_stopping], shuffle=True)
def create_dataset(self, market_train, features, train_batch_size, valid_batch_size):
y, _ = ModelWrapper.to_x_y(market_train)
# print(market_train[NEWS_ID])
list_of_indices = market_train[NEWS_ID].tolist()
list_of_indices, valid_indices, y, valid_y = ModelWrapper.split_train_validation(list_of_indices, y,
train_size=0.8)
self.train_data_generator = TfDataGenerator(list_of_indices, features, y, batch_size=train_batch_size)
self.valid_data_generator = TfDataGenerator(valid_indices, features, valid_y, batch_size=valid_batch_size)
return None, None
def clear(self):
self.train_data_generator = None
self.valid_data_generator = None
gc.collect()
class Predictor(object):
def __init__(self, linker, model, features, market_preprocess, news_preprocess):
self.linker = linker
self.model = model
self.features: Features = features
self.market_preprocess = market_preprocess
self.news_preprocess = news_preprocess
def predict_all(self, days, env):
logger.info("=================prediction start ===============")
stored_market_df = None
stored_news_df = None
max_time = None
predict_start_id = 0
def store_past_data(market_df, news_df, max_store_date=0):
nonlocal stored_market_df
nonlocal stored_news_df
nonlocal predict_start_id
if stored_market_df is None or max_store_date == 0:
stored_market_df = market_df
stored_news_df = news_df
predict_start_id = 0
return
nonlocal max_time
max_time = market_df["time"].max()
min_time = max_time - offsets.Day(max_store_date)
stored_market_df = stored_market_df[stored_market_df["time"] >= min_time]
stored_news_df = stored_news_df[stored_news_df["firstCreated"] >= min_time]
predict_start_id = len(stored_market_df)
stored_market_df = pd.concat([stored_market_df, market_df], axis=0, ignore_index=True)
stored_news_df = | pd.concat([stored_news_df, news_df], axis=0, ignore_index=True) | pandas.concat |
from analytic_types.segment import Segment
import utils
import unittest
import numpy as np
import pandas as pd
import math
import random
RELATIVE_TOLERANCE = 1e-1
class TestUtils(unittest.TestCase):
#example test for test's workflow purposes
def test_segment_parsion(self):
self.assertTrue(True)
def test_confidence_all_normal_value(self):
segment = [1, 2, 0, 6, 8, 5, 3]
utils_result = utils.find_confidence(segment)[0]
result = 4.0
self.assertTrue(math.isclose(utils_result, result, rel_tol = RELATIVE_TOLERANCE))
def test_confidence_all_nan_value(self):
segment = [np.nan, np.nan, np.nan, np.nan]
self.assertEqual(utils.find_confidence(segment)[0], 0)
def test_confidence_with_nan_value(self):
data = [np.nan, np.nan, 0, 8]
utils_result = utils.find_confidence(data)[0]
result = 4.0
self.assertTrue(math.isclose(utils_result, result, rel_tol = RELATIVE_TOLERANCE))
def test_interval_all_normal_value(self):
data = [1, 2, 1, 2, 4, 1, 2, 4, 5, 6]
data = pd.Series(data)
center = 4
window_size = 2
result = [1, 2, 4, 1, 2]
self.assertEqual(list(utils.get_interval(data, center, window_size)), result)
def test_interval_wrong_ws(self):
data = [1, 2, 4, 1, 2, 4]
data = pd.Series(data)
center = 3
window_size = 6
result = [1, 2, 4, 1, 2, 4]
self.assertEqual(list(utils.get_interval(data, center, window_size)), result)
def test_subtract_min_without_nan(self):
segment = [1, 2, 4, 1, 2, 4]
segment = pd.Series(segment)
result = [0, 1, 3, 0, 1, 3]
utils_result = list(utils.subtract_min_without_nan(segment))
self.assertEqual(utils_result, result)
def test_subtract_min_with_nan(self):
segment = [np.nan, 2, 4, 1, 2, 4]
segment = pd.Series(segment)
result = [2, 4, 1, 2, 4]
utils_result = list(utils.subtract_min_without_nan(segment)[1:])
self.assertEqual(utils_result, result)
def test_get_convolve(self):
data = [1, 2, 3, 2, 2, 0, 2, 3, 4, 3, 2, 1, 1, 2, 3, 4, 3, 2, 0]
data = pd.Series(data)
pattern_index = [2, 8, 15]
window_size = 2
av_model = [1, 2, 3, 2, 1]
result = []
self.assertNotEqual(utils.get_convolve(pattern_index, av_model, data, window_size), result)
def test_get_convolve_with_nan(self):
data = [1, 2, 3, 2, np.nan, 0, 2, 3, 4, np.nan, 2, 1, 1, 2, 3, 4, 3, np.nan, 0]
data = pd.Series(data)
pattern_index = [2, 8, 15]
window_size = 2
av_model = [1, 2, 3, 2, 1]
result = utils.get_convolve(pattern_index, av_model, data, window_size)
for val in result:
self.assertFalse(np.isnan(val))
def test_get_convolve_empty_data(self):
data = []
pattern_index = []
window_size = 2
window_size_zero = 0
av_model = []
result = []
self.assertEqual(utils.get_convolve(pattern_index, av_model, data, window_size), result)
self.assertEqual(utils.get_convolve(pattern_index, av_model, data, window_size_zero), result)
def test_find_jump_parameters_center(self):
segment = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]
segment = pd.Series(segment)
jump_center = [10, 11]
self.assertIn(utils.find_pattern_center(segment, 0, 'jump'), jump_center)
def test_find_jump_parameters_height(self):
segment = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]
segment = pd.Series(segment)
jump_height = [3.5, 4]
self.assertGreaterEqual(utils.find_parameters(segment, 0, 'jump')[0], jump_height[0])
self.assertLessEqual(utils.find_parameters(segment, 0, 'jump')[0], jump_height[1])
def test_find_jump_parameters_length(self):
segment = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]
segment = pd.Series(segment)
jump_length = 2
self.assertEqual(utils.find_parameters(segment, 0, 'jump')[1], jump_length)
def test_find_drop_parameters_center(self):
segment = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
segment = pd.Series(segment)
drop_center = [14, 15, 16]
self.assertIn(utils.find_pattern_center(segment, 0, 'drop'), drop_center)
def test_find_drop_parameters_height(self):
segment = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
segment = pd.Series(segment)
drop_height = [3.5, 4]
self.assertGreaterEqual(utils.find_parameters(segment, 0, 'drop')[0], drop_height[0])
self.assertLessEqual(utils.find_parameters(segment, 0, 'drop')[0], drop_height[1])
def test_find_drop_parameters_length(self):
segment = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
segment = pd.Series(segment)
drop_length = 2
self.assertEqual(utils.find_parameters(segment, 0, 'drop')[1], drop_length)
def test_get_av_model_empty_data(self):
patterns_list = []
result = []
self.assertEqual(utils.get_av_model(patterns_list), result)
def test_get_av_model_normal_data(self):
patterns_list = [[1, 1, 1], [2, 2, 2],[3,3,3]]
result = [2.0, 2.0, 2.0]
self.assertEqual(utils.get_av_model(patterns_list), result)
def test_find_jump_nan_data(self):
data = [np.nan, np.nan, np.nan, np.nan]
data = pd.Series(data)
length = 2
height = 3
length_zero = 0
height_zero = 0
result = []
self.assertEqual(utils.find_jump(data, height, length), result)
self.assertEqual(utils.find_jump(data, height_zero, length_zero), result)
def test_find_drop_nan_data(self):
data = [np.nan, np.nan, np.nan, np.nan]
data = pd.Series(data)
length = 2
height = 3
length_zero = 0
height_zero = 0
result = []
self.assertEqual(utils.find_drop(data, height, length), result)
self.assertEqual(utils.find_drop(data, height_zero, length_zero), result)
def test_get_distribution_density(self):
segment = [1, 1, 1, 3, 5, 5, 5]
segment = pd.Series(segment)
result = (3, 5, 1)
self.assertEqual(utils.get_distribution_density(segment), result)
def test_get_distribution_density_right(self):
data = [1.0, 5.0, 5.0, 4.0]
data = pd.Series(data)
median = 3.0
max_line = 5.0
min_line = 1.0
utils_result = utils.get_distribution_density(data)
self.assertTrue(math.isclose(utils_result[0], median, rel_tol = RELATIVE_TOLERANCE))
self.assertTrue(math.isclose(utils_result[1], max_line, rel_tol = RELATIVE_TOLERANCE))
self.assertTrue(math.isclose(utils_result[2], min_line, rel_tol = RELATIVE_TOLERANCE))
def test_get_distribution_density_left(self):
data = [1.0, 1.0, 2.0, 1.0, 5.0]
data = pd.Series(data)
median = 3.0
max_line = 5.0
min_line = 1.0
utils_result = utils.get_distribution_density(data)
self.assertTrue(math.isclose(utils_result[0], median, rel_tol = RELATIVE_TOLERANCE))
self.assertTrue(math.isclose(utils_result[1], max_line, rel_tol = RELATIVE_TOLERANCE))
self.assertTrue(math.isclose(utils_result[2], min_line, rel_tol = RELATIVE_TOLERANCE))
def test_get_distribution_density_short_data(self):
data = [1.0, 5.0]
data = pd.Series(data)
segment = [1.0]
segment = pd.Series(segment)
utils_result_data = utils.get_distribution_density(data)
utils_result_segment = utils.get_distribution_density(segment)
self.assertEqual(len(utils_result_data), 3)
self.assertEqual(utils_result_segment, (0, 0, 0))
def test_get_distribution_density_with_nans(self):
segment = [np.NaN, 1, 1, 1, np.NaN, 3, 5, 5, 5, np.NaN]
segment = pd.Series(segment)
result = (3, 5, 1)
self.assertEqual(utils.get_distribution_density(segment), result)
def test_find_pattern_jump_center(self):
data = [1.0, 1.0, 1.0, 5.0, 5.0, 5.0]
data = pd.Series(data)
median = 3.0
result = 3
self.assertEqual(result, utils.find_pattern_center(data, 0, 'jump'))
def test_get_convolve_wrong_index(self):
data = [1.0, 5.0, 2.0, 1.0, 6.0, 2.0]
data = pd.Series(data)
        segments = [1, 11]
av_model = [0.0, 4.0, 0.0]
window_size = 1
try:
            utils.get_convolve(segments, av_model, data, window_size)
except ValueError:
self.fail('Method get_convolve raised unexpectedly')
def test_get_av_model_for_different_length(self):
patterns_list = [[1.0, 1.0, 2.0], [4.0, 4.0], [2.0, 2.0, 2.0], [3.0, 3.0], []]
try:
utils.get_av_model(patterns_list)
except ValueError:
            self.fail('Method get_av_model raised unexpectedly')
def test_find_nan_indexes(self):
data = [1, 1, 1, 0, 0, np.nan, None, []]
data = pd.Series(data)
result = [5, 6]
self.assertEqual(utils.find_nan_indexes(data), result)
def test_find_nan_indexes_normal_values(self):
data = [1, 1, 1, 0, 0, 0, 1, 1]
data = pd.Series(data)
result = []
self.assertEqual(utils.find_nan_indexes(data), result)
def test_find_nan_indexes_empty_values(self):
data = []
result = []
self.assertEqual(utils.find_nan_indexes(data), result)
def test_create_correlation_data(self):
data = [random.randint(10, 999) for _ in range(10000)]
data = pd.Series(data)
pattern_model = [100, 200, 500, 300, 100]
ws = 2
result = 6000
corr_data = utils.get_correlation_gen(data, ws, pattern_model)
corr_data = list(corr_data)
self.assertGreaterEqual(len(corr_data), result)
def test_inverse_segment(self):
data = pd.Series([1,2,3,4,3,2,1])
result = pd.Series([3,2,1,0,1,2,3])
utils_result = utils.inverse_segment(data)
for ind, val in enumerate(utils_result):
self.assertEqual(val, result[ind])
def test_get_end_of_segment_equal(self):
data = pd.Series([5,4,3,2,1,0,0,0])
result_list = [4, 5, 6]
self.assertIn(utils.get_end_of_segment(data, False), result_list)
def test_get_end_of_segment_greater(self):
data = pd.Series([5,4,3,2,1,0,1,2,3])
result_list = [4, 5, 6]
self.assertIn(utils.get_end_of_segment(data, False), result_list)
def test_get_borders_of_peaks(self):
data = | pd.Series([1,0,1,2,3,2,1,0,0,1,2,3,4,3,2,2,1,0,1,2,3,4,5,3,2,1,0]) | pandas.Series |
"""Normal-reciprocal functionality
"""
import pandas as pd
import numpy as np
def _first(x):
"""return the first item of the supplied Series"""
return x.iloc[0]
def average_repetitions(df, keys_mean):
"""average duplicate measurements. This requires that IDs and norrec labels
were assigned using the *assign_norrec_to_df* function.
Parameters
----------
df
DataFrame
keys_mean: list
list of keys to average. For all other keys the first entry will be
used.
"""
if 'norrec' not in df.columns:
raise Exception(
'The "norrec" column is required for this function to work!'
)
# Get column order to restore later
cols = list(df.columns.values)
keys_keep = list(set(df.columns.tolist()) - set(keys_mean))
agg_dict = {x: _first for x in keys_keep}
agg_dict.update({x: np.mean for x in keys_mean})
for key in ('id', 'timestep', 'frequency', 'norrec'):
if key in agg_dict:
del(agg_dict[key])
# print(agg_dict)
# average over duplicate measurements
extra_dimensions_raw = ['id', 'norrec', 'frequency', 'timestep']
extra_dimensions = [x for x in extra_dimensions_raw if x in df.columns]
df = df.groupby(extra_dimensions).agg(agg_dict)
df.reset_index(inplace=True)
return df[cols]
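# Hedged usage sketch (the column names 'r' and 'rpha' are hypothetical): once
# assign_norrec_to_df has added the 'id' and 'norrec' columns, repeated readings of the same
# configuration can be collapsed to their mean while the first value of every other column is kept:
#   df_avg = average_repetitions(df, keys_mean=['r', 'rpha'])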
def compute_norrec_differences(df, keys_diff):
"""DO NOT USE ANY MORE - DEPRECIATED!
"""
raise Exception('This function is depreciated!')
print('computing normal-reciprocal differences')
# df.sort_index(level='norrec')
def norrec_diff(x):
"""compute norrec_diff"""
if x.shape[0] != 2:
return np.nan
else:
return np.abs(x.iloc[1] - x.iloc[0])
keys_keep = list(set(df.columns.tolist()) - set(keys_diff))
agg_dict = {x: _first for x in keys_keep}
agg_dict.update({x: norrec_diff for x in keys_diff})
for key in ('id', 'timestep', 'frequency'):
if key in agg_dict:
del(agg_dict[key])
# for frequencies, we could (I think) somehow prevent grouping by
# frequencies...
df = df.groupby(('timestep', 'frequency', 'id')).agg(agg_dict)
# df.rename(columns={'r': 'Rdiff'}, inplace=True)
df.reset_index()
return df
def _normalize_abmn(abmn):
"""return a normalized version of abmn
"""
abmn_2d = np.atleast_2d(abmn)
abmn_normalized = np.hstack((
np.sort(abmn_2d[:, 0:2], axis=1),
np.sort(abmn_2d[:, 2:4], axis=1),
))
return abmn_normalized
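# Example (added for clarity): electrodes are sorted within the current pair (a, b) and the
# voltage pair (m, n), so permutations of the same quadrupole collapse onto one row:
#   _normalize_abmn([3, 1, 7, 5])  # -> array([[1, 3, 5, 7]])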
def assign_norrec_to_df(df):
"""Determine normal-reciprocal pairs for a given dataframe.
Parameters
----------
df: pandas.DataFrame
The data
Returns
-------
df_new: pandas.DataFrame
The data with two new columns: "id" and "norrec"
"""
if df.shape[0] == 0:
# empty dataframe, just return a copy
return df.copy()
c = df[['a', 'b', 'm', 'n']].values.copy()
# unique injections
cu = np.unique(c, axis=0)
print('generating ids')
# now assign unique IDs to each config in normal and reciprocal
running_index = 0
normal_ids = {}
reciprocal_ids = {}
# loop through all configurations
for i in range(0, cu.shape[0]):
# print('testing', cu[i], i, cu.shape[0])
# normalize configuration
cu_norm = _normalize_abmn(cu[i, :]).squeeze()
if tuple(cu_norm) in normal_ids:
# print('already indexed')
continue
# find pairs
indices = np.where((
# current electrodes
(
(
(cu[:, 0] == cu[i, 2]) & (cu[:, 1] == cu[i, 3])
) |
(
(cu[:, 0] == cu[i, 3]) & (cu[:, 1] == cu[i, 2])
)
) &
# voltage electrodes
(
(
(cu[:, 2] == cu[i, 0]) & (cu[:, 3] == cu[i, 1])
) |
(
(cu[:, 2] == cu[i, 1]) & (cu[:, 3] == cu[i, 0])
)
)
))[0]
# we found no pair
if len(indices) == 0:
# print('no reciprocals, continuing')
if not tuple(cu_norm) in normal_ids:
if np.min(cu_norm[0:2]) < np.min(cu_norm[2:3]):
# treat as normal
normal_ids[tuple(cu_norm)] = running_index
else:
reciprocal_ids[tuple(cu_norm)] = running_index
running_index += 1
continue
# if len(indices) > 1:
# print('found more than one reciprocals')
# normalize the first reciprocal
cu_rec_norm = _normalize_abmn(cu[indices[0], :]).squeeze()
# decide on normal or reciprocal
# print('ABREC', cu_norm[0:2], cu_rec_norm[0:2])
if np.min(cu_norm[0:2]) < np.min(cu_rec_norm[0:2]):
# print('is normal')
# normal
normal_ids[tuple(cu_norm)] = running_index
reciprocal_ids[tuple(cu_rec_norm)] = running_index
else:
normal_ids[tuple(cu_rec_norm)] = running_index
reciprocal_ids[tuple(cu_norm)] = running_index
running_index += 1
print('assigning ids')
# print(df.shape)
# print(df.columns)
# print('normal_ids', normal_ids)
# print('reciprocal_ids', reciprocal_ids)
# now convert the indices into a dataframe so we can use pd.merge
# note that this code was previously written in another way, so the
# conversion is quite cumbersome
# at one point we need to rewrite everything here...
df_nor = {item: key for key, item in normal_ids.items()}
df_nor = pd.DataFrame(df_nor).T.reset_index().rename(
{'index': 'id'}, axis=1)
df_nor['norrec'] = 'nor'
if len(normal_ids) > 0:
df_nor.columns = ('id', 'a', 'b', 'm', 'n', 'norrec')
df_nor2 = df_nor.copy()
df_nor2.columns = ('id', 'b', 'a', 'm', 'n', 'norrec')
df_nor3 = df_nor.copy()
df_nor3.columns = ('id', 'b', 'a', 'n', 'm', 'norrec')
df_nor4 = df_nor.copy()
df_nor4.columns = ('id', 'a', 'b', 'n', 'm', 'norrec')
df_ids = pd.concat(
(
df_nor,
df_nor2,
df_nor3,
df_nor4,
),
sort=True
)
else:
df_ids = pd.DataFrame()
if len(reciprocal_ids) > 0:
df_rec = {item: key for key, item in reciprocal_ids.items()}
df_rec = | pd.DataFrame(df_rec) | pandas.DataFrame |
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.preprocessing import MinMaxScaler
# set ggplot style
plt.style.use('ggplot')
sns.set_style("whitegrid")
mpl.rcParams.update({'font.size': 24})
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
class ProjectStatus(object):
def __init__(self, project_dir, project):
self.project_dir = project_dir
self.project = project
self.scaler = MinMaxScaler()
def update_project(self, project_dir, project):
self.project_dir = project_dir
self.project = project
def _faults_column_attribute(self, df):
return 'NumErrors' if 'NumErrors' in df.columns else 'Verdict'
def get_summary(self):
def get_sparline(dataframe):
# Sparklines
sparklines = "\\sparkspike 0 0" # Workaround for variants without failures
if len(dataframe) > 1:
scaled_values = self.scaler.fit_transform(dataframe)
# sparklines = f' '.join([f"\\sparkspike {i[0]} {i[1]}" for i in scaled_values])
sparklines = f' '.join([f"{i[0]} {i[1]}" for i in scaled_values])
# return "\\begin{sparkline}{15} " + os.linesep + sparklines + os.linesep + " \\end{sparkline}"
return "\\begin{sparkline}{15} " + "\\spark " + sparklines + " / \\end{sparkline}"
return ""
summary_cols = ["Name", "Period", "Builds",
"Faults", "FaultsByCycle",
"Tests", "Volatility",
"Duration", "Interval"]
summary = | pd.DataFrame(columns=summary_cols) | pandas.DataFrame |