prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
---|---|---|
import os
import re
import time
import utils
import mail
import json
import requests
import collections
import pandas as pd
from utils import log
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from http.client import RemoteDisconnected
import numpy as np
import urllib3
urllib3.disable_warnings()
retailer_urls_all={
"Sephora USA": f"http://www.sephora.com"
,"Sephora CAN": f"http://www.sephora.com/ca/en"
,"Sephora France": f"https://www.sephora.fr"
,"Sephora Middle East": f"https://www.sephora.ae"
,"Sephora SE Asia": f"http://www.sephora.sg"
,"Sephora Thailand": f"https://www.sephora.co.th"
,"Sephora AUS": f"http://www.sephora.com.au"
,"Revolve": f"http://www.revolve.com"
,"<NAME>": f"https://www.rileyrose.com"
,"Birchbox": f"http://www.birchbox.com"
# ,"Nordstrom": f"http://shop.nordstrom.com"
,"Net-A-Porter": f"https://www.net-a-porter.com"
,"Beauty Bay": f"https://www.beautybay.com"
# ,"Beauty Bay 2": f"https://www.beautybay.com"
,"Cult Beauty": f"https://www.cultbeauty.co.uk"
,"Anthropologie": f"https://www.anthropologie.com"
,"Free People": f"https://www.freepeople.com"
,"I Am Natural Store": f"https://www.iamnaturalstore.com.au"
,"Urban Outfitters": f"https://www.urbanoutfitters.com"
# ,"Naturisimo": f"https://www.naturisimo.com/"
}
df_root_urls=pd.DataFrame(list(zip(retailer_urls_all.keys(),retailer_urls_all.values())),columns=['Retailer','root_url'])
brand_pages_all={
"Sephora USA": f"http://www.sephora.com/brand/briogeo/all"
,"Sephora CAN": f"http://www.sephora.com/ca/en/brand/briogeo/all"
,"Sephora France": f"https://www.sephora.fr/marques/de-a-a-z/briogeo-briog/"
,"Sephora Middle East": f"https://www.sephora.ae/en/brands/briogeo"
,"Sephora SE Asia": f"http://www.sephora.sg/brands/briogeo?view=120"
,"Sephora Thailand": f"https://www.sephora.co.th/brands/briogeo?view=120"
,"Sephora AUS": f"https://www.sephora.com.au/brands/briogeo?view=60"
,"Revolve": f"http://www.revolve.com/briogeo/br/2e2c0b/"
,"Riley Rose": f"https://www.rileyrose.com/us/shop/catalog/category/rr/promo-branded-briogeo"
,"Birchbox": f"http://www.birchbox.com/brand/4614"
# ,"Nordstrom": f"https://shop.nordstrom.com/c/briogeo?origin=productBrandLink"
,"Net-A-Porter": f"https://www.net-a-porter.com/us/en/Shop/Designers/Briogeo?pn=1&npp=60&image_view=product&dScroll=0"
,"Beauty Bay": f"https://www.beautybay.com/l/briogeo/"
# ,"Beauty Bay 2": f"https://www.beautybay.com/l/briogeo/?f_pg=2"
,"Cult Beauty": f"https://www.cultbeauty.co.uk/briogeo"
,"Anthropologie": f"https://www.anthropologie.com/beauty-hair-care?brand=Briogeo"
,"Free People": f"https://www.freepeople.com/brands/briogeo/"
,"I Am Natural Store": f"https://www.iamnaturalstore.com.au/collections/briogeo"
,"Urban Outfitters": f"https://www.urbanoutfitters.com/brands/briogeo"
# ,"Naturisimo": f"https://www.naturisimo.com/index.cfm?nme=bri"
}
class BriogeoRetailerScraper(object):
def __init__(self, retailers=brand_pages_all.keys(), send_email=True, from_email='<EMAIL>', to_email='<EMAIL>', update_db=True):
if not isinstance(retailers, collections.KeysView):
self.brand_pages=dict(zip(retailers,[brand_pages_all.get(x) for x in retailers]))
else:
self.brand_pages=brand_pages_all
keys_to_remove=[k for k,v in self.brand_pages.items() if v is None]
for k in keys_to_remove:
log.warn('Retailer {} not in retailer dictionary. Removed.'.format(k))
self.brand_pages.pop(k)
if len(self.brand_pages)==0:
log.error('List of retailers provided was invalid/empty.')
self.send_email = send_email
self.update_db = update_db
self.driver = None
self.delay = 10
self.from_email = from_email
self.to_email = to_email
self.soup = []
self.df_prod_pages = pd.DataFrame()
def load_page_requests(self,url):
time.sleep(1+np.abs(np.random.rand()))
try:
resp=requests.get(url,verify=False)
except (requests.HTTPError, requests.ConnectionError):
log.warn('Could not connect to {}'.format(url))
return
self.soup = BeautifulSoup(resp.content,"lxml")
def load_page_selenium(self,url,by_cont,by_name,scroll=False,click=False,click_xpath=''):
if not self.driver:
self.driver=webdriver.Firefox()
try:
self.driver.get(url)
except ConnectionResetError:
print('ConnectionResetError 113')
time.sleep(3.5)
self.driver.get(url)
except (BrokenPipeError, OSError):
print('BrokenPipeError or OSError 117')
try:
self.driver.close()
except ConnectionResetError:
print('ConnectionResetError 121')
self.driver.close()
self.driver=None
self.driver=webdriver.Firefox()
time.sleep(2)
self.driver.get(url)
try:
wait=WebDriverWait(self.driver,self.delay)
wait.until(EC.presence_of_element_located((by_cont,by_name)))
print("page is ready")
except TimeoutException:
print("Loading took too much time")
if scroll==True:
scroll_height=self.driver.execute_script("return document.body.scrollHeight;")
scroll_to=300
loop_limit=30
loop_cnt=1
while scroll_to < scroll_height:
if loop_cnt>loop_limit:
break
self.driver.execute_script("window.scrollTo(0, {});".format(scroll_to))
time.sleep(0.8+np.abs(np.random.rand()))
scroll_height=self.driver.execute_script("return document.body.scrollHeight;")
scroll_to+=300
loop_cnt+=1
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(1.5+np.abs(np.random.rand()))
if click==True:
click_link=self.driver.find_element_by_xpath(click_xpath)
click_link.click()
time.sleep(3)
try:
html = self.driver.execute_script("return document.body.outerHTML;")
except RemoteDisconnected:
print('RemoteDisconnected 155')
time.sleep(2)
html = self.driver.execute_script("return document.body.outerHTML;")
except ConnectionResetError:
print('ConnectionResetError 159')
time.sleep(1)
html = self.driver.execute_script("return document.body.outerHTML;")
except BrokenPipeError:
print('BrokenPipeError 163')
time.sleep(1)
html = self.driver.execute_script("return document.body.outerHTML;")
self.soup=BeautifulSoup(html,'lxml')
def load_and_parse_all_brand_pages(self):
for r in self.brand_pages.keys():
self.load_brand_page(r)
self.parse_brand_page_soup(r)
if 'Beauty Bay 2' in self.df_prod_pages['Retailer'].unique().tolist():
self.df_prod_pages.loc[self.df_prod_pages['Retailer']=='Beauty Bay 2','Retailer']='Beauty Bay'
def load_brand_page(self,retailer):
brand_page=self.brand_pages[retailer]
print('Loading {} brand page: {}'.format(retailer,brand_page))
if retailer in ['Sephora USA','Sephora CAN','Sephora France','Sephora Middle East','Nordstrom','Riley Rose','Revolve']:
self.load_page_requests(brand_page)
elif retailer in ['Sephora SE Asia','Sephora Thailand','Sephora AUS']:
self.load_page_selenium(brand_page,By.ID,"product-index-content")
elif retailer == 'Cult Beauty':
self.load_page_selenium(brand_page,By.CLASS_NAME,"col mainContent",scroll=False,click=True,click_xpath="/html/body/div[1]/div[1]/div[7]/div[2]/div[2]/div[2]/div[3]/div[3]/button")
elif retailer == 'Birchbox':
self.load_page_selenium(brand_page,By.CLASS_NAME,"vertical__content___2lOQc",scroll=True,click=True,click_xpath="//button[1]")
elif retailer == 'Net-A-Porter':
self.load_page_selenium(brand_page,By.ID,'page-container',scroll=True)
elif retailer in ['Beauty Bay','Beauty Bay 2']:
self.load_page_selenium(brand_page,By.CLASS_NAME,"c-product qa-product",scroll=True)
elif retailer == 'Anthropologie':
self.load_page_selenium(brand_page,By.CLASS_NAME,"dom-category-browse",scroll=True)
elif retailer == 'Free People':
self.load_page_selenium(brand_page,By.CLASS_NAME,"dom-product-tile",scroll=True)
elif retailer == 'I Am Natural Store':
self.load_page_selenium(brand_page,By.CLASS_NAME,"inner-top",scroll=True)#,click=True,click_xpath='//a[@data-translate="collections.general.show_more"]')
elif retailer == 'Urban Outfitters':
self.load_page_selenium(brand_page,By.CLASS_NAME,'dom-product-tile')
elif retailer == 'Naturisimo':
self.load_page_selenium(brand_page,By.CLASS_NAME,'product_box')
def parse_brand_page_soup(self,retailer):
soup=self.soup
### SEPHORA USA
### SEPHORA CANADA
if retailer in ['Sephora USA','Sephora CAN']:
data = soup.find('script',attrs={'id':"linkJSON"}).get_text()
output = json.loads(data)
for d in output:
if d['path']=='CatalogPage':
prods=d['props']['products']
df=pd.DataFrame(d['props']['products'])
#pd.DataFrame(list(df['currentSku'].to_dict().values()))['listPrice']
df['Retailer']=retailer
df['OOS_FewLeft']='No'
if retailer=='Sephora CAN':
df['targetUrl']=df['targetUrl'].transform(lambda x: retailer_urls_all[retailer]+x+'?country_switch=ca&lang=en')
else:
df['targetUrl']=df['targetUrl'].transform(lambda x: retailer_urls_all[retailer]+x)
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### SEPHORA FRANCE
elif retailer == 'Sephora France':
prod_divs = soup.find_all('div',attrs={'class':"product-tile clickable"})
prod_name_list=[]
prod_url_list=[]
for p in prod_divs:
p_json = json.loads(p['data-tcproduct'])
if p_json is not None:
if p_json['product_brand']=='BRIOGEO':
prod_name_list.append(p_json['product_pid_name'])
prod_url_list.append(p_json['product_url_page'])
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list)),columns=['displayName','targetUrl'])
df['Retailer']=retailer
df['OOS_FewLeft']='No'
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### SEPHORA MIDDLE EAST
elif retailer in ['Sephora Middle East']:
ae_ul=soup.find('ul',attrs={'class':"products-grid products-grid--max-4-col"})
ae_tags=ae_ul.find_all('a',attrs={'class':'product-image'})
prod_name_list=[]
prod_url_list=[]
for p in ae_tags:
prod_name_list.append(p['title'])
prod_url_list.append(p['href'])
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list)),columns=['displayName','targetUrl'])
df['Retailer']=retailer
df['OOS_FewLeft']='No'
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### SEPHORA SOUTH EAST ASIA
### SEPHORA THAILAND
### SEPHORA AUSTRALIA
elif retailer in ['Sephora SE Asia','Sephora Thailand','Sephora AUS']:
prod_ind=soup.find('div',attrs={'class':'products-grid'})
prods=prod_ind.find_all('div',attrs={'class':'product-card'})
prod_name_list=[]
prod_url_list=[]
prod_oos_list=[]
for p in prods:
prod_name_list.append(p.find('p',attrs={'class':'product-card-product'}).text)
prod_url_list.append(retailer_urls_all[retailer]+p.find('a',attrs={'class':'product-card-description'})['href'])
if p.find('div',attrs={'class':'out-of-stock'}):
prod_oos_list.append('OOS')
else:
prod_oos_list.append('No')
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list,prod_oos_list)),columns=['displayName','targetUrl','OOS_FewLeft'])
df['Retailer']=retailer
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### NORDSTROM
elif retailer == 'Nordstrom':
nd_start_pos=soup.text.find('__INITIAL_CONFIG__ = {')
nd_end_pos=soup.text.find('Server":true}}',nd_start_pos)
if nd_end_pos!=-1:
nd_jsondata=json.loads(soup.text[nd_start_pos+21:nd_end_pos+14])
else:
nd_end_pos=soup.text.find('webExtractor":{}}',nd_start_pos)
nd_jsondata=json.loads(soup.text[nd_start_pos+21:nd_end_pos+17])
prod_name_list=[]
prod_url_list=[]
for num,p in enumerate(nd_jsondata['viewData']['productsById'].keys(),1):
prod_name_list.append(nd_jsondata['viewData']['productsById'][p]['name'])
prod_url_list.append(retailer_urls_all[retailer]+nd_jsondata['viewData']['productsById'][p]['productPageUrl'])
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list)),columns=['displayName','targetUrl'])
df['Retailer']=retailer
df['OOS_FewLeft']='No'
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### RILEY ROSE
elif retailer == 'Riley Rose':
rr_scripts=soup.find_all('script',attrs={'type':'text/javascript'})
### TO DO: MAKE THIS A SEARCH THROUGH SCRIPTS FOR VAR CDATA
rr_start_pos=rr_scripts[-2].text.find('var cData =')
rr_end_pos=rr_scripts[-2].text.find('"};',rr_start_pos)
rr_jsondata = json.loads(rr_scripts[-2].text[rr_start_pos+12:rr_end_pos+2])
prod_name_list=[]
prod_url_list=[]
prod_oos_list=[]
for p in rr_jsondata['CatalogProducts']:
prod_name_list.append(p['DisplayName'])
prod_url_list.append(p['ProductShareLinkUrl'])
n_left=int(p['Variants'][0]['Sizes'][0]['LowStockMessage'])
if p['IsOOS']==False and n_left>10:
oos_value='No'
elif n_left<=10:
oos_value='{} Left'.format(n_left)
else:
oos_value='OOS'
prod_oos_list.append(oos_value)
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list,prod_oos_list)),columns=['displayName','targetUrl','OOS_FewLeft'])
df['Retailer']=retailer
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### REVOLVE
elif retailer == 'Revolve':
prod_list=soup.find_all('li',attrs={'class':"js-plp-container"})
prod_name_list=[]
prod_url_list=[]
prod_oos_list=[]
for p in prod_list:
link=p.find('a',attrs={'class':"js-plp-pdp-link"})
hovr_btn=p.find('a',attrs={'class':"image-hover__btn image-hover__btn--focusable js-plp-quickview"})
name=re.split(r"^PREORDER |^QUICK VIEW |^SOLD OUT ",hovr_btn['aria-label'])[1]
#print(link["href"], hovr_btn['aria-label'])
if hovr_btn.get_text(strip=True)=='PREORDER' or hovr_btn.get_text(strip=True)=='SOLD OUT':
oos_value='OOS'
else:
oos_value='No'
prod_name_list.append(name)
prod_url_list.append(retailer_urls_all[retailer]+link['href'])
prod_oos_list.append(oos_value)
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list,prod_oos_list)),columns=['displayName','targetUrl','OOS_FewLeft'])
df['Retailer']=retailer
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### BIRCHBOX
elif retailer == 'Birchbox':
prod_list=soup.find_all('a',attrs={'class':'productThumb__title___1D-Rj'})
prod_name_list=[]
prod_url_list=[]
for p in prod_list:
if 'href' in p.attrs.keys():
prod_name_list.append(p.text)
prod_url_list.append(retailer_urls_all[retailer]+p['href'])
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list)),columns=['displayName','targetUrl'])
df['Retailer']=retailer
df['OOS_FewLeft']='No'
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### NET-A-PORTER
elif retailer == 'Net-A-Porter':
prod_list=soup.find_all('div',attrs={'class':'product-image'})
prod_name_list=[]
prod_url_list=[]
for p in prod_list:
prod_name_list.append(p.find('a').find('img')['alt'])
prod_url_list.append(retailer_urls_all[retailer]+p.find('a')['href'])
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list)),columns=['displayName','targetUrl'])
df['Retailer']=retailer
df['OOS_FewLeft']='No'
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### BEAUTY BAY
elif retailer in ['Beauty Bay','Beauty Bay 2']:
list_grid=soup.find_all('div',attrs={'class':'lister-grid'})
prod_list=list_grid[0].find_all('a')
#prod_list=soup.find_all('a',attrs={'class':'c-product qa-product'})
prod_name_list=[]
prod_url_list=[]
prod_oos_list=[]
for p in prod_list:
#prod_name_list.append(p.find('img')['alt'])
#prod_url_list.append(retailer_urls_all[retailer]+p['href'])
prod_url_list.append(retailer_urls_all[retailer]+p['href'])
img_list=p.find_all('img')
prod_name_list.append(img_list[0]['alt'])
if p.find('div',attrs={'class':'lister-tile out-of-stock'}):
oos_value='Yes'
else:
oos_value='No'
prod_oos_list.append(oos_value)
#df=pd.DataFrame(list(zip(prod_name_list,prod_url_list)),columns=['displayName','targetUrl'])
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list,prod_oos_list)),columns=['displayName','targetUrl','OOS_FewLeft'])
df['Retailer']=retailer
#df['OOS_FewLeft']='No'
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### CULT BEAUTY
elif retailer == 'Cult Beauty':
prod_list=soup.find_all('div',attrs={'class':'productGridItem'})
prod_name_list=[]
prod_url_list=[]
for p in prod_list:
prod_name_list.append(p['data-name'])
long_url=p.find('a')['href']
end_url_loc=long_url.find('.html#')
prod_url_list.append(retailer_urls_all[retailer]+long_url[0:end_url_loc+5])
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list)),columns=['displayName','targetUrl'])
df['Retailer']=retailer
df['OOS_FewLeft']='No'
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### ANTHROPOLOGIE
elif retailer == 'Anthropologie':
prod_list=soup.find_all('a',attrs={'class':'c-product-tile__image-link js-product-tile__image-link'})
prod_name_list=[]
prod_url_list=[]
for p in prod_list:
img=p.find('img')
prod_name_list.append(img['alt'])
prod_url_list.append(retailer_urls_all[retailer]+p['href'])
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list)),columns=['displayName','targetUrl'])
df['Retailer']=retailer
df['OOS_FewLeft']='No'
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### FREE PEOPLE
elif retailer == 'Free People':
prod_list=soup.find_all('div',attrs={'class':'dom-product-tile'})
prod_name_list=[]
prod_url_list=[]
for p in prod_list:
prod_name_list.append(p.find('meta',attrs={'itemprop':'name'})['content'])
prod_url_list.append(p.find('meta',attrs={'itemprop':'url'})['content'])
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list)),columns=['displayName','targetUrl'])
df['Retailer']=retailer
df['OOS_FewLeft']='No'
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### I AM NATURAL STORE
elif retailer == 'I Am Natural Store':
prod_list=soup.find_all('div',attrs={'class':'inner-top'})
prod_name_list=[]
prod_url_list=[]
prod_oos_list=[]
for p in prod_list:
oos_value=''
prod_url_list.append(retailer_urls_all[retailer]+p.find('a')['href'])
prod_name_list.append(p.find('a').find('img')['alt'].replace('<br>',''))
prod_dat = json.loads(p.find('a',attrs={'title':'Quick View'})['data-istockvariants'])
if len(prod_dat)>1:
for x in range(len(prod_dat)):
if prod_dat[x]['inventory_quantity']==0:
oos_value+=(' {} OOS'.format(prod_dat[x]['title']))
else:
if prod_dat[0]['inventory_quantity']==0:
oos_value='OOS'
else:
oos_value='No'
prod_oos_list.append(oos_value)
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list,prod_oos_list)),columns=['displayName','targetUrl','OOS_FewLeft'])
df['Retailer']=retailer
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### URBAN OUTFITTERS
elif retailer == 'Urban Outfitters':
prod_list=self.soup.find_all('span',attrs={'itemprop':'product'})
prod_name_list=[]
prod_url_list=[]
for p in prod_list:
prod_url_list.append(p.find('meta',attrs={'itemprop':'url'})['content'])
prod_name_list.append(p.find('meta',attrs={'itemprop':'name'})['content'])
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list)),columns=['displayName','targetUrl'])
df['Retailer']=retailer
df['OOS_FewLeft']='No'
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
### NATURISIMO
elif retailer == 'Naturisimo':
prod_list=self.soup.find_all('div',attrs={'class':'product_box'})
prod_name_list=[]
prod_url_list=[]
prod_oos_list=[]
for p in prod_list:
prod_url_list.append(retailer_urls_all[retailer]+p.find('a')['href'])
prod_name_list.append(p.find('div',attrs={'class':'product_name'}).text)
if p.find('form'):
prod_oos_list.append('No')
else:
prod_oos_list.append('OOS')
df=pd.DataFrame(list(zip(prod_name_list,prod_url_list,prod_oos_list)),columns=['displayName','targetUrl','OOS_FewLeft'])
df['Retailer']=retailer
df_brandpage_prods=df[['Retailer','displayName','targetUrl','OOS_FewLeft']]
self.df_prod_pages=
|
pd.concat([self.df_prod_pages,df_brandpage_prods],ignore_index=True)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Functions for estimating electricity prices, EEG levies, remunerations and other components, based on customer type and annual demand
@author: Abuzar and Shakhawat
"""
from typing import ValuesView
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
from scipy.interpolate import InterpolatedUnivariateSpline
def calculate_mean_price(customer_type, val_yearly_demand):
"""
Parameters
----------
customer_type : type of customer, differentiated between household and industrial customers
val_yearly_demand : yearly electricity demand, in kWh/y for household customers and in MWh/y for industrial customers
Returns
-------
mean_price: average price for the customer for the next year in cents/kWh
"""
def plotting(x,y, title, x_label, y_label, name_plot):
fig = plt.figure()
values = x
plt.plot (x,y)
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.xticks(x,values)
plt.xticks(rotation = 45)
fig.savefig(name_plot, dpi=fig.dpi)
def haupt_tarif(data):
#haupt_tarrif = df_with_data
df_with_data = pd.read_excel(data)
yearly_mean = df_with_data.price.mean()
haupt_tarrif = df_with_data[df_with_data["hour"].isin([8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]) & df_with_data["Day"].isin(['Wednesday', 'Thursday', 'Friday', 'Monday', 'Tuesday'])]
cond = df_with_data['hour'].isin(haupt_tarrif['hour'])
df_with_data.drop(haupt_tarrif[cond].index, inplace = True)
ht_factor = haupt_tarrif.price.mean()/yearly_mean
return ht_factor
def neben_tarif(data):
#neben_tarrif = df_with_data
df_with_data = pd.read_excel(data)
yearly_mean = df_with_data.price.mean()
neben_tarrif = df_with_data[(df_with_data["hour"].isin([1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23, 24]) & df_with_data["Day"].isin(['Wednesday', 'Thursday', 'Friday', 'Monday', 'Tuesday'])) |(df_with_data["Day"].isin(['Saturday', 'Sunday']))]
neben_tarrif.head()
cond = df_with_data['hour'].isin(neben_tarrif['hour'])
df_with_data.drop(neben_tarrif[cond].index, inplace = True)
nt_factor = neben_tarrif.price.mean()/yearly_mean
return nt_factor
ht_factor = haupt_tarif("ht_nt_price.xlsx")
nt_factor = neben_tarif("ht_nt_price.xlsx")
#industrial 2000 - 20000 MWh
industrie_prices_without_VAT = pd.read_excel(r'Energiepreisentwicklung.xlsx',sheet_name='5.8.3 Strom - € - Industrie', skiprows = 5, nrows = 26, index_col = 0)
industrie_prices_without_VAT = industrie_prices_without_VAT.iloc[:,0]
industrie_prices_without_VAT = industrie_prices_without_VAT.reset_index()
industrie_prices_without_VAT["index"]= industrie_prices_without_VAT["index"].str.slice(start = 5)
industrie_prices_without_VAT.columns = ["year","price"]
industrie_prices_without_VAT = industrie_prices_without_VAT.set_index("year")
industrie_prices_without_VAT.index = industrie_prices_without_VAT.index.astype(str)
industrie_prices_without_VAT.index = pd.to_datetime(industrie_prices_without_VAT.index, errors='ignore')
industrie_prices_without_VAT = industrie_prices_without_VAT.astype(float)
industrie_prices_without_VAT = industrie_prices_without_VAT.resample('12M').mean()
industrie_prices_without_VAT.index = industrie_prices_without_VAT.index.astype(str)
industrie_prices_without_VAT.index= industrie_prices_without_VAT.index.str.slice(start = 0, stop = -6)
ht_industrie_prices_without_VAT = industrie_prices_without_VAT.price * ht_factor
nt_industrie_prices_without_VAT = industrie_prices_without_VAT.price * nt_factor
ht_industrie_prices_without_VAT = ht_industrie_prices_without_VAT.reset_index()
nt_industrie_prices_without_VAT = nt_industrie_prices_without_VAT.reset_index()
industrie_prices_without_VAT = industrie_prices_without_VAT.reset_index()
industrie_prices_without_VAT = industrie_prices_without_VAT[industrie_prices_without_VAT.year >= str(2016)]
#industrial prices > 150000 MWh/y
v_big_industrial_prices_BDEW = {'year': range(2019,2021), 'price': [3.77,3.05]}
v_big_industrial_prices_BDEW = pd.DataFrame(data=v_big_industrial_prices_BDEW)
v_big_industrial_prices_BDEW
#industrial prices between 70000-150000 MWh/y
big_industrial_prices_BDEW = {'year': range(2016,2021), 'price': [8.37, 9.96, 8.96, 9.28, 10.07]}
big_industrial_prices_BDEW =
|
pd.DataFrame(data=big_industrial_prices_BDEW)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import tensorflow as tf
from patchwork.feature._multitask import _encode_classes, _dataframe_to_classes
from patchwork.feature._multitask import _assemble_full_network
def test_encode_classes():
train =
|
pd.Series(["foo", "bar", "bar", np.nan])
|
pandas.Series
|
import logging
from pathlib import Path
from typing import List, Union, Optional, Tuple, Dict
import numpy as np
import pandas as pd
from .datasets import Datasets
from .metadata import DatasetId
class MultiDatasetManager(Datasets):
"""Provides read-only access to multiple benchmark datasets collections and their meta-information.
Manages dataset collections and their meta-information that are stored in multiple folders. The entries in all
index files must be unique and are NOT allowed to overlap! This would lead to information loss!
Parameters
----------
data_folders : list of paths
List of data paths that hold the datasets and the index files.
custom_datasets_file : path
Path to a file listing additional custom datasets.
Raises
------
FileNotFoundError
If the *datasets.csv*-file was not found in any of the `data_folders`.
See Also
--------
:class:`timeeval.datasets.Datasets`
:class:`timeeval.datasets.DatasetManager`
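Examples
--------
A minimal sketch; the folder names are illustrative assumptions, not taken from the source.

>>> dm = MultiDatasetManager(["data/benchmark-a", "data/benchmark-b"])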
"""
def __init__(self, data_folders: List[Union[str, Path]], custom_datasets_file: Optional[Union[str, Path]] = None):
self._log_: logging.Logger = logging.getLogger(self.__class__.__name__)
self._filepaths = [Path(folder) / self.INDEX_FILENAME for folder in data_folders]
existing_files = np.array([p.exists() for p in self._filepaths])
if not np.all(existing_files):
missing = np.array(self._filepaths)[~existing_files]
missing = [str(p) for p in missing]
raise FileNotFoundError(f"Could not find the index files ({', '.join(missing)}). "
"Is your data_folders parameter correct?")
else:
path_mapping, df = self._load_df()
self._root_path_mapping: Dict[Tuple[str, str], Path] = path_mapping
super().__init__(df, custom_datasets_file)
@property
def _log(self) -> logging.Logger:
return self._log_
def _load_df(self) -> Tuple[Dict[Tuple[str, str], Path], pd.DataFrame]:
"""Read the dataset metadata from the index files."""
df = pd.DataFrame()
root_path_mapping = {}
for path in self._filepaths:
df_new =
|
pd.read_csv(path, index_col=["collection_name", "dataset_name"])
|
pandas.read_csv
|
import math
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
import sklearn
from math import sqrt
import matplotlib.pyplot as plt
from pandas import DataFrame
from pandas import concat
class DataLoader():
"""A class for loading and transforming data for the lstm model"""
def __init__(self, filename, split, cols):
dataframe =
|
pd.read_csv(filename, header=0, index_col=0)
|
pandas.read_csv
|
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from dask.utils import raises
import dask.dataframe as dd
from dask.dataframe.utils import eq, assert_dask_graph
def groupby_internal_repr():
pdf = pd.DataFrame({'x': [1, 2, 3, 4, 6, 7, 8, 9, 10],
'y': list('abcbabbcda')})
ddf = dd.from_pandas(pdf, 3)
gp = pdf.groupby('y')
dp = ddf.groupby('y')
assert isinstance(dp, dd.groupby.DataFrameGroupBy)
assert isinstance(dp._pd, pd.core.groupby.DataFrameGroupBy)
assert isinstance(dp.obj, dd.DataFrame)
assert eq(dp.obj, gp.obj)
gp = pdf.groupby('y')['x']
dp = ddf.groupby('y')['x']
assert isinstance(dp, dd.groupby.SeriesGroupBy)
assert isinstance(dp._pd, pd.core.groupby.SeriesGroupBy)
# slicing should not affect the internal representation
assert isinstance(dp.obj, dd.Series)
assert eq(dp.obj, gp.obj)
gp = pdf.groupby('y')[['x']]
dp = ddf.groupby('y')[['x']]
assert isinstance(dp, dd.groupby.DataFrameGroupBy)
assert isinstance(dp._pd, pd.core.groupby.DataFrameGroupBy)
# slicing should not affect the internal representation
assert isinstance(dp.obj, dd.DataFrame)
assert eq(dp.obj, gp.obj)
gp = pdf.groupby(pdf.y)['x']
dp = ddf.groupby(ddf.y)['x']
assert isinstance(dp, dd.groupby.SeriesGroupBy)
assert isinstance(dp._pd, pd.core.groupby.SeriesGroupBy)
# slicing should not affect the internal representation
assert isinstance(dp.obj, dd.Series)
assert eq(dp.obj, gp.obj)
gp = pdf.groupby(pdf.y)[['x']]
dp = ddf.groupby(ddf.y)[['x']]
assert isinstance(dp, dd.groupby.DataFrameGroupBy)
assert isinstance(dp._pd, pd.core.groupby.DataFrameGroupBy)
# slicing should not affect the internal representation
assert isinstance(dp.obj, dd.DataFrame)
assert eq(dp.obj, gp.obj)
def groupby_error():
pdf = pd.DataFrame({'x': [1, 2, 3, 4, 6, 7, 8, 9, 10],
'y': list('abcbabbcda')})
ddf = dd.from_pandas(pdf, 3)
with tm.assertRaises(KeyError):
ddf.groupby('A')
with tm.assertRaises(KeyError):
ddf.groupby(['x', 'A'])
dp = ddf.groupby('y')
msg = 'Column not found: '
with tm.assertRaisesRegexp(KeyError, msg):
dp['A']
with tm.assertRaisesRegexp(KeyError, msg):
dp[['x', 'A']]
def groupby_internal_head():
pdf = pd.DataFrame({'A': [1, 2] * 10,
'B': np.random.randn(20),
'C': np.random.randn(20)})
ddf = dd.from_pandas(pdf, 3)
assert eq(ddf.groupby('A')._head().sum(),
pdf.head().groupby('A').sum())
assert eq(ddf.groupby(ddf['A'])._head().sum(),
pdf.head().groupby(pdf['A']).sum())
assert eq(ddf.groupby(ddf['A'] + 1)._head().sum(),
pdf.head().groupby(pdf['A'] + 1).sum())
def test_full_groupby():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
full = d.compute()
assert raises(Exception, lambda: d.groupby('does_not_exist'))
assert raises(Exception, lambda: d.groupby('a').does_not_exist)
assert 'b' in dir(d.groupby('a'))
def func(df):
df['b'] = df.b - df.b.mean()
return df
assert eq(d.groupby('a').apply(func), full.groupby('a').apply(func))
def test_groupby_dir():
df = pd.DataFrame({'a': range(10), 'b c d e': range(10)})
ddf = dd.from_pandas(df, npartitions=2)
g = ddf.groupby('a')
assert 'a' in dir(g)
assert 'b c d e' not in dir(g)
def test_groupby_on_index():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
full = d.compute()
e = d.set_index('a')
efull = full.set_index('a')
assert eq(d.groupby('a').b.mean(), e.groupby(e.index).b.mean())
def func(df):
df.loc[:, 'b'] = df.b - df.b.mean()
return df
assert eq(d.groupby('a').apply(func).set_index('a'),
e.groupby(e.index).apply(func))
assert eq(d.groupby('a').apply(func), full.groupby('a').apply(func))
assert eq(d.groupby('a').apply(func).set_index('a'),
full.groupby('a').apply(func).set_index('a'))
assert eq(efull.groupby(efull.index).apply(func),
e.groupby(e.index).apply(func))
def test_groupby_multilevel_getitem():
df = pd.DataFrame({'a': [1, 2, 3, 1, 2, 3],
'b': [1, 2, 1, 4, 2, 1],
'c': [1, 3, 2, 1, 1, 2],
'd': [1, 2, 1, 1, 2, 2]})
ddf = dd.from_pandas(df, 2)
cases = [(ddf.groupby('a')['b'], df.groupby('a')['b']),
(ddf.groupby(['a', 'b']), df.groupby(['a', 'b'])),
(ddf.groupby(['a', 'b'])['c'], df.groupby(['a', 'b'])['c']),
(ddf.groupby('a')[['b', 'c']], df.groupby('a')[['b', 'c']]),
(ddf.groupby('a')[['b']], df.groupby('a')[['b']]),
(ddf.groupby(['a', 'b', 'c']), df.groupby(['a', 'b', 'c']))]
for d, p in cases:
assert isinstance(d, dd.groupby._GroupBy)
assert isinstance(p, pd.core.groupby.GroupBy)
assert eq(d.sum(), p.sum())
assert eq(d.min(), p.min())
assert eq(d.max(), p.max())
assert eq(d.count(), p.count())
assert eq(d.mean(), p.mean().astype(float))
def test_groupby_multilevel_agg():
df = pd.DataFrame({'a': [1, 2, 3, 1, 2, 3],
'b': [1, 2, 1, 4, 2, 1],
'c': [1, 3, 2, 1, 1, 2],
'd': [1, 2, 1, 1, 2, 2]})
ddf = dd.from_pandas(df, 2)
sol = df.groupby(['a']).mean()
res = ddf.groupby(['a']).mean()
assert eq(res, sol)
sol = df.groupby(['a', 'c']).mean()
res = ddf.groupby(['a', 'c']).mean()
assert eq(res, sol)
def test_groupby_get_group():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 6], 'b': [4, 2, 7]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 2, 6], 'b': [3, 3, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [4, 3, 7], 'b': [1, 1, 3]},
index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
full = d.compute()
for ddkey, pdkey in [('b', 'b'), (d.b, full.b),
(d.b + 1, full.b + 1)]:
ddgrouped = d.groupby(ddkey)
pdgrouped = full.groupby(pdkey)
# DataFrame
assert eq(ddgrouped.get_group(2), pdgrouped.get_group(2))
assert eq(ddgrouped.get_group(3), pdgrouped.get_group(3))
# Series
assert eq(ddgrouped.a.get_group(3), pdgrouped.a.get_group(3))
assert eq(ddgrouped.a.get_group(2), pdgrouped.a.get_group(2))
def test_dataframe_groupby_nunique():
strings = list('aaabbccccdddeee')
data = np.random.randn(len(strings))
ps = pd.DataFrame(dict(strings=strings, data=data))
s = dd.from_pandas(ps, npartitions=3)
expected = ps.groupby('strings')['data'].nunique()
assert eq(s.groupby('strings')['data'].nunique(), expected)
def test_dataframe_groupby_nunique_across_group_same_value():
strings = list('aaabbccccdddeee')
data = list(map(int, '123111223323412'))
ps = pd.DataFrame(dict(strings=strings, data=data))
s = dd.from_pandas(ps, npartitions=3)
expected = ps.groupby('strings')['data'].nunique()
assert eq(s.groupby('strings')['data'].nunique(), expected)
def test_series_groupby_propagates_names():
df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
ddf = dd.from_pandas(df, 2)
func = lambda df: df['y'].sum()
result = ddf.groupby('x').apply(func, columns='y')
expected = df.groupby('x').apply(func)
expected.name = 'y'
assert eq(result, expected)
def test_series_groupby():
s = pd.Series([1, 2, 2, 1, 1])
pd_group = s.groupby(s)
ss = dd.from_pandas(s, npartitions=2)
dask_group = ss.groupby(ss)
pd_group2 = s.groupby(s + 1)
dask_group2 = ss.groupby(ss + 1)
for dg, pdg in [(dask_group, pd_group), (dask_group2, pd_group2)]:
assert eq(dg.count(), pdg.count())
assert eq(dg.sum(), pdg.sum())
assert eq(dg.min(), pdg.min())
assert eq(dg.max(), pdg.max())
def test_series_groupby_errors():
s = pd.Series([1, 2, 2, 1, 1])
ss = dd.from_pandas(s, npartitions=2)
msg = "Grouper for '1' not 1-dimensional"
with tm.assertRaisesRegexp(ValueError, msg):
s.groupby([1, 2]) # pandas
with tm.assertRaisesRegexp(ValueError, msg):
ss.groupby([1, 2]) # dask should raise the same error
msg = "Grouper for '2' not 1-dimensional"
with tm.assertRaisesRegexp(ValueError, msg):
s.groupby([2]) # pandas
with tm.assertRaisesRegexp(ValueError, msg):
ss.groupby([2]) # dask should raise the same error
msg = "No group keys passed!"
with tm.assertRaisesRegexp(ValueError, msg):
s.groupby([]) # pandas
with tm.assertRaisesRegexp(ValueError, msg):
ss.groupby([]) # dask should raise the same error
sss = dd.from_pandas(s, npartitions=3)
assert raises(NotImplementedError, lambda: ss.groupby(sss))
with
|
tm.assertRaises(KeyError)
|
pandas.util.testing.assertRaises
|
"""
Builds the fundamental dataset for top N market cap equities from WRDS.
Requires WRDS account. Enter username and password when prompted.
# N = number of securities sorted by market cap
# Exclude GICS codes
Features: active, date, gvkey, year, month, mom1m, mom3m, mom6m, mom9m,
mrkcap, entval, saleq_ttm, cogsq_ttm, xsgaq_ttm, oiadpq_ttm,
niq_ttm, cheq_mrq, rectq_mrq, invtq_mrq, acoq_mrq,
ppentq_mrq, aoq_mrq, dlcq_mrq, apq_mrq, txpq_mrq,
lcoq_mrq, ltq_mrq, csho_1yr_avg
It takes around 30 mins to build the dataset for N=100 and date starting from 1980-01-01
"""
import wrds
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
import numpy as np
import pickle
from time import time
from wrds_data_processing import DataProcessing
from configparser import SafeConfigParser, NoOptionError
import argparse as ap
import sys
start_time = time()
# Parse arguments
parser = ap.ArgumentParser(description="Build Data from WRDS")
parser.add_argument("--N",default=10,type=int,
help="Number of equities sorted by market cap")
parser.add_argument("--exclude_gics", default=[],
help="Excludes the industries with list of GICS codes")
parser.add_argument("--filename", help="Name of the output data file",
required = True)
parser.add_argument("--test_mode",default='no',help="Test mode with small N")
args = vars(parser.parse_args())
N = args['N']
try:
exclude_gics = args['exclude_gics'].split(',')
except AttributeError:
exclude_gics = args['exclude_gics']
out_filename = args['filename']
test_mode = args['test_mode']
# Connect to WRDS data engine
db = wrds.Connection()
#############################################################################
#### SQL Query-----------------------------------------------------------####
#############################################################################
# Initialize dictionary to store top N gvkeys for every month
top_gvkey_month = {}
top_N_eq_gvkey_list_all = set()
start_date = '2013-01-01'
curr_date = datetime.datetime.strptime(start_date,'%Y-%m-%d')
# Go through months starting with the start date and find the top N companies by market cap
# for that month.
# Reference df for primary security
q10 = ("select gvkey,primiss from compm.secm")
primiss_df = db.raw_sql(q10)
while curr_date < datetime.datetime.now():
# prev_date = curr_date + relativedelta(months=-3)
curr_date_string = curr_date.strftime('%Y-%m-%d')
# prev_date_string = prev_date.strftime('%Y-%m-%d')
print(curr_date.date())
# Query to get list of companies with top N market cap for the given month
q1a = ("select distinct a.gvkey,a.latest,b.cshoq,b.prccq,b.mkvaltq,b.cshoq*b.prccq as market_cap,b.curcdq "
"from "
"(select gvkey,max(datadate) as latest "
"from "
"compm.fundq where datadate < '%s' "
"group by gvkey) a inner join "
"(select gvkey,datadate,mkvaltq,cshoq,prccq,curcdq "
"from compm.fundq where cshoq>0 and prccq>0 and curcdq='USD' and mkvaltq>0) b "
"on a.gvkey = b.gvkey and a.latest=b.datadate "
"order by market_cap desc "
"limit %i")%(curr_date_string,N)
mrk_df = db.raw_sql(q1a)
# merge the security flag
mrk_df = mrk_df.merge(primiss_df,on='gvkey',how='left')
gvkey_list_month = mrk_df['gvkey'][mrk_df['primiss']=='P'].values.tolist()
top_gvkey_month[curr_date.date()] = gvkey_list_month
top_N_eq_gvkey_list_all |= set(gvkey_list_month)
# increment the date for next month
curr_date = curr_date + relativedelta(months=1)
top_N_eq_gvkey_list_all = list(top_N_eq_gvkey_list_all)
# Query to get GIC codes and remove the exclude_gics list
q1b = ("select gvkey,gsector "
"from compa.company ")
df_gic = db.raw_sql(q1b)
exclude_gvkey_list = df_gic['gvkey'][df_gic['gsector'].isin(exclude_gics)].tolist()
# remove gvkey of associated gic code from the main list
top_N_eq_gvkey_list = [k for k in top_N_eq_gvkey_list_all if k not in exclude_gvkey_list]
# Check for continuation of companies and update their status (active or not)
# Compare the gvkey list with the most recent list if it exists
# Update the current gvkey list with the inactive ones
# Read the gvkey config file which contains the most recent list
config_gvkey = SafeConfigParser()
config_gvkey.read('gvkey-hist.dat')
config_gvkey.set('gvkey_list', '# Used to keep track of most recent equity list. No need to edit', '')
# Initialize active dict
active = {key: 1 for key in top_N_eq_gvkey_list}
if test_mode != 'yes':
try:
mr_gvk_list = config_gvkey.get('gvkey_list', 'most_recent_list').split(',')
inactive_list = [k for k in mr_gvk_list if k not in top_N_eq_gvkey_list]
# Add inactive gvkey
for inactive_gvk in inactive_list:
active[inactive_gvk] = 0
# Update the current gvkey list with the inactive ones
top_N_eq_gvkey_list = list(set().union(top_N_eq_gvkey_list,inactive_list))
# create the most recent list in the config file if it doesn't exist
config_gvkey.set('gvkey_list', 'most_recent_list', ','.join(top_N_eq_gvkey_list))
except NoOptionError:
# create the most recent list in the config file if it doesn't exist
config_gvkey.set('gvkey_list', 'most_recent_list', ','.join(top_N_eq_gvkey_list))
# save to a file
with open('gvkey-hist.dat', 'w') as configfile:
config_gvkey.write(configfile)
# change format to be compatible with sql query
top_N_eq_gvkey = tuple(["'%s'"%str(i) for i in top_N_eq_gvkey_list])
top_N_eq_gvkey = ",".join(top_N_eq_gvkey)
# Query to get fundamental Data
q2 = ("select datadate,gvkey,tic,saleq,cogsq,xsgaq,oiadpq,niq,"
"cheq, rectq, invtq, acoq, ppentq, aoq, dlcq, apq, txpq, lcoq, ltq, dlttq, cshoq, seqq, atq "
"from compm.fundq "
"where gvkey in (%s) and datadate > '%s' ")%(top_N_eq_gvkey,start_date)
fundq_df = db.raw_sql(q2)
# Add gics_sector as a column
fundq_df = pd.merge(fundq_df,df_gic,how='left',on=['gvkey'])
# Query to get price data
q3 = ("select gvkey,datadate,prccm,ajexm "
"from compm.secm "
"where gvkey in (%s) ")%top_N_eq_gvkey
price_df_all = db.raw_sql(q3).sort_values('datadate')
price_df_all.datadate = pd.to_datetime(price_df_all.datadate,format='%Y-%m-%d')
# Query to get stock_split data
q4 = ("select gvkey,datadate,split "
"from compm.sec_split "
"where gvkey in (%s) ")%top_N_eq_gvkey
stock_split_df_all = db.raw_sql(q4).sort_values('datadate')
stock_split_df_all.datadate = pd.to_datetime(stock_split_df_all.datadate,format='%Y-%m-%d')
####--END OF SQL QUERYING-------------------------------------------------------
# Build balance sheet features
blnc_sheet_list = ['cheq','rectq','invtq','acoq','ppentq','aoq',
'dlcq','apq','txpq','lcoq','ltq','dlttq','cshoq','seqq','atq']
# Build income sheet features
income_list = ['saleq','cogsq','xsgaq','oiadpq','niq']
gvkey_list = top_N_eq_gvkey_list
print("Total Number of Equities in the dataset: %i"%len(gvkey_list))
print('\n')
df_all = fundq_df[['gvkey','gsector','datadate'] + income_list + blnc_sheet_list]
df_all['active'] = np.nan
def reorder_cols():
a = ['active','datadate','gvkey','gsector','year','month']
mom = ['mom1m','mom3m','mom6m','mom9m']
prc = ['mrkcap','entval']
ttm_list_tmp = [x + '_ttm' for x in income_list]
mrq_list_tmp = [x + '_mrq' for x in blnc_sheet_list]
mrq_list_tmp.remove('cshoq_mrq')
mrq_list_tmp.remove('dlttq_mrq')
csho = ['csho_1yr_avg']
price = ['adjusted_price','prccm','ajexm']
new_order = a + mom + prc + ttm_list_tmp + mrq_list_tmp + csho + price
return new_order
# Create empty df to be appended for each equity
df_all_eq = pd.DataFrame(columns=reorder_cols())
# Start filling data by gvkey
for jj,key in enumerate(gvkey_list):
try:
t0=time()
# print("GVKEY: %s"%key)
df = df_all[df_all['gvkey'] == key].copy()
df = df.sort_values('datadate')
df = df.set_index('datadate',drop=False)
df = df[~df.index.duplicated(keep='first')]
# print("df shape:%g,%g"%df.shape)
# get price_df for the current gvkey
price_df = price_df_all[price_df_all['gvkey']==key].copy()
# print("price df shape:%g,%g"%price_df.shape)
# get stock_split_df for the current gvkey
stock_split_df = stock_split_df_all[stock_split_df_all['gvkey']==key].copy()
# print("stock split df shape:%g,%g"%stock_split_df.shape)
# print("\n")
# Start data processing
dp = DataProcessing(lag=3, monthly_active_gvkey=top_gvkey_month)
# Add the lag to the date index
df = dp.add_lag(df)
# Create new df with monthly frequency (empty)
new_df_empty = dp.create_df_monthly(df)
# Add ttm and mrq data
ttm_mrq_df = dp.create_ttm_mrq(df, new_df_empty)
# Add price information
df_w_price, price_df_for_mom = dp.add_price_features(ttm_mrq_df, price_df)
# Add momentum features
df_w_mom = dp.get_mom(df_w_price, price_df_for_mom, [1, 3, 6, 9])
# Add csho_1_year average
df_w_mom['csho_1yr_avg'] = df_w_mom['cshoq_mrq'].rolling(12, min_periods=1).mean()
# Reorder column names
new_order = reorder_cols()
del df, price_df, stock_split_df
df_out = df_w_mom[new_order]
# Fill Nans with 0.0
df_out = df_out.fillna(0.0)
df_out = df_out.reset_index(drop=True)
# Append the current df to the full_df
df_all_eq = df_all_eq.append(df_out, ignore_index=True)
print("%i GVKEY: %s, Time %2.2f"%(jj, key, time()-t0))
except (ValueError, IndexError):
pass
# Normalize the momentum features
dates = df_all_eq['datadate'].unique()
mom_f = ['mom1m', 'mom3m', 'mom6m', 'mom9m']
for date in dates:
date =
|
pd.Timestamp(date)
|
pandas.Timestamp
|
""" test the scalar Timedelta """
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat, isnull)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10, seconds=10).value, expected)
self.assertEqual(
Timedelta(days=10, milliseconds=10 * 1000).value, expected)
self.assertEqual(
Timedelta(days=10, microseconds=10 * 1000 * 1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1,
npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(
Timedelta(**{pykwarg: npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings & abbrevs
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hr'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'),
Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'),
Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'),
timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(
days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=1, microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=31, microseconds=3))
# currently invalid as it has a - on the hhmmdd part (only allowed on
# the days)
self.assertRaises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
self.assertRaises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
self.assertRaises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assertRaisesRegexp(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assertRaisesRegexp(ValueError, "unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a Timedelta from the passed "
"arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# roundtripping both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value), td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)), td)
self.assertEqual(Timedelta(td._repr_base(format='all')), td)
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value, iNaT)
self.assertEqual(Timedelta('nat').value, iNaT)
self.assertEqual(Timedelta('NAT').value, iNaT)
self.assertEqual(Timedelta(None).value, iNaT)
self.assertEqual(Timedelta(np.nan).value, iNaT)
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),
Timedelta('0 days, 00:00:02'))
# unicode
# GH 11995
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
self.assertEqual(result, expected)
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta(u'0 days, 02:00:00'))
self.assertRaises(ValueError, lambda: Timedelta(u'foo bar'))
def test_overflow_on_construction(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
self.assertRaises(OverflowError, pd.Timedelta, value)
def test_total_seconds_scalar(self):
# GH 10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
self.assertTrue(np.isnan(rng.total_seconds()))
def test_repr(self):
self.assertEqual(repr(Timedelta(10, unit='d')),
"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10, unit='s')),
"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10, unit='ms')),
"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10, unit='ms')),
"Timedelta('-1 days +23:59:59.990000')")
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
# this is NOT equal and cannot be round-tripped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
self.assertEqual(result, td.value / float(86400 * 1e9))
result = td / np.timedelta64(1, 's')
self.assertEqual(result, td.value / float(1e9))
result = td / np.timedelta64(1, 'ns')
self.assertEqual(result, td.value)
def test_fields(self):
def check(value):
# that we are int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days, 1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 0)
self.assertEqual(rng.nanoseconds, 0)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td), Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td, Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000)
rng = to_timedelta('-1 days, 10:11:12.100123456')
self.assertEqual(rng.days, -1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 100 * 1000 + 123)
self.assertEqual(rng.nanoseconds, 456)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days, -1)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
self.assertEqual(tup.days, -2)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
def test_nat_converters(self):
self.assertEqual(to_timedelta(
'nat', box=False).astype('int64'), iNaT)
self.assertEqual(to_timedelta(
'nan', box=False).astype('int64'), iNaT)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
self.assertEqual(result, expected)
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0, 'ns'))
self.assertEqual(ct(10), np.timedelta64(10, 'ns'))
self.assertEqual(ct(10, unit='ns'), np.timedelta64(
10, 'ns').astype('m8[ns]'))
self.assertEqual(ct(10, unit='us'), np.timedelta64(
10, 'us').astype('m8[ns]'))
self.assertEqual(ct(10, unit='ms'), np.timedelta64(
10, 'ms').astype('m8[ns]'))
self.assertEqual(ct(10, unit='s'), np.timedelta64(
10, 's').astype('m8[ns]'))
self.assertEqual(ct(10, unit='d'), np.timedelta64(
10, 'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
self.assertEqual(ct(timedelta(seconds=1)),
np.timedelta64(1, 's').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)),
np.timedelta64(1, 'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)),
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
self.assertEqual(r1, s1)
r2 = t2.round(freq)
self.assertEqual(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertFalse((v in td))
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertTrue((v in td))
def test_identity(self):
td = Timedelta(10, unit='d')
self.assertTrue(isinstance(td, Timedelta))
self.assertTrue(isinstance(td, timedelta))
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('100'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000, 'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000, 'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000, 'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000, 'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000, 'ns'))
self.assertEqual(ct('100s'), np.timedelta64(100000000000, 'ns'))
self.assertEqual(ct('1000s'), np.timedelta64(1000000000000, 'ns'))
self.assertEqual(ct('1d'), conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('-1d'), -conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('1D'), conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('10D'), conv(np.timedelta64(10, 'D')))
self.assertEqual(ct('100D'), conv(np.timedelta64(100, 'D')))
self.assertEqual(ct('1000D'), conv(np.timedelta64(1000, 'D')))
self.assertEqual(ct('10000D'), conv(np.timedelta64(10000, 'D')))
# space
self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000, 'D')))
self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000, 'D')))
# invalid
self.assertRaises(ValueError, ct, '1foo')
self.assertRaises(ValueError, ct, 'foo')
def test_full_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
self.assertEqual(ct('1days'), conv(d1))
self.assertEqual(ct('1days,'), conv(d1))
self.assertEqual(ct('- 1days,'), -conv(d1))
self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1, 's')))
self.assertEqual(ct('06:00:01'), conv(
np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('06:00:01.0'), conv(
np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('06:00:01.01'), conv(
np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
self.assertEqual(ct('- 1days, 00:00:01'),
conv(-d1 + np.timedelta64(1, 's')))
self.assertEqual(ct('1days, 06:00:01'), conv(
d1 + np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('1days, 06:00:01.01'), conv(
d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
# invalid
self.assertRaises(ValueError, ct, '- 1days, 00')
def test_overflow(self):
# GH 9442
s = Series(pd.date_range('20130101', periods=100000, freq='H'))
s[0] += pd.Timedelta('1s 1ms')
# mean
result = (s - s.min()).mean()
expected = pd.Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s)
).sum())
# the computation is converted to float so might be some loss of
# precision
self.assertTrue(np.allclose(result.value / 1000, expected.value /
1000))
# sum
self.assertRaises(ValueError, lambda: (s - s.min()).sum())
s1 = s[0:10000]
self.assertRaises(ValueError, lambda: (s1 - s1.min()).sum())
s2 = s[0:1000]
result = (s2 - s2.min()).sum()
def test_pickle(self):
v = Timedelta('1 days 10:11:12.0123456')
v_p = self.round_trip_pickle(v)
self.assertEqual(v, v_p)
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, 'D')
td = timedelta(days=1)
self.assertEqual(hash(v), hash(td))
d = {td: 2}
self.assertEqual(d[v], 2)
tds = timedelta_range('1 second', periods=20)
self.assertTrue(all(hash(td) == hash(td.to_pytimedelta()) for td in
tds))
# python timedeltas drop ns resolution
ns_td = Timedelta(1, 'ns')
self.assertNotEqual(hash(ns_td), hash(ns_td.to_pytimedelta()))
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
self.assertTrue(min_td.value == np.iinfo(np.int64).min + 1)
self.assertTrue(max_td.value == np.iinfo(np.int64).max)
# Beyond lower limit, a NAT before the Overflow
self.assertIsInstance(min_td - Timedelta(1, 'ns'),
NaTType)
with tm.assertRaises(OverflowError):
min_td - Timedelta(2, 'ns')
with tm.assertRaises(OverflowError):
max_td + Timedelta(1, 'ns')
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, 'ns')
self.assertIsInstance(td, NaTType)
with tm.assertRaises(OverflowError):
|
Timedelta(min_td.value - 2, 'ns')
|
pandas.Timedelta
|
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: all,-execution,-papermill,-trusted
# formats: ipynb,py//py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown] tags=[]
# # Description
# %% [markdown] tags=[]
# This notebook analyzes how clusters of traits were grouped across the ensemble partitions. For example, a stable cluster obtained from the consensus partitions, such as one made up of cardiovascular diseases, can show that all of its traits were always grouped together across every partition of the ensemble; another cluster might show that some traits were clustered together more often than others, indicating a less stable group of traits.
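# %% [markdown] tags=[]
# As a minimal illustrative sketch (not part of this notebook's pipeline), the stability of a cluster can be summarized by how often each pair of traits is assigned to the same cluster across the ensemble; `partitions` below is a hypothetical (n_partitions, n_traits) array of cluster labels used only for this example.
# %% tags=[]
import numpy as np
partitions = np.array(
    [
        [0, 0, 1, 1],
        [0, 0, 1, 0],
        [1, 1, 0, 0],
    ]
)  # three toy partitions of four traits
n_traits = partitions.shape[1]
co_occurrence = np.zeros((n_traits, n_traits))
for labels in partitions:
    # for every pair of traits, check whether they share a cluster label
    co_occurrence += (labels[:, None] == labels[None, :]).astype(float)
co_occurrence /= partitions.shape[0]
# co_occurrence[i, j] is the fraction of partitions in which traits i and j
# were grouped together; values close to 1.0 indicate a stable pairing
print(co_occurrence)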
# %% [markdown] tags=[]
# # Modules loading
# %% tags=[]
# %load_ext autoreload
# %autoreload 2
# %% tags=[]
from IPython.display import display
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from utils import generate_result_set_name
import conf
# %% [markdown] tags=[]
# # Settings
# %% tags=[]
CONSENSUS_CLUSTERING_DIR = Path(
conf.RESULTS["CLUSTERING_DIR"], "consensus_clustering"
).resolve()
display(CONSENSUS_CLUSTERING_DIR)
# %% [markdown] tags=[]
# ## Load data
# %% tags=[]
INPUT_SUBSET = "umap"
# %% tags=[]
INPUT_STEM = "z_score_std-projection-smultixcan-efo_partial-mashr-zscores"
# %% tags=[]
DR_OPTIONS = {
"n_components": 5,
"metric": "euclidean",
"n_neighbors": 15,
"random_state": 0,
}
# %% tags=[]
input_filepath = Path(
conf.RESULTS["DATA_TRANSFORMATIONS_DIR"],
INPUT_SUBSET,
generate_result_set_name(
DR_OPTIONS, prefix=f"{INPUT_SUBSET}-{INPUT_STEM}-", suffix=".pkl"
),
).resolve()
display(input_filepath)
assert input_filepath.exists(), "Input file does not exist"
input_filepath_stem = input_filepath.stem
display(input_filepath_stem)
# %% tags=[]
data_umap =
|
pd.read_pickle(input_filepath)
|
pandas.read_pickle
|
import numpy as np
from numpy.testing import assert_equal, assert_, assert_raises
import pandas as pd
import pandas.util.testing as tm
import pytest
from statsmodels.base import data as sm_data
from statsmodels.formula import handle_formula_data
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
from statsmodels.discrete.discrete_model import Logit
# FIXME: do not leave commented-out, enable or move/remove
# class TestDates(object):
# @classmethod
# def setup_class(cls):
# nrows = 10
# cls.dates_result = cls.dates_results = np.random.random(nrows)
#
# def test_dates(self):
# np.testing.assert_equal(data.wrap_output(self.dates_input, 'dates'),
# self.dates_result)
class TestArrays(object):
@classmethod
def setup_class(cls):
cls.endog = np.random.random(10)
cls.exog = np.c_[np.ones(10), np.random.random((10, 2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_result = cls.col_input = np.random.random(nvars)
cls.row_result = cls.row_input = np.random.random(nrows)
cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y'
cls.row_labels = None
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog)
def test_attach(self):
data = self.data
        # this makes sure that what the wrappers need works, but does not
        # check the wrapped results themselves
np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
def test_names(self):
data = self.data
np.testing.assert_equal(data.xnames, self.xnames)
np.testing.assert_equal(data.ynames, self.ynames)
def test_labels(self):
        # HACK: after the NA changes in numpy master, assert_equal fails on
        # pandas indices
# FIXME: see if this can be de-hacked
np.testing.assert_(np.all(self.data.row_labels == self.row_labels))
class TestArrays2dEndog(TestArrays):
@classmethod
def setup_class(cls):
super(TestArrays2dEndog, cls).setup_class()
cls.endog = np.random.random((10, 1))
cls.exog = np.c_[np.ones(10), np.random.random((10, 2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
class TestArrays1dExog(TestArrays):
@classmethod
def setup_class(cls):
super(TestArrays1dExog, cls).setup_class()
cls.endog = np.random.random(10)
exog = np.random.random(10)
cls.data = sm_data.handle_data(cls.endog, exog)
cls.exog = exog[:, None]
cls.xnames = ['x1']
cls.ynames = 'y'
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog.squeeze())
class TestDataFrames(TestArrays):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
tm.assert_frame_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_attach(self):
data = self.data
        # this makes sure that what the wrappers need works, but does not
        # check the wrapped results themselves
tm.assert_series_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
tm.assert_series_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
tm.assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
class TestDataFramesWithMultiIndex(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
mi = pd.MultiIndex.from_product([['x'], ['1', '2']])
exog = pd.DataFrame(np.random.random((10, 2)), columns=mi)
exog_flattened_idx = pd.Index(['const', 'x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input, index=exog_flattened_idx)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input, index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog_flattened_idx,
columns=exog_flattened_idx)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
class TestLists(TestArrays):
@classmethod
def setup_class(cls):
super(TestLists, cls).setup_class()
cls.endog = np.random.random(10).tolist()
cls.exog = np.c_[np.ones(10), np.random.random((10, 2))].tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
class TestRecarrays(TestArrays):
@classmethod
def setup_class(cls):
super(TestRecarrays, cls).setup_class()
cls.endog = np.random.random(9).view([('y_1', 'f8')]).view(np.recarray)
exog = np.random.random(9*3).view([('const', 'f8'), ('x_1', 'f8'),
('x_2', 'f8')]).view(np.recarray)
exog['const'] = 1
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
def test_endogexog(self):
np.testing.assert_equal(self.data.endog,
self.endog.view(float, type=np.ndarray))
np.testing.assert_equal(self.data.exog,
self.exog.view((float, 3), type=np.ndarray))
class TestStructarrays(TestArrays):
@classmethod
def setup_class(cls):
super(TestStructarrays, cls).setup_class()
cls.endog = np.random.random(9).view([('y_1', 'f8')]).view(np.recarray)
exog = np.random.random(9*3).view([('const', 'f8'), ('x_1', 'f8'),
('x_2', 'f8')]).view(np.recarray)
exog['const'] = 1
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
def test_endogexog(self):
np.testing.assert_equal(self.data.endog,
self.endog.view(float, type=np.ndarray))
np.testing.assert_equal(self.data.exog,
self.exog.view((float, 3), type=np.ndarray))
class TestListDataFrame(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = np.random.random(10).tolist()
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y'
cls.row_labels = cls.exog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameList(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x1', 'x2'])
exog.insert(0, 'const', 1)
cls.exog = exog.values.tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y_1'
cls.row_labels = cls.endog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
def test_orig(self):
tm.assert_frame_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestArrayDataFrame(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = np.random.random(10)
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y'
cls.row_labels = cls.exog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameArray(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x1', 'x2']) # names mimic defaults
exog.insert(0, 'const', 1)
cls.exog = exog.values
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y_1'
cls.row_labels = cls.endog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
def test_orig(self):
tm.assert_frame_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestSeriesDataFrame(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.Series(np.random.random(10), name='y_1')
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
tm.assert_series_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
class TestSeriesSeries(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.Series(np.random.random(10), name='y_1')
exog = pd.Series(np.random.random(10), name='x_1')
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 1
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=[exog.name])
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=[exog.name],
columns=[exog.name])
cls.xnames = ['x_1']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
tm.assert_series_equal(self.data.orig_endog, self.endog)
tm.assert_series_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog.values[:, None])
def test_alignment():
# Fix Issue GH#206
from statsmodels.datasets.macrodata import load_pandas
d = load_pandas().data
# growth rates
gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna()
gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna()
lint = d['realint'][:-1] # incorrect indexing for test purposes
endog = gs_l_realinv
# re-index because they will not conform to lint
realgdp = gs_l_realgdp.reindex(lint.index, method='bfill')
data = dict(const=np.ones_like(lint), lrealgdp=realgdp, lint=lint)
exog =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
# python vaccination_adaptive_hybrid_autosearch_conform.py MSA_NAME VACCINATION_TIME VACCINATION_RATIO consider_hesitancy ACCEPTANCE_SCENARIO w1 w2 w3 w4 w5 quick_test
# python vaccination_adaptive_hybrid_autosearch_conform.py Atlanta 15 0.1 True cf18 1 1 1 1 1 False
import setproctitle
setproctitle.setproctitle("covid-19-vac@chenlin")
import sys
import os
import datetime
import pandas as pd
import numpy as np
import pickle
import time
import pdb
from skcriteria import Data, MIN
from skcriteria.madm import closeness
import constants
import functions
#import disease_model_only_modify_attack_rates
import disease_model_diff_acceptance
###############################################################################
# Constants
root = '/data/chenlin/COVID-19/Data'
timestring='20210206'
MIN_DATETIME = datetime.datetime(2020, 3, 1, 0)
MAX_DATETIME = datetime.datetime(2020, 5, 2, 23)
NUM_DAYS = 63
NUM_GROUPS = 5
# Vaccination protection rate
PROTECTION_RATE = 1
# Policy execution ratio
EXECUTION_RATIO = 1
# Recheck interval: After distributing some portion of vaccines, recheck the most vulnerable demographic group
RECHECK_INTERVAL = 0.01
###############################################################################
# Main variable settings
MSA_NAME = sys.argv[1]; #MSA_NAME = 'SanFrancisco'
MSA_NAME_FULL = constants.MSA_NAME_FULL_DICT[MSA_NAME] #MSA_NAME_FULL = 'San_Francisco_Oakland_Hayward_CA'
print('MSA_NAME: ',MSA_NAME)
# Policies to compare
policy_to_compare = ['No_Vaccination','Baseline','Age_Flood', 'Income_Flood','JUE_EW_Flood']
# Vaccination time
VACCINATION_TIME = sys.argv[2];print('VACCINATION_TIME:',VACCINATION_TIME)
VACCINATION_TIME_STR = VACCINATION_TIME
VACCINATION_TIME = float(VACCINATION_TIME)
print(VACCINATION_TIME_STR,'\n',VACCINATION_TIME)
policy_savename = 'adaptive_%sd_hybrid'%VACCINATION_TIME_STR
print('policy_savename:',policy_savename)
# Vaccination_Ratio
VACCINATION_RATIO = sys.argv[3]; print('VACCINATION_RATIO:',VACCINATION_RATIO)
VACCINATION_RATIO = float(VACCINATION_RATIO)
# Consider hesitancy or not
consider_hesitancy = sys.argv[4]
print('Consider hesitancy? ', consider_hesitancy)
if(consider_hesitancy not in ['True','False']):
print('Invalid value for consider_hesitancy. Please check.')
pdb.set_trace()
# Acceptance scenario, if considering hesitancy
# if consider_hesitancy=='False', this field does not affect anything
ACCEPTANCE_SCENARIO = sys.argv[5]
print('Vaccine acceptance scenario: ', ACCEPTANCE_SCENARIO)
w1 = float(sys.argv[6])
w2 = float(sys.argv[7])
w3 = float(sys.argv[8])
w4 = float(sys.argv[9])
w5 = float(sys.argv[10])
weights = [w1,w2,w3,w4,w5]
print('Weights:', weights)
# Quick Test: prototyping
quick_test = sys.argv[11]; print('Quick testing?', quick_test)
if(quick_test == 'True'):
NUM_SEEDS = 2
NUM_SEEDS_CHECKING = 2
else:
NUM_SEEDS = 30
NUM_SEEDS_CHECKING = 30
print('NUM_SEEDS: ', NUM_SEEDS)
print('NUM_SEEDS_CHECKING: ', NUM_SEEDS_CHECKING)
STARTING_SEED = range(NUM_SEEDS)
STARTING_SEED_CHECKING = range(NUM_SEEDS_CHECKING)
distribution_time = VACCINATION_RATIO / RECHECK_INTERVAL  # number of rounds over which the vaccines are distributed
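# For example, with the sample invocation shown at the top of this file
# (VACCINATION_RATIO = 0.1) and RECHECK_INTERVAL = 0.01, the vaccine budget is
# distributed over 0.1 / 0.01 = 10 rounds, with the most vulnerable demographic
# group re-identified after each round.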
# Compare all policies with no_vaccination scenario
REL_TO = 'No_Vaccination'
###############################################################################
# Functions
def run_simulation(starting_seed, num_seeds, vaccination_vector, vaccine_acceptance,protection_rate=1):
#m = disease_model_only_modify_attack_rates.Model(starting_seed=starting_seed,
m = disease_model_diff_acceptance.Model(starting_seed=starting_seed, #20211007
num_seeds=num_seeds,
debug=False,clip_poisson_approximation=True,ipf_final_match='poi',ipf_num_iter=100)
m.init_exogenous_variables(poi_areas=poi_areas,
poi_dwell_time_correction_factors=poi_dwell_time_correction_factors,
cbg_sizes=cbg_sizes,
poi_cbg_visits_list=poi_cbg_visits_list,
all_hours=all_hours,
p_sick_at_t0=constants.parameters_dict[MSA_NAME][0],
#vaccination_time=24*31, # when to apply vaccination (which hour)
vaccination_time=24*VACCINATION_TIME, # when to apply vaccination (which hour)
vaccination_vector = vaccination_vector,
vaccine_acceptance = vaccine_acceptance,#20211007
protection_rate = protection_rate,
home_beta=constants.parameters_dict[MSA_NAME][1],
cbg_attack_rates_original = cbg_attack_rates_scaled,
cbg_death_rates_original = cbg_death_rates_scaled,
poi_psi=constants.parameters_dict[MSA_NAME][2],
just_compute_r0=False,
latency_period=96, # 4 days
infectious_period=84, # 3.5 days
confirmation_rate=.1,
confirmation_lag=168, # 7 days
death_lag=432
)
m.init_endogenous_variables()
T1,L_1,I_1,R_1,C2,D2,total_affected, history_C2, history_D2, total_affected_each_cbg = m.simulate_disease_spread(no_print=True)
del T1
del L_1
del I_1
del C2
del D2
return history_C2, history_D2
# Analyze results and produce graphs
def output_result(cbg_table, demo_feat, policy_list, num_groups, print_result=True,draw_result=True, rel_to=REL_TO):
#print('Observation dimension: ', demo_feat)
results = {}
for policy in policy_list:
exec("final_deaths_rate_%s_total = cbg_table['Final_Deaths_%s'].sum()/cbg_table['Sum'].sum()" % (policy.lower(),policy))
cbg_table['Final_Deaths_' + policy] = eval('avg_final_deaths_' + policy.lower())
exec("%s = np.zeros(num_groups)" % ('final_deaths_rate_'+ policy.lower()))
deaths_total_abs = eval('final_deaths_rate_%s_total'%(policy.lower()))
for i in range(num_groups):
eval('final_deaths_rate_'+ policy.lower())[i] = cbg_table[cbg_table[demo_feat + '_Quantile']==i]['Final_Deaths_' + policy].sum()
eval('final_deaths_rate_'+ policy.lower())[i] /= cbg_table[cbg_table[demo_feat + '_Quantile']==i]['Sum'].sum()
deaths_gini_abs = functions.gini(eval('final_deaths_rate_'+ policy.lower()))
if(rel_to=='No_Vaccination'):
# rel is compared to No_Vaccination
if(policy=='No_Vaccination'):
deaths_total_no_vaccination = deaths_total_abs
deaths_gini_no_vaccination = deaths_gini_abs
deaths_total_rel = 0; deaths_gini_rel = 0
results[policy] = {
'deaths_total_abs':'%.6f'% deaths_total_abs,
'deaths_total_rel':'%.6f'% deaths_total_rel,
'deaths_gini_abs':'%.4f'% deaths_gini_abs, #'%.4f'
'deaths_gini_rel':'%.4f'% deaths_gini_rel} #'%.4f'
else:
deaths_total_rel = (eval('final_deaths_rate_%s_total'%(policy.lower())) - deaths_total_no_vaccination) / deaths_total_no_vaccination
deaths_gini_rel = (functions.gini(eval('final_deaths_rate_'+ policy.lower())) - deaths_gini_no_vaccination) / deaths_gini_no_vaccination
results[policy] = {
'deaths_total_abs':'%.6f'% deaths_total_abs,
'deaths_total_rel':'%.6f'% deaths_total_rel,
'deaths_gini_abs':'%.4f'% deaths_gini_abs, #'%.4f'
'deaths_gini_rel':'%.4f'% deaths_gini_rel} #'%.4f'
elif(rel_to=='Baseline'):
# rel is compared to Baseline
if(policy=='Baseline'):
deaths_total_baseline = deaths_total_abs
deaths_gini_baseline = deaths_gini_abs
deaths_total_rel = 0
deaths_gini_rel = 0
results[policy] = {
'deaths_total_abs':'%.6f'% deaths_total_abs,
'deaths_total_rel':'%.6f'% deaths_total_rel,
'deaths_gini_abs':'%.4f'% deaths_gini_abs,
'deaths_gini_rel':'%.4f'% deaths_gini_rel}
else:
deaths_total_rel = (eval('final_deaths_rate_%s_total'%(policy.lower())) - deaths_total_baseline) / deaths_total_baseline
deaths_gini_rel = (functions.gini(eval('final_deaths_rate_'+ policy.lower())) - deaths_gini_baseline) / deaths_gini_baseline
results[policy] = {
'deaths_total_abs':'%.6f'% deaths_total_abs,
'deaths_total_rel':'%.6f'% deaths_total_rel,
'deaths_gini_abs':'%.4f'% deaths_gini_abs,
'deaths_gini_rel':'%.4f'% deaths_gini_rel}
if(print_result==True):
print('Policy: ', policy)
print('Deaths, Gini Index: ',functions.gini(eval('final_deaths_rate_'+ policy.lower())))
if(policy=='Baseline'):
deaths_total_baseline = eval('final_deaths_rate_%s_total'%(policy.lower()))
deaths_gini_baseline = functions.gini(eval('final_deaths_rate_'+ policy.lower()))
if(policy!='Baseline' and policy!='No_Vaccination'):
print('Compared to baseline:')
print('Deaths total: ', (eval('final_deaths_rate_%s_total'%(policy.lower())) - deaths_total_baseline) / deaths_total_baseline)
print('Deaths gini: ', (functions.gini(eval('final_deaths_rate_'+ policy.lower())) - deaths_gini_baseline) / deaths_gini_baseline)
return results
def make_gini_table(policy_list, demo_feat_list, num_groups, save_path, save_result=False):
cbg_table_name_dict=dict()
cbg_table_name_dict['Age'] = cbg_age_msa
cbg_table_name_dict['Mean_Household_Income'] = cbg_income_msa
cbg_table_name_dict['Essential_Worker'] = cbg_occupation_msa
    cbg_table_name_dict['Hybrid'] = cbg_age_msa # any of the tables works here; the choice does not matter
#print('Policy list: ', policy_list)
#print('Demographic feature list: ', demo_feat_list)
gini_df = pd.DataFrame(columns=pd.MultiIndex.from_tuples([('All','deaths_total_abs'),('All','deaths_total_rel')]))
gini_df['Policy'] = policy_list
for demo_feat in demo_feat_list:
results = output_result(cbg_table_name_dict[demo_feat],
demo_feat, policy_list, num_groups=NUM_GROUPS,
print_result=False, draw_result=False,rel_to=REL_TO)
for i in range(len(policy_list)):
policy = policy_list[i]
gini_df.loc[i,('All','deaths_total_abs')] = results[policy]['deaths_total_abs']
gini_df.loc[i,('All','deaths_total_rel')] = results[policy]['deaths_total_rel'] if abs(float(results[policy]['deaths_total_rel']))>=0.01 else 0
gini_df.loc[i,(demo_feat,'deaths_gini_abs')] = results[policy]['deaths_gini_abs']
gini_df.loc[i,(demo_feat,'deaths_gini_rel')] = results[policy]['deaths_gini_rel'] if abs(float(results[policy]['deaths_gini_rel']))>=0.01 else 0
gini_df.set_index(['Policy'],inplace=True)
# Transpose
    gini_df_trans = pd.DataFrame(gini_df.values.T, index=gini_df.columns, columns=gini_df.index)  # transpose
# Save .csv
if(save_result==True):
gini_df_trans.to_csv(save_path)
return gini_df_trans
###############################################################################
# Load Demographic-Related Data
start = time.time()
# Load POI-CBG visiting matrices
f = open(os.path.join(root, MSA_NAME, '%s_2020-03-01_to_2020-05-02.pkl'%MSA_NAME_FULL), 'rb')
poi_cbg_visits_list = pickle.load(f)
f.close()
# Load precomputed parameters to adjust(clip) POI dwell times
d = pd.read_csv(os.path.join(root,MSA_NAME, 'parameters_%s.csv' % MSA_NAME))
# No clipping
new_d = d
all_hours = functions.list_hours_in_range(MIN_DATETIME, MAX_DATETIME)
poi_areas = new_d['feet'].values  # POI area (square feet)
poi_dwell_times = new_d['median'].values  # average dwell time
poi_dwell_time_correction_factors = (poi_dwell_times / (poi_dwell_times+60)) ** 2
del new_d
del d
# Load ACS Data for MSA-county matching
acs_data = pd.read_csv(os.path.join(root,'list1.csv'),header=2)
acs_msas = [msa for msa in acs_data['CBSA Title'].unique() if type(msa) == str]
msa_match = functions.match_msa_name_to_msas_in_acs_data(MSA_NAME_FULL, acs_msas)
msa_data = acs_data[acs_data['CBSA Title'] == msa_match].copy()
msa_data['FIPS Code'] = msa_data.apply(lambda x : functions.get_fips_codes_from_state_and_county_fp((x['FIPS State Code']),x['FIPS County Code']), axis=1)
good_list = list(msa_data['FIPS Code'].values)
print('Counties included: ', good_list)
del acs_data
# Load CBG ids for the MSA
cbg_ids_msa = pd.read_csv(os.path.join(root,MSA_NAME,'%s_cbg_ids.csv'%MSA_NAME_FULL))
cbg_ids_msa.rename(columns={"cbg_id":"census_block_group"}, inplace=True)
M = len(cbg_ids_msa)
# Mapping from cbg_ids to columns in hourly visiting matrices
cbgs_to_idxs = dict(zip(cbg_ids_msa['census_block_group'].values, range(M)))
x = {}
for i in cbgs_to_idxs:
x[str(i)] = cbgs_to_idxs[i]
#print('Number of CBGs in this metro area:', M)
# Load SafeGraph data to obtain CBG sizes (i.e., populations)
filepath = os.path.join(root,"safegraph_open_census_data/data/cbg_b01.csv")
cbg_agesex = pd.read_csv(filepath)
# Extract CBGs belonging to the MSA - https://covid-mobility.stanford.edu//datasets/
cbg_age_msa =
|
pd.merge(cbg_ids_msa, cbg_agesex, on='census_block_group', how='left')
|
pandas.merge
|
import unittest
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
from string_grouper.string_grouper import DEFAULT_MIN_SIMILARITY, \
DEFAULT_REGEX, DEFAULT_NGRAM_SIZE, DEFAULT_N_PROCESSES, DEFAULT_IGNORE_CASE, \
StringGrouperConfig, StringGrouper, StringGrouperNotFitException, \
match_most_similar, group_similar_strings, match_strings, \
compute_pairwise_similarities
from unittest.mock import patch, Mock
def mock_symmetrize_matrix(x: csr_matrix) -> csr_matrix:
return x
class SimpleExample(object):
def __init__(self):
self.customers_df = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address4', '', 'Description4', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address5', 'Tel5', 'Description5', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.customers_df2 = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('DD012339M', 'HyperStartup Inc.', 'Address4', 'Tel4', 'Description4', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address5', '', 'Description5', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address6', 'Tel6', 'Description6', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.a_few_strings = pd.Series(['BB016741P', 'BB082744L', 'BB098762D', 'BB099931J', 'BB072982K', 'BB059082Q'])
self.one_string = pd.Series(['BB0'])
self.two_strings = pd.Series(['Hyper', 'Hyp'])
self.whatever_series_1 = pd.Series(['whatever'])
self.expected_result_with_zeroes = pd.DataFrame(
[
(1, 'Hyper Startup Incorporated', 0.08170638, 'whatever', 0),
(0, 'Mega Enterprises Corporation', 0., 'whatever', 0),
(2, 'Hyper Startup Inc.', 0., 'whatever', 0),
(3, 'Hyper-Startup Inc.', 0., 'whatever', 0),
(4, 'Hyper Hyper Inc.', 0., 'whatever', 0),
(5, 'Mega Enterprises Corp.', 0., 'whatever', 0)
],
columns=['left_index', 'left_Customer Name', 'similarity', 'right_side', 'right_index']
)
self.expected_result_centroid = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
self.expected_result_centroid_with_index_col = pd.DataFrame(
[
(0, 'Mega Enterprises Corporation'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(4, 'Hyper Hyper Inc.'),
(0, 'Mega Enterprises Corporation')
],
columns=['group_rep_index', 'group_rep_Customer Name']
)
self.expected_result_first = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
class StringGrouperConfigTest(unittest.TestCase):
def test_config_defaults(self):
"""Empty initialisation should set default values"""
config = StringGrouperConfig()
self.assertEqual(config.min_similarity, DEFAULT_MIN_SIMILARITY)
self.assertEqual(config.max_n_matches, None)
self.assertEqual(config.regex, DEFAULT_REGEX)
self.assertEqual(config.ngram_size, DEFAULT_NGRAM_SIZE)
self.assertEqual(config.number_of_processes, DEFAULT_N_PROCESSES)
self.assertEqual(config.ignore_case, DEFAULT_IGNORE_CASE)
def test_config_immutable(self):
"""Configurations should be immutable"""
config = StringGrouperConfig()
with self.assertRaises(Exception) as _:
config.min_similarity = 0.1
def test_config_non_default_values(self):
"""Configurations should be immutable"""
config = StringGrouperConfig(min_similarity=0.1, max_n_matches=100, number_of_processes=1)
self.assertEqual(0.1, config.min_similarity)
self.assertEqual(100, config.max_n_matches)
self.assertEqual(1, config.number_of_processes)
class StringGrouperTest(unittest.TestCase):
def test_auto_blocking_single_DataFrame(self):
"""tests whether automatic blocking yields consistent results"""
# This function will force an OverflowError to occur when
# the input Series have a combined length above a given number:
# OverflowThreshold. This will in turn trigger automatic splitting
# of the Series/matrices into smaller blocks when n_blocks = None
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
        df1 = simple_example.customers_df2['Customer Name']
# first do manual blocking
sg = StringGrouper(df1, min_similarity=0.1)
pd.testing.assert_series_equal(sg.master, df1)
self.assertEqual(sg.duplicates, None)
matches = fix_row_order(sg.match_strings(df1, n_blocks=(1, 1)))
self.assertEqual(sg._config.n_blocks, (1, 1))
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def do_test_with(OverflowThreshold):
nonlocal sg # allows reference to sg, as sg will be modified below
# Now let us mock sg._build_matches:
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
matches_auto = fix_row_order(sg.match_strings(df1, n_blocks=None))
pd.testing.assert_series_equal(sg.master, df1)
|
pd.testing.assert_frame_equal(matches, matches_auto)
|
pandas.testing.assert_frame_equal
|
# -*- coding: utf-8 -*-
import nose
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
from pandas.core import config as cf
from pandas.compat import u
from pandas.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.types.dtypes import DatetimeTZDtype
from pandas.types.missing import (array_equivalent, isnull, notnull,
na_value_for_dtype)
_multiprocess_can_split_ = True
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
for p in [tm.makePanel(), tm.makePeriodPanel(), tm.add_nans(tm.makePanel())
]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert (np.array_equal(result, exp))
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
assert (np.array_equal(result, exp))
# list of strings / unicode
result = isnull(['foo', 'bar'])
assert (not result.any())
result = isnull([u('foo'), u('bar')])
assert (not result.any())
def test_isnull_nat():
result = isnull([NaT])
exp = np.array([True])
assert (np.array_equal(result, exp))
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
assert (np.array_equal(result, exp))
def test_isnull_numpy_nat():
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isnull(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isnull_datetime():
assert (not isnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert (
|
notnull(idx)
|
pandas.types.missing.notnull
|
import numpy as np
import pandas as pd
import pywt
import matplotlib.pyplot as plt
class PreProcessing:
def __init__(self, split, feature_split):
self.split = split
self.feature_split = feature_split
self.stock_data = pd.read_csv("stock_data.csv")
# wavelet transform and create autoencoder data
def make_wavelet_train(self):
train_data = []
test_data = []
log_train_data = []
for i in range((len(self.stock_data)//10)*10 - 11):
train = []
log_ret = []
for j in range(1, 6):
x = np.array(self.stock_data.iloc[i: i + 11, j])
(ca, cd) = pywt.dwt(x, "haar")
cat = pywt.threshold(ca, np.std(ca), mode="soft")
cdt = pywt.threshold(cd, np.std(cd), mode="soft")
tx = pywt.idwt(cat, cdt, "haar")
log = np.diff(np.log(tx))*100
macd = np.mean(x[5:]) - np.mean(x)
# ma = np.mean(x)
sd = np.std(x)
log_ret = np.append(log_ret, log)
x_tech = np.append(macd*10, sd)
train = np.append(train, x_tech)
train_data.append(train)
log_train_data.append(log_ret)
trained = pd.DataFrame(train_data)
trained.to_csv("preprocessing/indicators.csv")
log_train = pd.DataFrame(log_train_data, index=None)
log_train.to_csv("preprocessing/log_train.csv")
# auto_train = pd.DataFrame(train_data[0:800])
# auto_test = pd.DataFrame(train_data[801:1000])
# auto_train.to_csv("auto_train.csv")
# auto_test.to_csv("auto_test.csv")
rbm_train = pd.DataFrame(log_train_data[0:int(self.split*self.feature_split*len(log_train_data))], index=None)
rbm_train.to_csv("preprocessing/rbm_train.csv")
rbm_test = pd.DataFrame(log_train_data[int(self.split*self.feature_split*len(log_train_data))+1:
int(self.feature_split*len(log_train_data))])
rbm_test.to_csv("preprocessing/rbm_test.csv")
for i in range((len(self.stock_data) // 10) * 10 - 11):
y = 100*np.log(self.stock_data.iloc[i + 11, 5] / self.stock_data.iloc[i + 10, 5])
test_data.append(y)
test = pd.DataFrame(test_data)
test.to_csv("preprocessing/test_data.csv")
def make_test_data(self):
test_stock = []
# stock_data_test = pd.read_csv("stock_data_test.csv", index_col=0)
for i in range((len(self.stock_data) // 10) * 10 - 11):
l = self.stock_data.iloc[i+11, 5]
test_stock.append(l)
test = pd.DataFrame(test_stock)
test.to_csv("preprocessing/test_stock.csv")
stock_test_data = np.array(test_stock)[int(self.feature_split*len(test_stock) +
self.split*(1-self.feature_split)*len(test_stock)):]
stock =
|
pd.DataFrame(stock_test_data, index=None)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""Cross references from cbms2019.
.. seealso:: https://github.com/pantapps/cbms2019
"""
import pandas as pd
from pyobo.constants import (
PROVENANCE,
SOURCE_ID,
SOURCE_PREFIX,
TARGET_ID,
TARGET_PREFIX,
XREF_COLUMNS,
)
__all__ = [
"get_cbms2019_xrefs_df",
]
#: Columns: DOID, DO name, xref xb, xref ix
base_url = "https://raw.githubusercontent.com/pantapps/cbms2019/master"
doid_to_all = f"{base_url}/mesh_icd10cm_via_do_not_mapped_umls.tsv"
#: Columns: SNOMEDCT_ID, SNOMEDCIT_NAME, ICD10CM_ID, ICD10CM_NAME, MESH_ID
all_to_all = f"{base_url}/mesh_icd10cm_via_snomedct_not_mapped_umls.tsv"
#: Columns: DOID, DO name, xref xb, xref ix
doid_to_all_2 = f"{base_url}/mesh_snomedct_via_do_not_mapped_umls.tsv"
#: Columns: SNOMEDCT_ID, SNOMEDCIT_NAME, ICD10CM_ID, ICD10CM_NAME, MESH_ID
all_to_all_2 = f"{base_url}/mesh_snomedct_via_icd10cm_not_mapped_umls.tsv"
NSM = {
"MESH": "mesh",
"ICD10CM": "icd",
"SNOMEDCT_US_2016_03_01": "snomedct",
}
def _get_doid(url: str) -> pd.DataFrame:
df =
|
pd.read_csv(url, sep="\t", usecols=["DO_ID", "resource", "resource_ID"])
|
pandas.read_csv
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from os.path import join as pjoin
import datetime
import io
import os
import json
import pytest
from pyarrow.compat import guid, u
from pyarrow.filesystem import LocalFileSystem
import pyarrow as pa
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
import numpy as np
import pandas as pd
import pandas.util.testing as tm
# Ignore these with pytest ... -m 'not parquet'
parquet = pytest.mark.parquet
def _write_table(table, path, **kwargs):
import pyarrow.parquet as pq
if isinstance(table, pd.DataFrame):
table = pa.Table.from_pandas(table)
pq.write_table(table, path, **kwargs)
return table
def _read_table(*args, **kwargs):
import pyarrow.parquet as pq
return pq.read_table(*args, **kwargs)
@parquet
def test_single_pylist_column_roundtrip(tmpdir):
for dtype in [int, float]:
filename = tmpdir.join('single_{}_column.parquet'
.format(dtype.__name__))
data = [pa.array(list(map(dtype, range(5))))]
table = pa.Table.from_arrays(data, names=('a', 'b'))
_write_table(table, filename.strpath)
table_read = _read_table(filename.strpath)
for col_written, col_read in zip(table.itercolumns(),
table_read.itercolumns()):
assert col_written.name == col_read.name
assert col_read.data.num_chunks == 1
data_written = col_written.data.chunk(0)
data_read = col_read.data.chunk(0)
assert data_written.equals(data_read)
def alltypes_sample(size=10000, seed=0):
np.random.seed(seed)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
# TODO(wesm): Test other timestamp resolutions now that arrow supports
# them
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
return df
@parquet
def test_pandas_parquet_2_0_rountrip(tmpdir):
import pyarrow.parquet as pq
df = alltypes_sample(size=10000)
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
assert b'pandas' in arrow_table.schema.metadata
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='ms')
table_read = pq.read_pandas(filename.strpath)
assert b'pandas' in table_read.schema.metadata
assert arrow_table.schema.metadata == table_read.schema.metadata
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_parquet_custom_metadata(tmpdir):
import pyarrow.parquet as pq
df = alltypes_sample(size=10000)
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
assert b'pandas' in arrow_table.schema.metadata
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='ms')
md = pq.read_metadata(filename.strpath).metadata
assert b'pandas' in md
js = json.loads(md[b'pandas'].decode('utf8'))
assert js['index_columns'] == ['__index_level_0__']
@parquet
def test_pandas_parquet_2_0_rountrip_read_pandas_no_index_written(tmpdir):
import pyarrow.parquet as pq
df = alltypes_sample(size=10000)
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
js = json.loads(arrow_table.schema.metadata[b'pandas'].decode('utf8'))
assert not js['index_columns']
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='ms')
table_read = pq.read_pandas(filename.strpath)
js = json.loads(table_read.schema.metadata[b'pandas'].decode('utf8'))
assert not js['index_columns']
assert arrow_table.schema.metadata == table_read.schema.metadata
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_parquet_1_0_rountrip(tmpdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, filename.strpath, version="1.0")
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
# We pass uint32_t as int64_t if we write Parquet version 1.0
df['uint32'] = df['uint32'].values.astype(np.int64)
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_column_selection(tmpdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16)
})
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, filename.strpath)
table_read = _read_table(filename.strpath, columns=['uint8'])
df_read = table_read.to_pandas()
tm.assert_frame_equal(df[['uint8']], df_read)
def _random_integers(size, dtype):
# We do not generate integers outside the int64 range
platform_int_info = np.iinfo('int_')
iinfo = np.iinfo(dtype)
return np.random.randint(max(iinfo.min, platform_int_info.min),
min(iinfo.max, platform_int_info.max),
size=size).astype(dtype)
def _test_dataframe(size=10000, seed=0):
np.random.seed(seed)
df = pd.DataFrame({
'uint8': _random_integers(size, np.uint8),
'uint16': _random_integers(size, np.uint16),
'uint32': _random_integers(size, np.uint32),
'uint64': _random_integers(size, np.uint64),
'int8': _random_integers(size, np.int8),
'int16': _random_integers(size, np.int16),
'int32': _random_integers(size, np.int32),
'int64': _random_integers(size, np.int64),
'float32': np.random.randn(size).astype(np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'strings': [tm.rands(10) for i in range(size)],
'all_none': [None] * size,
'all_none_category': [None] * size
})
# TODO(PARQUET-1015)
# df['all_none_category'] = df['all_none_category'].astype('category')
return df
@parquet
def test_pandas_parquet_native_file_roundtrip(tmpdir):
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.get_result()
reader = pa.BufferReader(buf)
df_read = _read_table(reader).to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_read_pandas_column_subset(tmpdir):
import pyarrow.parquet as pq
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.get_result()
reader = pa.BufferReader(buf)
df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas()
tm.assert_frame_equal(df[['strings', 'uint8']], df_read)
@parquet
def test_pandas_parquet_empty_roundtrip(tmpdir):
df = _test_dataframe(0)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.get_result()
reader = pa.BufferReader(buf)
df_read = _read_table(reader).to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_parquet_pyfile_roundtrip(tmpdir):
filename = tmpdir.join('pandas_pyfile_roundtrip.parquet').strpath
size = 5
df = pd.DataFrame({
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'strings': ['foo', 'bar', None, 'baz', 'qux']
})
arrow_table = pa.Table.from_pandas(df)
with open(filename, 'wb') as f:
_write_table(arrow_table, f, version="1.0")
data = io.BytesIO(open(filename, 'rb').read())
table_read = _read_table(data)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_parquet_configuration_options(tmpdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0
})
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
for use_dictionary in [True, False]:
_write_table(arrow_table, filename.strpath,
version="2.0",
use_dictionary=use_dictionary)
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
for compression in ['NONE', 'SNAPPY', 'GZIP']:
_write_table(arrow_table, filename.strpath,
version="2.0",
compression=compression)
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
def make_sample_file(df):
import pyarrow.parquet as pq
a_table = pa.Table.from_pandas(df)
buf = io.BytesIO()
_write_table(a_table, buf, compression='SNAPPY', version='2.0',
coerce_timestamps='ms')
buf.seek(0)
return pq.ParquetFile(buf)
@parquet
def test_parquet_metadata_api():
df = alltypes_sample(size=10000)
df = df.reindex(columns=sorted(df.columns))
fileh = make_sample_file(df)
ncols = len(df.columns)
# Series of sniff tests
meta = fileh.metadata
repr(meta)
assert meta.num_rows == len(df)
assert meta.num_columns == ncols + 1 # +1 for index
assert meta.num_row_groups == 1
assert meta.format_version == '2.0'
assert 'parquet-cpp' in meta.created_by
# Schema
schema = fileh.schema
assert meta.schema is schema
assert len(schema) == ncols + 1 # +1 for index
repr(schema)
col = schema[0]
repr(col)
assert col.name == df.columns[0]
assert col.max_definition_level == 1
assert col.max_repetition_level == 0
assert col.physical_type == 'BOOLEAN'
assert col.logical_type == 'NONE'
with pytest.raises(IndexError):
schema[ncols + 1] # +1 for index
with pytest.raises(IndexError):
schema[-1]
# Row group
rg_meta = meta.row_group(0)
repr(rg_meta)
assert rg_meta.num_rows == len(df)
assert rg_meta.num_columns == ncols + 1 # +1 for index
@parquet
def test_compare_schemas():
df = alltypes_sample(size=10000)
fileh = make_sample_file(df)
fileh2 = make_sample_file(df)
fileh3 = make_sample_file(df[df.columns[::2]])
assert fileh.schema.equals(fileh.schema)
assert fileh.schema.equals(fileh2.schema)
assert not fileh.schema.equals(fileh3.schema)
assert fileh.schema[0].equals(fileh.schema[0])
assert not fileh.schema[0].equals(fileh.schema[1])
@parquet
def test_column_of_arrays(tmpdir):
df, schema = dataframe_with_arrays()
filename = tmpdir.join('pandas_roundtrip.parquet')
arrow_table = pa.Table.from_pandas(df, schema=schema)
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='ms')
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
|
tm.assert_frame_equal(df, df_read)
|
pandas.util.testing.assert_frame_equal
|
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
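# For example, round-tripping with orient="values" drops both the index and the
# column labels: read_json(df.to_json(orient="values"), orient="values") comes
# back with a fresh RangeIndex and integer column labels, which is why the
# expected frame is reset and relabelled above before comparison.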
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
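# For instance, Timestamp("20130101").value is 1356998400000000000 (nanoseconds
# since the epoch); integer-dividing by 1000000 yields 1356998400000, the
# millisecond value that to_json writes for orient="values".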
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df =
|
DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iteration, so make sure the last element of the iterator was 'l'
# in this case, since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
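# In other words, iterating ds.str yields one Series per character position:
# the first yield here is ['g', 'w', 'w', 'w'], and by the last position only
# 'wikitravel' (the longest string) still contributes a character, the final 'l'.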
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so nothing is defined; the values should
# remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
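# (For contrast, a single-group extract is allowed on an Index in this version:
# e.g. idx.str.extract('([AB])', expand=False) would return an Index of the
# matched letters rather than a DataFrame.)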
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# a single named group: the group name becomes the name of the result.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# an Index should return the same result whether or not it has a name,
# i.e. index.name does not affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 FULLWIDTH DIGIT THREE
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA,
|
u('eeeeee')
|
pandas.compat.u
|
import sys
import os
import collections
from datetime import timedelta
from typing import List, Union, Any, Tuple
import logging
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from d3m.primitive_interfaces.base import CallResult
from d3m.primitive_interfaces.supervised_learning import SupervisedLearnerPrimitiveBase
from d3m.exceptions import PrimitiveNotFittedError
from d3m import container, utils
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import hyperparams, base as metadata_base, params
from statsmodels.tsa.api import VAR as vector_ar
from statsmodels.tsa.vector_ar.var_model import VARResultsWrapper
import statsmodels.api as sm
import scipy.stats as stats
from ..utils.time_utils import calculate_time_frequency, discretize_time_difference
from .arima import Arima
logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
__author__ = "Distil"
__version__ = "1.2.1"
__contact__ = "mailto:<EMAIL>"
Inputs = container.pandas.DataFrame
Outputs = container.pandas.DataFrame
MAX_INT = np.finfo("d").max - 1
class Params(params.Params):
integer_time: bool
time_column: str
targets: List[str]
target_indices: List[int]
X_train: Union[List[d3m_DataFrame], List[pd.DataFrame]]
X_train_names: Any
filter_idxs: List[str]
interpolation_ranges: Union[pd.Series, None, pd.DataFrame]
freq: str
is_fit: bool
fits: Union[
List[VARResultsWrapper], List[Arima], List[Union[VARResultsWrapper, Arima]]
]
values: List[np.ndarray]
values_diff: List[np.ndarray]
lag_order: Union[
List[None],
List[np.int64],
List[int],
List[Union[np.int64, None]],
List[Union[int, None]],
List[Union[np.int64, int, None]],
]
class Hyperparams(hyperparams.Hyperparams):
max_lag_order = hyperparams.Union[Union[int, None]](
configuration=collections.OrderedDict(
user_selected=hyperparams.UniformInt(lower=0, upper=100, default=1),
auto_selected=hyperparams.Hyperparameter[None](
default=None,
description="Lag order of regressions automatically selected",
),
),
default="user_selected",
description="The lag order to apply to regressions. If user-selected, the same lag will be "
+ "applied to all regressions. If auto-selected, different lags can be selected for different "
+ "regressions.",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
)
seasonal = hyperparams.UniformBool(
default=False,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="whether to perform ARIMA prediction with seasonal component",
)
seasonal_differencing = hyperparams.UniformInt(
lower=1,
upper=365,
default=1,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="period of seasonal differencing to use in ARIMA prediction",
)
dynamic = hyperparams.UniformBool(
default=True,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="whether to perform dynamic in-sample prediction with ARIMA model",
)
interpret_value = hyperparams.Enumeration(
default="lag_order",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
values=["series", "lag_order"],
description="whether to return weight coefficients for each series or each lag order "
+ "separately in the regression",
)
interpret_pooling = hyperparams.Enumeration(
default="avg",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
values=["avg", "max"],
description="whether to pool weight coefficients via average or max",
)
confidence_interval_horizon = hyperparams.UniformInt(
lower=1,
upper=100,
default=2,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="horizon for confidence interval forecasts. Exposed through auxiliary "
+ "'produce_confidence_intervals' method",
)
confidence_interval_alpha = hyperparams.Uniform(
lower=0.01,
upper=1,
default=0.1,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="significance level for confidence interval, i.e. alpha = 0.05 "
+ "returns a 95%% confidence interval from alpha / 2 to 1 - (alpha / 2). "
+ "Exposed through auxiliary 'produce_confidence_intervals' method",
)
class VarPrimitive(
SupervisedLearnerPrimitiveBase[Inputs, Outputs, Params, Hyperparams]
):
"""
This primitive applies a vector autoregression (VAR) multivariate forecasting model to time series data.
It defaults to an ARIMA model if the time series is univariate. The VAR
implementation comes from the statsmodels library. The lag order and AR, MA, and
differencing terms for the VAR and ARIMA models respectively are selected automatically
and independently for each regression. User can override automatic selection with 'max_lag_order' HP.
"""
metadata = metadata_base.PrimitiveMetadata(
{
"id": "76b5a479-c209-4d94-92b5-7eba7a4d4499",
"version": __version__,
"name": "VAR",
"keywords": ["Time Series"],
"source": {
"name": __author__,
"contact": __contact__,
"uris": [
"https://github.com/uncharted-distil/d3m-primitives",
],
},
"installation": [
{"type": "PIP", "package": "cython", "version": "0.29.24"},
{
"type": metadata_base.PrimitiveInstallationType.PIP,
"package_uri": "git+https://github.com/uncharted-distil/d3m-primitives.git@{git_commit}#egg=kf-d3m-primitives".format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),
),
},
],
"python_path": "d3m.primitives.time_series_forecasting.vector_autoregression.VAR",
"algorithm_types": [
metadata_base.PrimitiveAlgorithmType.VECTOR_AUTOREGRESSION
],
"primitive_family": metadata_base.PrimitiveFamily.TIME_SERIES_FORECASTING,
}
)
def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0) -> None:
super().__init__(hyperparams=hyperparams, random_seed=random_seed)
# track metadata about times, targets, indices, grouping keys
self._filter_idxs = None
self._targets = None
self._key = None
self._integer_time = False
self._target_indices = None
# information about interpolation
self._freq = None
self._interpolation_ranges = None
# data needed to fit model and reconstruct predictions
self._X_train_names = []
self._X_train = None
self._mins = None
self._lag_order = []
self._values = None
self._fits = []
self._is_fit = False
def get_params(self) -> Params:
if not self._is_fit:
return Params(
integer_time=None,
time_column=None,
targets=None,
target_indices=None,
X_train=None,
fits=None,
values=None,
values_diff=None,
lag_order=None,
positive=None,
filter_idxs=None,
interpolation_ranges=None,
freq=None,
is_fit=None,
X_train_names=None,
)
return Params(
integer_time=self._integer_time,
time_column=self._time_column,
targets=self._targets,
target_indices=self._target_indices,
X_train=self._X_train,
fits=self._fits,
values=self._values,
values_diff=self._values_diff,
lag_order=self._lag_order,
filter_idxs=self._filter_idxs,
interpolation_ranges=self._interpolation_ranges,
freq=self._freq,
is_fit=self._is_fit,
X_train_names=self._X_train_names,
)
def set_params(self, *, params: Params) -> None:
self._integer_time = params["integer_time"]
self._time_column = params["time_column"]
self._targets = params["targets"]
self._target_indices = params["target_indices"]
self._X_train = params["X_train"]
self._fits = params["fits"]
self._values = params["values"]
self._values_diff = params["values_diff"]
self._lag_order = params["lag_order"]
self._filter_idxs = params["filter_idxs"]
self._interpolation_ranges = params["interpolation_ranges"]
self._freq = params["freq"]
self._is_fit = params["is_fit"]
self._X_train_names = params["X_train_names"]
def set_training_data(self, *, inputs: Inputs, outputs: Outputs) -> None:
"""Sets primitive's training data
Arguments:
inputs {Inputs} -- D3M dataframe containing attributes
outputs {Outputs} -- D3M dataframe containing targets
Raises:
ValueError: If multiple columns are annotated with 'Time' or 'DateTime' metadata
"""
inputs_copy = inputs.append_columns(outputs)
times = self._get_cols(inputs_copy)
inputs_copy = self._convert_times(inputs_copy, times)
num_group_keys, drop_list = self._get_grouping_keys(inputs_copy)
inputs_copy = inputs_copy.drop(
columns=[list(inputs_copy)[i] for i in drop_list + self._key]
) # drop index and extraneous grouping keys
self._prepare_collections(inputs_copy, num_group_keys)
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
"""If there are multiple endogenous series, primitive will fit VAR model. Otherwise it will fit an ARIMA
model. In the VAR case, the lag order will be automatically chosen based on BIC (unless user overrides).
In the ARIMA case, the lag order will be automatically chosen by differencing tests (again, unless user
overrides).
Keyword Arguments:
timeout {float} -- timeout, not considered (default: {None})
iterations {int} -- iterations, not considered (default: {None})
Returns:
CallResult[None]
"""
# mark if data is exclusively positive
self._values = [sequence.values for sequence in self._X_train]
# self._positive = [True if np.min(vals) < 0 else False for vals in self._values]
# difference data - VAR assumes data is stationary
self._values_diff = [np.diff(sequence, axis=0) for sequence in self._X_train]
# define models
if self.hyperparams["max_lag_order"] is None:
arima_max_order = 5
else:
arima_max_order = self.hyperparams["max_lag_order"]
self.models = [
vector_ar(vals, dates=original.index, freq=self._freq)
if vals.shape[1] > 1
else Arima(
seasonal=self.hyperparams["seasonal"],
seasonal_differencing=self.hyperparams["seasonal_differencing"],
max_order=arima_max_order,
dynamic=self.hyperparams["dynamic"],
)
for vals, original in zip(self._values_diff, self._X_train)
]
self._robust_fit(self.models, self._values_diff, self._X_train)
return CallResult(None, has_finished=self._is_fit)
def produce(
self, *, inputs: Inputs, timeout: float = None, iterations: int = None
) -> CallResult[Outputs]:
"""prediction for future time series data
Arguments:
inputs {Inputs} -- attribute dataframe
Keyword Arguments:
timeout {float} -- timeout, not considered (default: {None})
iterations {int} -- iterations, not considered (default: {None})
Raises:
PrimitiveNotFittedError: if primitive not fit
Returns:
CallResult[Outputs] -- predictions for each prediction interval requested
"""
return self._produce(inputs)
def produce_confidence_intervals(
self, *, inputs: Inputs, timeout: float = None, iterations: int = None
) -> CallResult[Outputs]:
"""produce confidence intervals for each series
Arguments:
inputs {Inputs} -- attribute dataframe
Keyword Arguments:
timeout {float} -- timeout, not considered (default: {None})
iterations {int} -- iterations, not considered (default: {None})
Raises:
PrimitiveNotFittedError: if primitive not fit
Returns:
CallResult[Outputs] -- predictions for each prediction interval requested
Ex.
tgt | tgt-0.05 | tgt-0.95
----------------------------
5 | 3 | 7
6 | 4 | 8
5 | 3 | 7
6 | 4 | 8
"""
return self._produce(inputs, return_conf_int=True)
def produce_weights(
self, *, inputs: Inputs, timeout: float = None, iterations: int = None
) -> CallResult[Outputs]:
"""Produce absolute values of correlation coefficients (weights) for each of the terms used in each
regression model. Terms must be aggregated by series or by lag order (thus the need for absolute value).
Pooling operation can be maximum or average (controlled by 'interpret_pooling' HP).
Arguments:
inputs {Inputs} -- full D3M dataframe, containing attributes, key, and target
Keyword Arguments:
timeout {float} -- timeout, not considered (default: {None})
iterations {int} -- iterations, not considered (default: {None})
Raises:
PrimitiveNotFittedError: if primitive not fit
Returns:
CallResult[Outputs] -- pandas df where each row represents a unique series from one of the
regressions that was fit. The columns contain the coefficients for each term in the regression,
potentially aggregated by series or lag order. Column names will represent the lag order or
series to which that column refers. If the regression is an ARIMA model, the set of column
names will also contain AR_i (autoregressive terms) and MA_i (moving average terms).
Columns that are not included in the regression for a specific series will have NaN
values in those respective columns.
"""
if not self._is_fit:
raise PrimitiveNotFittedError("Primitive not fitted.")
if self.hyperparams["interpret_value"] == "series":
logger.info(
"You should interpret a row of the returned matrix like this: "
+ "Each row represents an endogenous variable for which the VAR process learned an equation. "
+ "Each column represents all of the endogenous variables used in the regression equation. "
+ "Each matrix entry represents the weight of the column endogenous variable in the equation for the "
+ "row endogenous variable."
)
# get correlation coefficients
coefficients = [
np.absolute(fit.coefs)
if lags is not None
else fit.get_absolute_value_params()
for fit, lags in zip(self._fits, self._lag_order)
]
trends = [
np.absolute(fit.params[0, :].reshape(-1, 1)) if lags is not None else None
for fit, lags in zip(self._fits, self._lag_order)
]
# combine coefficient vectors into a single df
coef_df = None
for coef, trend, names in zip(coefficients, trends, self._X_train_names):
# aggregate VAR coefficients based on HPs
if trend is not None:
if self.hyperparams["interpret_value"] == "series":
if self.hyperparams["interpret_pooling"] == "avg":
coef = np.mean(coef, axis=0) # K x K
else:
coef = np.max(coef, axis=0) # K x K
colnames = names
else:
# or axis = 2, I believe symmetrical
if self.hyperparams["interpret_pooling"] == "avg":
coef = np.mean(coef, axis=1).T # K x p + 1
else:
coef = np.max(coef, axis=1).T # K x p + 1
coef = np.concatenate((trend, coef), axis=1)
colnames = ["trend_0"] + [
"ar_" + str(i + 1) for i in range(coef.shape[1] - 1)
]
new_df = pd.DataFrame(coef, columns=colnames, index=names)
coef_df = pd.concat([coef_df, new_df], sort=True)
# add index to ARIMA params
else:
coef.index = names
if self.hyperparams["interpret_value"] == "lag_order":
coef_df = pd.concat([coef_df, coef], sort=True)
if coef_df is None:
logger.info(
f"There was only one variable in each grouping of time series, "
+ "therefore only ARIMA models were fit. Additionally, because the 'interpret_value' "
+ "hyperparameter is set to series, this will return an empty dataframe."
)
return CallResult(
container.DataFrame(coef_df, generate_metadata=True),
has_finished=self._is_fit,
)
def _get_cols(self, frame):
"""private util function: get indices of important columns from metadata"""
# mark datetime column
times = frame.metadata.list_columns_with_semantic_types(
(
"https://metadata.datadrivendiscovery.org/types/Time",
"http://schema.org/DateTime",
)
)
if len(times) != 1:
raise ValueError(
f"There are {len(times)} indices marked as datetime values. Please only specify one"
)
self._time_column = list(frame)[times[0]]
# mark key variable
self._key = frame.metadata.get_columns_with_semantic_type(
"https://metadata.datadrivendiscovery.org/types/PrimaryKey"
)
# mark target variables
self._targets = frame.metadata.list_columns_with_semantic_types(
(
"https://metadata.datadrivendiscovery.org/types/TrueTarget",
"https://metadata.datadrivendiscovery.org/types/Target",
)
)
self._targets = [list(frame)[t] for t in self._targets]
return times
def _convert_times(self, frame, times):
"""private util function: convert to pd datetime
if datetime columns are integers, parse as # of days
"""
if (
"http://schema.org/Integer"
in frame.metadata.query_column(times[0])["semantic_types"]
):
self._integer_time = True
frame[self._time_column] = pd.to_datetime(
frame[self._time_column] - 1, unit="D"
)
else:
frame[self._time_column] = pd.to_datetime(
frame[self._time_column], unit="s"
)
return frame
def _get_grouping_keys(self, frame):
"""see if 'GroupingKey' has been marked
otherwise fall through to use 'SuggestedGroupingKey' to intelligently calculate grouping key order
we sort keys so that VAR can operate on as many series as possible simultaneously (reverse order)
return the number of grouping columns and list of extraneous columns that should be dropped
"""
grouping_keys = frame.metadata.get_columns_with_semantic_type(
"https://metadata.datadrivendiscovery.org/types/GroupingKey"
)
suggested_grouping_keys = frame.metadata.get_columns_with_semantic_type(
"https://metadata.datadrivendiscovery.org/types/SuggestedGroupingKey"
)
if len(grouping_keys) == 0:
grouping_keys = suggested_grouping_keys
drop_list = []
else:
drop_list = suggested_grouping_keys
grouping_keys_counts = [
frame.iloc[:, key_idx].nunique() for key_idx in grouping_keys
]
grouping_keys = [
group_key
for count, group_key in sorted(zip(grouping_keys_counts, grouping_keys))
]
self._filter_idxs = [list(frame)[key] for key in grouping_keys]
return len(grouping_keys), drop_list
def _prepare_collections(self, frame, num_group_keys=0):
"""prepare separate collections of series on which to fit separate VAR or ARIMA models"""
# check whether no grouping keys are labeled
if num_group_keys == 0:
# avg across duplicated time indices if necessary and re-index
if sum(frame[self._time_column].duplicated()) > 0:
frame = frame.groupby(self._time_column).mean()
else:
frame = frame.set_index(self._time_column)
# interpolate
self._freq = calculate_time_frequency(frame.index[1] - frame.index[0])
frame = frame.interpolate(method="time", limit_direction="both")
# set X train
self._target_indices = [
i for i, col_name in enumerate(list(frame)) if col_name in self._targets
]
self._X_train = [frame]
self._X_train_names = [frame.columns]
else:
# find interpolation range from outermost grouping key
if num_group_keys == 1:
date_ranges = frame.agg({self._time_column: ["min", "max"]})
indices = frame[self._filter_idxs[0]].unique()
self._interpolation_ranges = pd.Series(
[date_ranges] * len(indices), index=indices
)
self._X_train = [None]
self._X_train_names = [None]
else:
self._interpolation_ranges = frame.groupby(
self._filter_idxs[:-1], sort=False
).agg({self._time_column: ["min", "max"]})
self._X_train = [
None for i in range(self._interpolation_ranges.shape[0])
]
self._X_train_names = [
None for i in range(self._interpolation_ranges.shape[0])
]
for name, group in frame.groupby(self._filter_idxs, sort=False):
if num_group_keys > 2:
group_value = name[:-1]
elif num_group_keys == 2:
group_value = name[0]
else:
group_value = name
if num_group_keys > 1:
training_idx = np.where(
self._interpolation_ranges.index.to_flat_index() == group_value
)[0][0]
else:
training_idx = 0
group = group.drop(columns=self._filter_idxs)
# avg across duplicated time indices if necessary and re-index
group = group.sort_values(by=[self._time_column])
if sum(group[self._time_column].duplicated()) > 0:
group = group.groupby(self._time_column).mean()
else:
group = group.set_index(self._time_column)
# interpolate
min_date = self._interpolation_ranges.loc[group_value][
self._time_column
]["min"]
max_date = self._interpolation_ranges.loc[group_value][
self._time_column
]["max"]
# assume frequency is the same across all time series
if self._freq is None:
self._freq = calculate_time_frequency(
group.index[1] - group.index[0]
)
group = group.reindex(
pd.date_range(min_date, max_date, freq=self._freq),
)
group = group.interpolate(method="time", limit_direction="both")
# add to training data under appropriate top-level grouping key
self._target_indices = [
i
for i, col_name in enumerate(list(group))
if col_name in self._targets
]
if self._X_train[training_idx] is None:
self._X_train[training_idx] = group
else:
self._X_train[training_idx] = pd.concat(
[self._X_train[training_idx], group], axis=1
)
if self._X_train_names[training_idx] is None:
self._X_train_names[training_idx] = [name]
else:
self._X_train_names[training_idx].append(name)
def _robust_fit(self, models, training_data, training_times):
"""fit models, robustly recover from matrix decomposition errors and other fitting
errors
"""
for vals, model, original in zip(training_data, models, training_times):
# VAR
if vals.shape[1] > 1:
try:
lags = model.select_order(
maxlags=self.hyperparams["max_lag_order"]
).bic
logger.info(
"Successfully performed model order selection. Optimal order = {} lags".format(
lags
)
)
except np.linalg.LinAlgError as e:
lags = 0
logger.info(f"Matrix decomposition error. Using lag order of 0")
except ValueError as e:
lags = 0
logger.info("ValueError: " + str(e) + ". Using lag order of 0")
self._lag_order.append(lags)
self._fits.append(model.fit(maxlags=lags))
# ARIMA
else:
X_train = pd.Series(
data=vals.reshape((-1,)), index=original.index[: vals.shape[0]]
)
model.fit(X_train)
self._lag_order.append(None)
self._fits.append(model)
self._is_fit = True
def _calculate_prediction_intervals(self, inputs: Inputs, num_group_keys: int):
"""private util function that uses learned grouping keys to extract horizon,
horizon intervals, and forecast_idxs
"""
# check whether no grouping keys are labeled
if num_group_keys == 0:
group_tuple = ((self._X_train_names[0][0], inputs),)
else:
group_tuple = inputs.groupby(self._filter_idxs, sort=False)
# groupby learned filter_idxs and extract n_periods, interval and d3mIndex information
n_periods = [0 for i in range(len(self._X_train))]
forecast_idxs = []
intervals = []
for name, group in group_tuple:
if num_group_keys > 2:
group_value = name[:-1]
elif num_group_keys == 2:
group_value = name[0]
else:
group_value = name
if num_group_keys > 1:
testing_idx = np.where(
self._interpolation_ranges.index.to_flat_index() == group_value
)[0][0]
else:
testing_idx = 0
col_idxs = [
i
for i, tupl in enumerate(self._X_train_names[testing_idx])
if tupl == name
]
if not len(col_idxs):
logger.info(
f"Series with category {name} did not exist in training data; "
+ "these predictions will be returned as np.nan."
)
col_idx = -1
else:
col_idx = col_idxs[0]
forecast_idxs.append((testing_idx, col_idx))
min_train_idx = self._X_train[testing_idx].index[0]
local_intervals = discretize_time_difference(
group[self._time_column], min_train_idx, self._freq
)
intervals.append(local_intervals)
num_p = int(max(local_intervals) - self._X_train[testing_idx].shape[0] + 1)
if n_periods[testing_idx] < num_p:
n_periods[testing_idx] = num_p
return n_periods, forecast_idxs, intervals
def _forecast(self, n_periods, return_conf_int=False):
"""make future forecasts using models, prepend in-sample predictions, inverse transformations
using information extracted from prediction intervals
"""
forecasts = []
for fit, lags, vals, vals_diff, horizon in zip(
self._fits, self._lag_order, self._values, self._values_diff, n_periods
):
if lags is None:
preds = np.concatenate(
(vals[:1], vals_diff[:1], fit.predict_in_sample().reshape(-1, 1)),
axis=0,
)
else:
preds = np.concatenate(
(vals[:1], vals_diff[:lags], fit.fittedvalues), axis=0
)
in_sample_len = preds.shape[0]
if horizon > 0:
if return_conf_int:
alpha = self.hyperparams["confidence_interval_alpha"]
means, lowers, uppers = [], [], []
if lags is not None and lags > 0:
mean, lower, upper = fit.forecast_interval(
y=vals_diff[-fit.k_ar :], steps=horizon, alpha=alpha
)
elif lags == 0:
q = stats.norm.ppf(1 - alpha / 2)
sigma = np.sqrt(fit._forecast_vars(horizon))
mean = np.repeat(fit.params, horizon, axis=0)
lower = np.repeat(fit.params - q * sigma, horizon, axis=0)
upper = np.repeat(fit.params + q * sigma, horizon, axis=0)
else:
mean, lower, upper = fit.predict(
n_periods=horizon, return_conf_int=True, alpha=alpha
)
if len(mean.shape) == 1:
mean = mean.reshape(-1, 1)
lower = lower.reshape(-1, 1)
upper = upper.reshape(-1, 1)
preds = [
np.concatenate((preds, mean), axis=0),
np.concatenate((preds, lower), axis=0),
np.concatenate((preds, upper), axis=0),
]
preds = [p.cumsum(axis=0) for p in preds]
preds[1][:in_sample_len] = np.nan
preds[2][:in_sample_len] = np.nan
else:
if lags is not None and lags > 0:
mean = fit.forecast(y=vals_diff[-fit.k_ar :], steps=horizon)
elif lags == 0:
mean = np.repeat(fit.params, horizon, axis=0)
else:
mean = fit.predict(n_periods=horizon).reshape(-1, 1)
preds = np.concatenate((preds, mean), axis=0)
preds = [preds.cumsum(axis=0)]
else:
preds = [preds.cumsum(axis=0)]
if return_conf_int:
nan_array = np.empty(preds[0].shape)
nan_array[:] = np.nan
preds.append(nan_array)
preds.append(nan_array)
preds = [
pd.DataFrame(p)
# api: pandas.DataFrame
# -*- coding: utf-8 -*-
import dash
import dash_auth
import json
import dash_core_components as dcc
import dash_daq as daq
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.express as px
from itertools import cycle
import pandas as pd
import numpy as np
import datetime
VALID_USERNAME_PASSWORD_PAIRS = {
'<PASSWORD>': '<PASSWORD>'
}
app = dash.Dash(
__name__, meta_tags=[{"name": "viewport", "content": "width=device-width"}]
)
auth = dash_auth.BasicAuth(
app,
VALID_USERNAME_PASSWORD_PAIRS
)
server = app.server
opp = pd.read_csv('data/opportunity.csv', index_col=[0,1,2,3])
opportunity = pd.read_csv('data/days.csv', index_col=[0,1,2,3])
annual_operating = pd.read_csv('data/annual.csv', index_col=[0,1])
stats = pd.read_csv('data/scores.csv')
quantiles = np.arange(50,101,1)
quantiles = quantiles*.01
quantiles = np.round(quantiles, decimals=2)
dataset = opp.sort_index()
lines = opp.index.get_level_values(1).unique()
asset_metrics = ['Yield', 'Rate', 'Uptime']
groupby = ['Line', 'Product group']
oee = pd.read_csv('data/oee.csv')
oee['From Date/Time'] = pd.to_datetime(oee["From Date/Time"])
oee['To Date/Time'] = pd.to_datetime(oee["To Date/Time"])
oee["Run Time"] = pd.to_timedelta(oee["Run Time"])
oee = oee.loc[oee['Rate'] < 2500]
res = oee.groupby(groupby)[asset_metrics].quantile(quantiles)
df = pd.read_csv('data/products.csv')
descriptors = df.columns[:8]
production_df = df
production_df['product'] = production_df[descriptors[2:]].agg('-'.join, axis=1)
production_df = production_df.sort_values(['Product Family', 'EBIT'],
ascending=False)
stat_df = pd.read_csv('data/category_stats.csv')
old_products = df[descriptors].sum(axis=1).unique().shape[0]
weight_match = pd.read_csv('data/weight_match.csv')
def make_bubble_chart(x='EBITDA per Hr Rank', y='Adjusted EBITDA', color='Line',
size='Net Sales Quantity in KG'):
fig = px.scatter(weight_match, x=x, y=y, color=color, size=size)
fig.update_layout({
"plot_bgcolor": "#F9F9F9",
"paper_bgcolor": "#F9F9F9",
# "title": 'EBIT by Product Descriptor',
})
return fig
def calculate_margin_opportunity(sort='Worst', select=[0,10], descriptors=None):
if sort == 'Best':
local_df = stat_df.sort_values('score', ascending=False)
local_df = local_df.reset_index(drop=True)
else:
local_df = stat_df
if descriptors != None:
local_df = local_df.loc[local_df['descriptor'].isin(descriptors)]
if sort == 'Best':
new_df = pd.DataFrame()
for index in range(select[0],select[1]):
x = df.loc[(df[local_df.iloc[index]['descriptor']] == \
local_df.iloc[index]['group'])]
new_df = pd.concat([new_df, x])
new_df = new_df.drop_duplicates()
else:
new_df = df
for index in range(select[0],select[1]):
new_df = new_df.loc[~(new_df[local_df.iloc[index]['descriptor']] ==\
local_df.iloc[index]['group'])]
new_EBITDA = new_df['Adjusted EBITDA'].sum()
EBITDA_percent = new_EBITDA / df['Adjusted EBITDA'].sum() * 100
new_products = new_df[descriptors].sum(axis=1).unique().shape[0]
product_percent_reduction = (new_products) / \
old_products * 100
new_kg = new_df['Sales Quantity in KG'].sum()
old_kg = df['Sales Quantity in KG'].sum()
kg_percent = new_kg / old_kg * 100
return "€{:.1f} M of €{:.1f} M ({:.1f}%)".format(new_EBITDA/1e6,
df['Adjusted EBITDA'].sum()/1e6, EBITDA_percent), \
"{} of {} Products ({:.1f}%)".format(new_products,old_products,
product_percent_reduction),\
"{:.1f} M of {:.1f} M kg ({:.1f}%)".format(new_kg/1e6, old_kg/1e6,
kg_percent)
def make_violin_plot(sort='Worst', select=[0,10], descriptors=None):
if sort == 'Best':
local_df = stat_df.sort_values('score', ascending=False)
local_df = local_df.reset_index(drop=True)
else:
local_df = stat_df
if descriptors != None:
local_df = local_df.loc[local_df['descriptor'].isin(descriptors)]
fig = go.Figure()
for index in range(select[0],select[1]):
x = df.loc[(df[local_df.iloc[index]['descriptor']] == \
local_df.iloc[index]['group'])]['Adjusted EBITDA']
y = local_df.iloc[index]['descriptor'] + ': ' + df.loc[(df[local_df\
.iloc[index]['descriptor']] == local_df.iloc[index]['group'])]\
[local_df.iloc[index]['descriptor']]
name = '€ {:.0f}'.format(x.median())
fig.add_trace(go.Violin(x=y,
y=x,
name=name,
box_visible=True,
meanline_visible=True))
fig.update_layout({
"plot_bgcolor": "#F9F9F9",
"paper_bgcolor": "#F9F9F9",
"title": 'Adjusted EBITDA by Product Descriptor (Median in Legend)',
"yaxis.title": "EBITDA (€)",
"height": 400,
"margin": dict(
l=0,
r=0,
b=0,
t=30,
pad=4
),
})
return fig
def make_sunburst_plot(clickData=None, toAdd=None, col=None, val=None):
if clickData != None:
col = clickData["points"][0]['x'].split(": ")[0]
val = clickData["points"][0]['x'].split(": ")[1]
elif col == None:
col = 'Thickness Material A'
val = '47'
desc = list(descriptors[:-2])
if col in desc:
desc.remove(col)
if toAdd != None:
for item in toAdd:
desc.append(item)
test = production_df.loc[production_df[col] == val]
fig = px.sunburst(test, path=desc[:], color='Adjusted EBITDA', title='{}: {}'.format(
col, val),
color_continuous_scale=px.colors.sequential.Viridis)
fig.update_layout({
"plot_bgcolor": "#F9F9F9",
"title": '(Select in Violin) {}: {}'.format(col,val),
"paper_bgcolor": "#F9F9F9",
"height": 400,
"margin": dict(
l=0,
r=0,
b=0,
t=30,
pad=4
),
})
return fig
def make_ebit_plot(production_df, select=None, sort='Worst', descriptors=None):
families = production_df['Product Family'].unique()
colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3',\
'#FF6692', '#B6E880', '#FF97FF', '#FECB52']
colors_cycle = cycle(colors)
grey = ['#7f7f7f']
color_dic = {'{}'.format(i): '{}'.format(j) for i, j in zip(families,
colors)}
grey_dic = {'{}'.format(i): '{}'.format('#7f7f7f') for i in families}
fig = go.Figure()
if select == None:
for data in px.scatter(
production_df,
x='product',
y='Adjusted EBITDA',
color='Product Family',
color_discrete_map=color_dic,
opacity=1).data:
fig.add_trace(
data
)
elif select != None:
color_dic = {'{}'.format(i): '{}'.format(j) for i, j in zip(select,
colors)}
for data in px.scatter(
production_df,
x='product',
y='Adjusted EBITDA',
color='Product Family',
color_discrete_map=color_dic,
opacity=0.09).data:
fig.add_trace(
data
)
if sort == 'Best':
local_df = stat_df.sort_values('score', ascending=False)
elif sort == 'Worst':
local_df = stat_df
new_df = pd.DataFrame()
if descriptors != None:
local_df = local_df.loc[local_df['descriptor'].isin(descriptors)]
for index in select:
x = production_df.loc[(production_df[local_df.iloc[index]\
['descriptor']] == local_df.iloc[index]['group'])]
x['color'] = next(colors_cycle) # for line shapes
new_df = pd.concat([new_df, x])
new_df = new_df.reset_index(drop=True)
for data in px.scatter(
new_df,
x='product',
y='Adjusted EBITDA',
color='Product Family',
color_discrete_map=color_dic,
opacity=1).data:
fig.add_trace(
data
)
shapes=[]
for index, i in enumerate(new_df['product']):
shapes.append({'type': 'line',
'xref': 'x',
'yref': 'y',
'x0': i,
'y0': -4e5,
'x1': i,
'y1': 4e5,
'line':dict(
dash="dot",
color=new_df['color'][index],)})
fig.update_layout(shapes=shapes)
fig.update_layout({
"plot_bgcolor": "#F9F9F9",
"paper_bgcolor": "#F9F9F9",
"title": 'Adjusted EBITDA by Product Family',
"yaxis.title": "EBITDA (€)",
"height": 500,
"margin": dict(
l=0,
r=0,
b=0,
t=30,
pad=4
),
"xaxis.tickfont.size": 8,
# "font":dict(
# size=8,
# ),
})
return fig
def calculate_overlap(lines=['E27', 'E26']):
path=['Product group', 'Polymer', 'Base Type', 'Additional Treatment']
line1 = oee.loc[oee['Line'].isin([lines[0]])].groupby(path)\
['Quantity Good'].sum()
line2 = oee.loc[oee['Line'].isin([lines[1]])].groupby(path)\
['Quantity Good'].sum()
set1 = set(line1.index)
set2 = set(line2.index)
both = set1.intersection(set2)
unique = set1.union(set2) - both
kg_overlap = (line1.loc[list(both)].sum() + line2.loc[list(both)].sum()) /\
(line1.sum() + line2.sum())
return kg_overlap*100
def make_product_sunburst(lines=['E27', 'E26']):
fig = px.sunburst(oee.loc[oee['Line'].isin(lines)],
path=['Product group', 'Polymer', 'Base Type', 'Additional Treatment',\
'Line'],
color='Line')
overlap = calculate_overlap(lines)
fig.update_layout({
"plot_bgcolor": "#F9F9F9",
"paper_bgcolor": "#F9F9F9",
"height": 500,
"margin": dict(
l=0,
r=0,
b=0,
t=30,
pad=4
),
"title": "Product Overlap {:.1f}%: {}, {}".format(overlap,
lines[0], lines[1]),
})
return fig
def make_metric_plot(line='K40', pareto='Product', marginal='histogram'):
plot = oee.loc[oee['Line'] == line]
plot = plot.sort_values('Thickness Material A')
plot['Thickness Material A'] = pd.to_numeric(plot['Thickness Material A'])
# api: pandas.to_numeric
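# Aside (illustrative sketch, not part of the dashboard code above; the toy
# frame below is hypothetical): a minimal pd.to_numeric example.
import pandas as pd
toy = pd.DataFrame({'Thickness Material A': ['47', '50', '53']})
toy['Thickness Material A'] = pd.to_numeric(toy['Thickness Material A'])  # object -> numeric dtype
# pd.to_numeric(..., errors='coerce') would instead map unparseable strings to NaN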
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import object
from past.utils import old_div
import os
import numpy as np
import pandas as pd
from threeML.io.rich_display import display
from threeML.io.file_utils import sanitize_filename
from ..serialize import Serialization
from .from_root_file import from_root_file
from .from_hdf5_file import from_hdf5_file
import astropy.units as u
def map_tree_factory(map_tree_file, roi):
# Sanitize files in input (expand variables and so on)
map_tree_file = sanitize_filename(map_tree_file)
if os.path.splitext(map_tree_file)[-1] == '.root':
return MapTree.from_root_file(map_tree_file, roi)
else:
return MapTree.from_hdf5(map_tree_file, roi)
class MapTree(object):
def __init__(self, analysis_bins, roi):
self._analysis_bins = analysis_bins
self._roi = roi
@classmethod
def from_hdf5(cls, map_tree_file, roi):
data_analysis_bins = from_hdf5_file(map_tree_file, roi)
return cls(data_analysis_bins, roi)
@classmethod
def from_root_file(cls, map_tree_file, roi):
"""
Create a MapTree object from a ROOT file and a ROI. Do not use this directly, use map_tree_factory instead.
:param map_tree_file:
:param roi:
:return:
"""
data_analysis_bins = from_root_file(map_tree_file, roi)
return cls(data_analysis_bins, roi)
def __iter__(self):
"""
This allows to loop over the analysis bins as in:
for analysis_bin in maptree:
... do something ...
:return: analysis bin_name iterator
"""
for analysis_bin in self._analysis_bins:
yield analysis_bin
def __getitem__(self, item):
"""
This allows to access the analysis bins by name:
first_analysis_bin = maptree["bin_name 0"]
:param item: string for access by name
:return: the analysis bin_name
"""
try:
return self._analysis_bins[item]
except IndexError:
raise IndexError("Analysis bin_name with index %i does not exist" % (item))
def __len__(self):
return len(self._analysis_bins)
@property
def analysis_bins_labels(self):
return list(self._analysis_bins.keys())
def display(self):
df = pd.DataFrame()
df['Bin'] = list(self._analysis_bins.keys())
df['Nside'] = [self._analysis_bins[bin_id].nside for bin_id in self._analysis_bins]
df['Scheme'] = [self._analysis_bins[bin_id].scheme for bin_id in self._analysis_bins]
# Compute observed counts, background counts, how many pixels we have in the ROI and
# the sky area they cover
n_bins = len(self._analysis_bins)
obs_counts = np.zeros(n_bins)
bkg_counts = np.zeros_like(obs_counts)
n_pixels = np.zeros(n_bins, dtype=int)
sky_area = np.zeros_like(obs_counts)
size = 0
for i, bin_id in enumerate(self._analysis_bins):
analysis_bin = self._analysis_bins[bin_id]
sparse_obs = analysis_bin.observation_map.as_partial()
sparse_bkg = analysis_bin.background_map.as_partial()
size += sparse_obs.nbytes
size += sparse_bkg.nbytes
obs_counts[i] = sparse_obs.sum()
bkg_counts[i] = sparse_bkg.sum()
n_pixels[i] = sparse_obs.shape[0]
sky_area[i] = n_pixels[i] * analysis_bin.observation_map.pixel_area
df['Obs counts'] = obs_counts
df['Bkg counts'] = bkg_counts
df['obs/bkg'] = old_div(obs_counts, bkg_counts)
df['Pixels in ROI'] = n_pixels
df['Area (deg^2)'] = sky_area
display(df)
first_bin_id = list(self._analysis_bins.keys())[0]
print("This Map Tree contains %.3f transits in the first bin" \
% self._analysis_bins[first_bin_id].n_transits)
print("Total data size: %.2f Mb" % (size * u.byte).to(u.megabyte).value)
def write(self, filename):
"""
Export the tree to a HDF5 file.
NOTE: if an ROI has been applied, only the data within the ROI will be saved.
:param filename: output filename. Use an extension .hd5 or .hdf5 to ensure proper handling by downstream
software
:return: None
"""
# Make a dataframe with the ordered list of bin names
# bin_names = map(lambda x:x.name, self._analysis_bins)
# Create a dataframe with a multi-index, with the energy bin name as first level and the HEALPIX pixel ID
# as the second level
multi_index_keys = []
dfs = []
all_metas = []
for bin_id in self._analysis_bins:
analysis_bin = self._analysis_bins[bin_id]
assert bin_id == analysis_bin.name, \
'Bin name inconsistency: {} != {}'.format(bin_id, analysis_bin.name)
multi_index_keys.append(analysis_bin.name)
this_df, this_meta = analysis_bin.to_pandas()
dfs.append(this_df)
all_metas.append(pd.Series(this_meta))
analysis_bins_df = pd.concat(dfs, axis=0, keys=multi_index_keys)
meta_df = pd.concat(all_metas, axis=1, keys=multi_index_keys).T
with Serialization(filename, mode='w') as serializer:
serializer.store_pandas_object('/analysis_bins', analysis_bins_df)
serializer.store_pandas_object('/analysis_bins_meta', meta_df)
# Write the ROI
if self._roi is not None:
serializer.store_pandas_object('/ROI', pd.Series(), **self._roi.to_dict())
else:
serializer.store_pandas_object('/ROI',
pd.Series()
# api: pandas.Series
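# Aside (not part of the source file): the empty Series stored above is only a
# placeholder object; the ROI description itself travels in the keyword
# arguments (self._roi.to_dict()), which the Serialization helper presumably
# persists alongside the '/ROI' entry.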
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: rz
@email:
"""
#%% imports
import itertools, time, copy
from tqdm import tqdm
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
import Levenshtein as Lev
from sklearn import metrics
from .etdata import ETData
from .utils import convertToOneHot
#%% setup parameters
#%% code
def calc_k(gt, pr):
'''
Handles error if all samples are from the same class
'''
k = 1. if (gt == pr).all() else metrics.cohen_kappa_score(gt, pr)
return k
def calc_f1(gt, pr):
f1 = 1. if (gt == pr).all() else metrics.f1_score(gt, pr)
return f1
def calc_KE(etdata_gt, etdata_pr):
#calculate event level matches
gt_evt_index = [ind for i, n in enumerate(np.diff(etdata_gt.evt[['s', 'e']]).squeeze()) for ind in itertools.repeat(i, n)]
pr_evt_index = [ind for i, n in enumerate(np.diff(etdata_pr.evt[['s', 'e']]).squeeze()) for ind in itertools.repeat(i, n)]
overlap = np.vstack((gt_evt_index, pr_evt_index)).T
overlap_matrix = [_k + [len(list(_g)), False, False] for _k, _g in itertools.groupby(overlap.tolist())]
overlap_matrix = pd.DataFrame(overlap_matrix, columns=['gt', 'pr', 'l', 'matched', 'selected'])
overlap_matrix['gt_evt'] = etdata_gt.evt.loc[overlap_matrix['gt'], 'evt'].values
overlap_matrix['pr_evt'] = etdata_pr.evt.loc[overlap_matrix['pr'], 'evt'].values
while not(overlap_matrix['matched'].all()):
#select longest overlap
ind = overlap_matrix.loc[~overlap_matrix['matched'], 'l'].argmax()
overlap_matrix.loc[ind, ['selected']]=True
mask_matched = (overlap_matrix['gt']==overlap_matrix.loc[ind, 'gt']).values |\
(overlap_matrix['pr']==overlap_matrix.loc[ind, 'pr']).values
overlap_matrix.loc[mask_matched, 'matched'] = True
overlap_events = overlap_matrix.loc[overlap_matrix['selected'], ['gt', 'pr', 'gt_evt', 'pr_evt']]
#sanity check
evt_gt = etdata_gt.evt.loc[overlap_events['gt'], 'evt']
evt_pr = etdata_pr.evt.loc[overlap_events['pr'], 'evt']
#assert (evt_gt.values == evt_pr.values).all()
#add not matched events
set_gt = set(etdata_gt.evt.index.values) - set(evt_gt.index.values)
set_pr = set(etdata_pr.evt.index.values) - set(evt_pr.index.values)
evt_gt = pd.concat((evt_gt, etdata_gt.evt.loc[set_gt, 'evt']))
evt_pr = pd.concat((evt_pr, pd.DataFrame(np.zeros(len(set_gt)))))
evt_gt = pd.concat((evt_gt, pd.DataFrame(np.zeros(len(set_pr)))))
evt_pr = pd.concat((evt_pr, etdata_pr.evt.loc[set_pr, 'evt']))
# api: pandas.concat
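# Aside (illustrative sketch, not part of the event-matching code above):
# pd.concat stacking two Series end to end, as done when appending the
# unmatched ground-truth and predicted events.
import pandas as pd
a = pd.Series([1.0, 2.0], index=[10, 11])
b = pd.Series([0.0, 0.0], index=[0, 1])
stacked = pd.concat((a, b))  # length 4; each input keeps its own index labels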
from __future__ import absolute_import, division, print_function
import datetime
import pandas as pd
from config import *
def _analysis_create_members():
"""
Creates a table with members data
:return:
"""
logger.info("Creating members table")
members_metadata = pd.read_csv(members_metadata_path)
# api: pandas.read_csv
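# Aside (illustrative sketch, not part of the source; 'members.csv' is a
# hypothetical file name): pd.read_csv loading a table with the first column
# used as the row index.
import pandas as pd
members = pd.read_csv('members.csv', index_col=0)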
# ALS.py
# Author: <NAME>
# Version: 1.2.0
# Last Updated: 01/12/19
# This code is designed to be imported and run inside a Jupyter notebook using an iPython kernel.
'''
ALS.py DEPENDENCIES
This code was developed and tested using the following packages/versions.
Other versions may also work.
python 3.6.1
numpy 1.12.1
pandas 0.20.1
scipy 0.19.0
matplotlib 2.2.2
ipython 5.3.0
ALS.py MAJOR UPDATE HISTORY
1.0.0 - 07/10/18 - Initial release (__init__, fit, plot_data_model, plot_model, _time_axis, _model, and _conv_IRF methods).
1.1.0 - 07/25/18 - Added bootstrap method.
1.1.1 - 10/04/18 - Added conc_units kwarg for fit, plot_data_model, and bootstrap methods.
1.1.2 - 01/09/19 - Added quiet kwarg to bootstrap method.
1.2.0 - 01/12/19 - Added preliminary monte_carlo_params method.
'''
# TODO:
# Add checks to make sure the inputs from the user have the correct formatting, throw errors if they are not (ex: df_data err field can have no zeros)
# Adjust random sampling methodology of monte carlo parameter sampling to accommodate parameters with negative values and correlation between parameters
import numpy as np
import pandas as pd
from scipy.optimize import leastsq
from scipy.stats import truncnorm
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from IPython.display import display, clear_output
class KineticModel:
_dt = 0.02 # Fundamental kinetic time step (ms)
def __init__(self, user_model, err_weight=True, fit_pre_photo=False, apply_IRF=True, apply_PG=True, t_PG=1.0):
'''
Initializor for a KineticModel object.
See ex_notebook_1.ipynb for API documentation.
'''
self._user_model = user_model
self._err_weight = err_weight
self._fit_pre_photo = fit_pre_photo
self._apply_IRF = apply_IRF
self._apply_PG = apply_PG
self._t_PG = t_PG
def fit(self, t, tbin, df_data, df_model_params, df_ALS_params, delta_xtick=20.0, conc_units=False, save_fn=None, quiet=False, **kwargs):
'''
Method for fitting data and optimizing parameters.
See ex_notebook_1.ipynb for API documentation.
'''
# Check fit t0 / fit_pre_photo
if df_ALS_params.at['t0','fit'] and not self._fit_pre_photo:
print('ERROR: Fit t0 is True but fit_pre_photo is False!')
print('If we are fitting t0, then we must also fit the pre-photolysis data.')
print('Otherwise, the cost function would be minimized by arbitrarily increasing t0.')
return None, None, None, None, None, None
# Determine start time, end time, and range of times over which to fit
t_start = float(t.min())
t_end = float(t.max())
idx_cost = np.full(t.shape, True) if self._fit_pre_photo else (t >= df_ALS_params.at['t0','val']) # Boolean array of indices to use in cost calculation
# Establish correspondence between data and model time axes
# Each entry in t has an exact match to an entry in t_model
# We take the below approach (rather than ==) to prevent any problems with numerical roundoff
t_model = self._time_axis(t_start, t_end, tbin)
idx_model = [np.abs(t_model - t[_]).argmin() for _ in range(t.size)] # Array of positional indices, maps model axis --> data axis
# Organize fitted species data into data_val and data_err frames
# Columns of data_val and data_err are species and the rows correspond to times in t array
data_fit = df_data[df_data['fit']]
species_names = list(data_fit.index)
data_val = pd.DataFrame(list(data_fit['val']), index=species_names).T
if self._err_weight:
data_err = pd.DataFrame(list(data_fit['err']), index=species_names).T
# Organize the fit parameters
model_params_fit = df_model_params[df_model_params['fit']]
ALS_params_fit = df_ALS_params[df_ALS_params['fit']]
p_names = list(model_params_fit.index) + list(ALS_params_fit.index)
p0 = np.concatenate((model_params_fit['val'], ALS_params_fit['val']))
# Define the cost function to be optimized
def calc_cost(p):
# Organize parameter values used for the current iteration of the fit into dictionaries
model_params_p = {}
for param in df_model_params.index:
model_params_p[param] = p[p_names.index(param)] if param in p_names else df_model_params.at[param,'val']
ALS_params_p = {}
for param in df_ALS_params.index:
ALS_params_p[param] = p[p_names.index(param)] if param in p_names else df_ALS_params.at[param,'val']
# Run the model - we only need the concentrations dataframe
_, c_model = self._model(t_start, t_end, tbin, model_params_p, ALS_params_p)
# Calculate the weighted residual array across points included in the cost computation
res = []
for species in species_names:
obs = data_val[species]
mod = ALS_params_p['S_'+species]*c_model[species]
# We take the sqrt of the weight since leastsq will square the array later
# Important to perform .values conversion to array BEFORE we subtract obs and mod (pandas subtracts Series by index alignment, not position)
species_res = np.sqrt(data_fit.at[species,'weight']) * (obs.values - mod.values[idx_model])[idx_cost]
if self._err_weight:
err = data_err[species]
species_res = species_res / err.values[idx_cost]
res.append(species_res)
# leastsq will square then sum all entries in the returned array, and minimize this cost value
return np.concatenate(res)
if not quiet:
print('Initial Cost Function Value: {:g}'.format((calc_cost(p0)**2).sum()))
print()
# Perform the fit
# NOTE: The backend of leastsq will automatically autoscale the fit parameters to the same order of magnitude if diag=None (default).
p, cov_p, infodict, mesg, ier = leastsq(calc_cost, p0, full_output=True, **kwargs)
# Calculate minimized cost value
cost = (infodict['fvec']**2).sum()
# Prepare covariance and correlation matrices
if cov_p is not None:
# Scale the covariance matrix according to documentation for leastsq and source code for curve_fit.
# Scale factor is cost / (# of data points - # fit parameters).
M = infodict['fvec'].size
N = p.size
cov_p *= cost / (M-N)
# Compute standard errors
p_err = np.sqrt(np.diag(cov_p))
# Compute correlation matrix
corr_p = np.array([[cov_p[i][j] / (np.sqrt(cov_p[i][i]*cov_p[j][j])) for j in range(cov_p.shape[1])] for i in range(cov_p.shape[0])])
else:
p_err = np.full(p.shape, np.NaN)
corr_p = None
# Convert fit results to dataframes
df_p = pd.DataFrame(np.array((p,p_err)).T, index=p_names, columns=('val','err'))
df_cov_p = pd.DataFrame(cov_p, index=p_names, columns=p_names)
# api: pandas.DataFrame
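# Aside (not part of the source file): the nested comprehension that builds
# corr_p above is equivalent to normalising the covariance matrix by the outer
# product of its standard errors, i.e.
# corr_p = cov_p / np.outer(np.sqrt(np.diag(cov_p)), np.sqrt(np.diag(cov_p)))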
import bisect
import os.path as osp
from collections import defaultdict
import json
import numpy as np
import scipy.linalg as LA
from scipy.ndimage import binary_dilation, generate_binary_structure
import pandas as pd
from PIL import Image
from tabulate import tabulate
from panopticapi.utils import rgb2id
from panoptic.pan_eval import PQStat, OFFSET, VOID
from panoptic.datasets.base import mapify_iterable
from fabric.io import load_object, save_object
_SEGMENT_UNMATCHED = 0
_SEGMENT_MATCHED = 1
_SEGMENT_FORGIVEN = 2
def generalized_aspect_ratio(binary_mask):
xs, ys = np.where(binary_mask)
coords = np.array([xs, ys]).T
# mean center the coords
coords = coords - coords.mean(axis=0)
# cov matrix
cov = coords.T @ coords
first, second = LA.eigvalsh(cov)[::-1]
ratio = (first ** 0.5) / (second + 1e-8) ** 0.5
return ratio
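# Aside (illustrative sanity check, not part of the source module): an
# elongated mask should give a generalized aspect ratio well above 1, while a
# roughly square blob gives a value near 1.
_demo_mask = np.zeros((20, 20), dtype=bool)
_demo_mask[5:7, 5:15] = True  # 2 x 10 rectangle of foreground pixels
_demo_ratio = generalized_aspect_ratio(_demo_mask)  # ~5.7 for this mask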
class Annotation():
'''
Overall Schema for Each Side (Either Gt or Pred)
e.g. pred:
cats: {id: cat}
imgs: {id: image}
segs {sid: seg}
img2seg: {image_id: {sid: seg}}
cat2seg: {cat_id: {sid: seg}}
cat:
id: 7,
name: road,
supercategory: 'flat',
color: [128, 64, 128],
isthing: 0
image:
id: 'frankfurt_000000_005898',
file_name: frankfurt/frankfurt_000000_005898_leftImg8bit.png
ann_fname: abcde
width: 2048,
height: 1024,
---
mask: a cached mask that is loaded
seg:
sid (seg id): gt/frankfurt_000000_000294/8405120
image_id: frankfurt_000000_000294
id: 8405120
category_id: 7
area: 624611
bbox: [6, 432, 1909, 547]
iscrowd: 0
match_state: one of (UNMATCHED, MATCHED, IGNORED)
matchings: {sid: iou} sorted from high to low
(breakdown_flag): this can be optionally introduced for breakdown analysis
'''
def __init__(self, json_meta_fname, is_gt, state_dict=None):
'''
if state_dict is provided, then just load it and avoid the computation
'''
dirname, fname = osp.split(json_meta_fname)
self.root = dirname
self.mask_root = osp.join(dirname, fname.split('.')[0])
if state_dict is None:
with open(json_meta_fname) as f:
state_dict = self.process_raw_meta(json.load(f), is_gt)
self.state_dict = state_dict
self.register_state(state_dict)
def register_state(self, state):
for k, v in state.items():
setattr(self, k, v)
@staticmethod
def process_raw_meta(raw_meta, is_gt):
state = dict()
state['cats'] = mapify_iterable(raw_meta['categories'], 'id')
state['imgs'] = mapify_iterable(raw_meta['images'], 'id')
state['segs'] = dict()
state['img2seg'] = defaultdict(dict)
state['cat2seg'] = defaultdict(dict)
sid_prefix = 'gt' if is_gt else 'pd'
for ann in raw_meta['annotations']:
image_id = ann['image_id']
segments = ann['segments_info']
state['imgs'][image_id]['ann_fname'] = ann['file_name']
for seg in segments:
cat_id = seg['category_id']
unique_id = '{}/{}/{}'.format(sid_prefix, image_id, seg['id'])
seg['sid'] = unique_id
seg['image_id'] = image_id
seg['match_state'] = _SEGMENT_FORGIVEN
seg['matchings'] = dict()
state['segs'][unique_id] = seg
state['img2seg'][image_id][unique_id] = seg
state['cat2seg'][cat_id][unique_id] = seg
return state
def seg_sort_matchings(self):
"""sort matchings from high to low IoU"""
for _, seg in self.segs.items():
matchings = seg['matchings']
seg['matchings'] = dict(
sorted(matchings.items(), key=lambda x: x[1], reverse=True)
)
def match_summarize(self, breakdown_flag=None):
'''
ret: [num_cats, 4] where each row contains
(iou_sum, num_matched, num_unmatched, total_inst)
'''
ret = []
for cat in sorted(self.cats.keys()):
segs = self.cat2seg[cat].values()
iou_sum, num_matched, num_unmatched, total_inst = 0.0, 0.0, 0.0, 0.0
for seg in segs:
if breakdown_flag is not None and seg['breakdown_flag'] != breakdown_flag:
continue # if breakdown is activated, only summarize those required
total_inst += 1
if seg['match_state'] == _SEGMENT_MATCHED:
iou_sum += list(seg['matchings'].values())[0]
num_matched += 1
elif seg['match_state'] == _SEGMENT_UNMATCHED:
num_unmatched += 1
ret.append([iou_sum, num_matched, num_unmatched, total_inst])
ret = np.array(ret)
return ret
def catId_given_catName(self, catName):
for catId, cat in self.cats.items():
if cat['name'] == catName:
return catId
raise ValueError('what kind of category is this? {}'.format(catName))
def get_mask_given_seg(self, seg):
return self.get_mask_given_imgid(seg['image_id'])
def get_img_given_imgid(self, image_id, img_root):
img = self.imgs[image_id]
img_fname = img['file_name']
img_fname = osp.join(img_root, img_fname)
img = Image.open(img_fname)
return img
def get_mask_given_imgid(self, image_id, store_in_cache=True):
img = self.imgs[image_id]
_MASK_KEYNAME = 'mask'
cache_entry = img.get(_MASK_KEYNAME, None)
if cache_entry is not None:
assert isinstance(cache_entry, np.ndarray)
return cache_entry
else:
mask_fname = img['ann_fname']
mask = np.array(
Image.open(osp.join(self.mask_root, mask_fname)),
dtype=np.uint32
)
mask = rgb2id(mask)
if store_in_cache:
img[_MASK_KEYNAME] = mask
return mask
def compute_seg_shape_oddity(self):
print('start computing shape oddity')
i = 0
for imgId, segs in self.img2seg.items():
i += 1
if (i % 50) == 0:
print(i)
mask = self.get_mask_given_imgid(imgId, store_in_cache=False)
for _, s in segs.items():
seg_id = s['id']
binary_mask = (mask == seg_id)
s['gen_aspect_ratio'] = generalized_aspect_ratio(binary_mask)
def compute_seg_boundary_stats(self):
print('start computing boundary stats')
i = 0
for imgId, segs in self.img2seg.items():
i += 1
if (i % 50) == 0:
print(i)
mask = self.get_mask_given_imgid(imgId, store_in_cache=False)
for _, s in segs.items():
seg_id = s['id']
binary_mask = (mask == seg_id)
self._per_seg_neighbors_stats(s, binary_mask, mask)
def _per_seg_neighbors_stats(self, seg_dict, binary_mask, mask):
area = binary_mask.sum()
# struct = generate_binary_structure(2, 2)
dilated = binary_dilation(binary_mask, structure=None, iterations=1)
boundary = dilated ^ binary_mask
# stats
length = boundary.sum()
ratio = length ** 2 / area
seg_dict['la_ratio'] = ratio
# get the neighbors
ids, cnts = np.unique(mask[boundary], return_counts=True)
sid_prefix = '/'.join(
seg_dict['sid'].split('/')[:2] # throw away the last
)
sids = [ '{}/{}'.format(sid_prefix, id) for id in ids ]
thing_neighbors = {
sid: cnt for sid, id, cnt in zip(sids, ids, cnts)
if id > 0 and self.cats[self.segs[sid]['category_id']]['isthing']
}
seg_dict['thing_neighbors'] = thing_neighbors
class PanopticEvalAnalyzer():
def __init__(self, gt_json_meta_fname, pd_json_meta_fname, load_state=True):
# use the pd folder as root directory since a single gt ann can correspond
# to many pd anns.
root = osp.split(pd_json_meta_fname)[0]
self.state_dump_fname = osp.join(root, 'analyze_dump.pkl')
is_evaluated = False
if osp.isfile(self.state_dump_fname) and load_state:
state = load_object(self.state_dump_fname)
gt_state, pd_state = state['gt'], state['pd']
is_evaluated = True
else:
gt_state, pd_state = None, None
self.gt = Annotation(gt_json_meta_fname, is_gt=True, state_dict=gt_state)
self.pd = Annotation(pd_json_meta_fname, is_gt=False, state_dict=pd_state)
# validate that gt and pd json completely match
assert self.gt.imgs.keys() == self.pd.imgs.keys()
assert self.gt.cats == self.pd.cats
self.imgIds = list(sorted(self.gt.imgs.keys()))
if not is_evaluated:
# evaluate and then save the state
self._evaluate()
self.gt.compute_seg_shape_oddity()
self.pd.compute_seg_shape_oddity()
self.dump_state()
def _gt_boundary_stats(self):
self.gt.compute_seg_boundary_stats()
def dump_state(self):
state = {
'gt': self.gt.state_dict,
'pd': self.pd.state_dict
}
save_object(state, self.state_dump_fname)
def _evaluate(self):
stats = PQStat()
cats = self.gt.cats
for i, imgId in enumerate(self.imgIds):
if (i % 50) == 0:
print("progress {} / {}".format(i, len(self.imgIds)))
# if (i > 100):
# break
gt_ann = {
'image_id': imgId, 'segments_info': self.gt.img2seg[imgId].values()
}
gt_mask = np.array(
Image.open(osp.join(
self.gt.mask_root, self.gt.imgs[imgId]['ann_fname']
)),
dtype=np.uint32
)
gt_mask = rgb2id(gt_mask)
pd_ann = {
'image_id': imgId, 'segments_info': self.pd.img2seg[imgId].values()
}
pd_mask = np.array(
Image.open(osp.join(
self.pd.mask_root, self.pd.imgs[imgId]['ann_fname']
)),
dtype=np.uint32
)
pd_mask = rgb2id(pd_mask)
_single_stat = self.pq_compute_single_img(
cats, gt_ann, gt_mask, pd_ann, pd_mask
)
stats += _single_stat
self.gt.seg_sort_matchings()
self.pd.seg_sort_matchings()
return stats
def summarize(self, flag=None):
per_cat_res, overall_table, cat_table = self._aggregate(
gt_stats=self.gt.match_summarize(flag),
pd_stats=self.pd.match_summarize(flag),
cats=self.gt.cats
)
return per_cat_res, overall_table, cat_table
@staticmethod
def _aggregate(gt_stats, pd_stats, cats):
'''
Args:
pd/gt_stats: [num_cats, 4] with each row contains
(iou_sum, num_matched, num_unmatched, total_inst)
cats: a dict of {catId: catMetaData}
Returns:
1. per cat pandas dataframe; easy to programmatically manipulate
2. str formatted overall result table
3. str formatted per category result table
'''
# each is of shape [num_cats]
gt_iou, gt_matched, gt_unmatched, gt_tot_inst = gt_stats.T
pd_iou, pd_matched, pd_unmatched, pd_tot_inst = pd_stats.T
assert np.allclose(gt_iou, pd_iou) and (gt_matched == pd_matched).all()
catIds = list(sorted(cats.keys()))
catNames = [cats[id]['name'] for id in catIds]
isthing = np.array([cats[id]['isthing'] for id in catIds], dtype=bool)
RQ = gt_matched / (gt_matched + 0.5 * gt_unmatched + 0.5 * pd_unmatched)
SQ = gt_iou / gt_matched
RQ, SQ = np.nan_to_num(RQ), np.nan_to_num(SQ)
PQ = RQ * SQ
results = np.array([PQ, SQ, RQ]) * 100 # [3, num_cats]
overall_table = tabulate(
headers=['', 'PQ', 'SQ', 'RQ', 'num_cats'],
floatfmt=".2f", tablefmt='fancy_grid',
tabular_data=[
['all'] + list(map(lambda x: x.mean(), results)) + [len(catIds)],
['things'] + list(map(lambda x: x[isthing].mean(), results)) + [sum(isthing)],
['stuff'] + list(map(lambda x: x[~isthing].mean(), results)) + [sum(1 - isthing)],
]
)
headers = (
'PQ', 'SQ', 'RQ',
'num_matched', 'gt_unmatched', 'pd_unmatched', 'tot_gt_inst',
'isthing'
)
results = np.array(
list(results) + [gt_matched, gt_unmatched, pd_unmatched, gt_tot_inst, isthing]
)
results = results.T
data_frame = pd.DataFrame(results, columns=headers, index=catNames)
# api: pandas.DataFrame
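# Aside (illustrative sketch, not part of the evaluation code above): building
# a small per-category results table from a 2-D NumPy array, mirroring the
# pd.DataFrame(results, columns=headers, index=catNames) call.
import numpy as np
import pandas as pd
vals = np.array([[55.0, 80.0, 68.8], [40.0, 75.0, 53.3]])
table = pd.DataFrame(vals, columns=['PQ', 'SQ', 'RQ'], index=['road', 'car'])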
# HOW TO RUN
# python train.py --model output/model.pth --plot output/plot.png
# import the necessary libraries
from torchvision.transforms import RandomHorizontalFlip
from torch.utils.data import WeightedRandomSampler
from sklearn.metrics import classification_report
from torchvision.transforms import RandomCrop
from torchvision.transforms import Grayscale
from torchvision.transforms import ToTensor
from torch.utils.data import random_split
from torch.utils.data import DataLoader
from neuraspike import config as cfg
from neuraspike import EarlyStopping
from neuraspike import LRScheduler
from torchvision import transforms
from neuraspike import EmotionNet
from torchvision import datasets
import matplotlib.pyplot as plt
from collections import Counter
from datetime import datetime
from torch.optim import SGD
import torch.nn as nn
import pandas as pd
import argparse
import torch
import math
# initialize the argument parser and establish the arguments required
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', type=str, help='Path to save the trained model')
parser.add_argument('-p', '--plot', type=str, help='Path to save the loss/accuracy plot')
args = vars(parser.parse_args())
# configure the device to use for training the model, either gpu or cpu
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"[INFO] Current training device: {device}")
# initialize a list of preprocessing steps to apply on each image during
# training/validation and testing
train_transform = transforms.Compose([
Grayscale(num_output_channels=1),
RandomHorizontalFlip(),
RandomCrop((48, 48)),
ToTensor()
])
test_transform = transforms.Compose([
Grayscale(num_output_channels=1),
ToTensor()
])
# load all the images within the specified folder and apply different augmentation
train_data = datasets.ImageFolder(cfg.trainDirectory, transform=train_transform)
test_data = datasets.ImageFolder(cfg.testDirectory, transform=test_transform)
# extract the class labels and the total number of classes
classes = train_data.classes
num_of_classes = len(classes)
print(f"[INFO] Class labels: {classes}")
# use train samples to generate train/validation set
num_train_samples = len(train_data)
train_size = math.floor(num_train_samples * cfg.TRAIN_SIZE)
val_size = math.ceil(num_train_samples * cfg.VAL_SIZE)
print(f"[INFO] Train samples: {train_size} ...\t Validation samples: {val_size}...")
# randomly split the training dataset into train and validation set
train_data, val_data = random_split(train_data, [train_size, val_size])
# modify the data transform applied towards the validation set
val_data.dataset.transforms = test_transform
# get the labels within the training set
train_classes = [label for _, label in train_data]
# count the number of samples within each class
class_count = Counter(train_classes)
print(f"[INFO] Total sample: {class_count}")
# compute and determine the weights to be applied on each category
# depending on the number of samples available
class_weight = torch.Tensor([len(train_classes) / c for c in pd.Series(class_count)])
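# Hedged sketch (not necessarily what the original tutorial does next): one common way to use
# the per-class weights above is to expand them into per-sample weights and feed them to the
# WeightedRandomSampler imported earlier, so under-represented emotions are sampled more often.
# The batch size of 64 is illustrative, not taken from the config.
sample_weights = torch.tensor(
    [len(train_classes) / class_count[label] for label in train_classes], dtype=torch.float
)
train_sampler = WeightedRandomSampler(
    weights=sample_weights, num_samples=len(sample_weights), replacement=True
)
train_loader = DataLoader(train_data, batch_size=64, sampler=train_sampler)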
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
from urllib.request import urlopen
from urllib.parse import urlencode, unquote, quote_plus
import urllib, requests, json, datetime, logging, xmltodict, subprocess
import pandas as pd
log = logging.getLogger('detect')
log.setLevel(logging.DEBUG)
log_handler = logging.StreamHandler()
log.addHandler(log_handler)
weather_key = "BZF4W49MNogC/5NdkMns/q8XPYfp/T5U2csm3nasMwRH28LLCUEzoLrnMOhO2mdkQHFYTEChLs5XdbpaM/rXpg=="
firekey = '<KEY>'
normal_gpio = 68
high_gpio = 70
danger_gpio = 71
xylist = pd.read_csv("./locate_data.csv", encoding='CP949', error_bad_lines=False)
uniq_xylist = xylist[['1단계', '2단계', '3단계','격자 X', '격자 Y']].drop_duplicates()
txt_path="./filecontrol"
def test_func():
global now_weather
now_weather = nowcast("성수1가제1동")
now_firecast = firecast("성수동2가")
time = datetime.datetime.now()
now = time.strftime("%H%M")
return "성수1가제1동 " + now + now_weather + " 산불위험 " + now_firecast
def find_xy(loc):
xylist = pd.read_csv("./locate_data.csv", encoding='CP949', error_bad_lines=False)
uniq_xylist = xylist[['1단계', '2단계', '3단계','격자 X', '격자 Y']].drop_duplicates()
try:
f_line = uniq_xylist[uniq_xylist['3단계'].str.contains(loc, na=False)]
except:
try:
f_line = uniq_xylist[uniq_xylist['2단계'].str.contains(loc, na=False)]
except:
try:
f_line = uniq_xylist[uniq_xylist['1단계'].str.contains(loc, na=False)]
except:
log.info("주소가 잘못되었습니다")
return 0,0
xy_list = f_line[["격자 X", "격자 Y"]].values
return xy_list[0][0], xy_list[0][1]
def find_localArea(loc):
    list = pd.read_csv("./location_firecast.csv", encoding='utf-8', error_bad_lines=False)
import requests
import json
import pandas as pd
from functools import reduce
def ig_get(base_url, endpoint_parameters, to_df = True):
"""
Send Request to Faceboook endpoint
base_url: str url to point to. Consist of endpoint_base and client_id/page_id depending on endpoint to request
endpoint_parameters: dict Parameters to include in the request
to_df: bool flag to transform response to pd.DataFrame
return: list or df depending on 'to_df'
"""
req = requests.get(base_url, endpoint_parameters)
respond = json.loads(req.content)
if to_df:
respond = pd.DataFrame(respond['data'])
return respond
def ig_media_insight(insight_list, endpoint_parameters, metrics = 'engagement,impressions,reach,saved'):
"""
Loop over ig media posts to get deeper insight and convert to tidy df
Source: https://towardsdatascience.com/discover-insights-from-your-instagram-business-account-with-facebook-graph-api-and-python-81d20ee2e751
insight_list: list of media, response from '/media' endpoint
endpoint_parameters: dict Parameters to include in the request
metrics: str metric names comma-separated
return: pd.df
"""
media_insight = []
# Loop Over 'Media ID'
for imedia in insight_list['data']:
# Define URL
url = endpoint_parameters['endpoint_base'] + imedia['id'] + '/insights'
# Define Endpoint Parameters
parameters_media = dict()
parameters_media['metric'] = metrics
parameters_media['access_token'] = endpoint_parameters['access_token']
# Requests Data
media_data = requests.get(url, parameters_media )
json_media_data = json.loads(media_data.content)
media_insight.append(list(json_media_data['data']))
# Initialize Empty Container
engagement_list = []
impressions_list = []
reach_list = []
saved_list = []
# Loop Over Insights to Fill Container
for insight in media_insight:
engagement_list.append(insight[0]['values'][0]['value'])
impressions_list.append(insight[1]['values'][0]['value'])
reach_list.append(insight[2]['values'][0]['value'])
saved_list.append(insight[3]['values'][0]['value'])
# Create DataFrame
media_insight = list(zip(engagement_list, impressions_list, reach_list, saved_list))
    media_insight_df = pd.DataFrame(media_insight, columns=['engagement', 'impressions', 'reach', 'saved'])
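# Hedged usage sketch: the Graph API version, IG user id and access token below are
# placeholders, not values shipped with this script.
endpoint_parameters = {
    'endpoint_base': 'https://graph.facebook.com/v12.0/',  # placeholder API version
    'instagram_account_id': '<IG_USER_ID>',                # placeholder
    'access_token': '<ACCESS_TOKEN>'                        # placeholder
}
media_url = endpoint_parameters['endpoint_base'] + endpoint_parameters['instagram_account_id'] + '/media'
media_list = ig_get(media_url, {'access_token': endpoint_parameters['access_token']}, to_df=False)
insight_df = ig_media_insight(media_list, endpoint_parameters)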
import pandas as pd
import re
# Exception for incorrect chromosome namings
class IncorrectChr(Exception):
def __init__(self):
Exception.__init__(self, "Incorrect chromosome naming in file: Chromosomes must be named chr1 - chr22.")
# Exception for incorrect positional coordinates
class IncorrectCoord(Exception):
def __init__(self):
Exception.__init__(self,
"Incorrect start and end coordinates. coordinates must not be larger than 2^32 or less than "
"1. End coordinates must be greater than start coordinates")
# Exception for incorrect feature name
class IncorrectFeatureName(Exception):
def __init__(self):
Exception.__init__(self,
"feature name must be alphanumeric and can only contain hyphen, underscore and parentheses")
# Exception for incorrect strand characters
class IncorrectStrand(Exception):
def __init__(self):
Exception.__init__(self, "Strand must be + or -")
# Function to read BED file, parse and add to pandas dataframe
def load_file(filename):
#Try to open file and throw error if cannot be opened/found.
try:
fd = open(filename, 'r')
except FileNotFoundError as fnf_error:
print(fnf_error)
else:
#definition of max and min allowed coordinates
max_coord = 2 ** 32
min_coord = 1
bed_list = []
#Read BED file line by line
for line in fd:
bed_temp = line.strip().split("\t")
# Check if chromosome is correctly named. Chr1 - chr22
try:
if not re.match("^(chr2[0-2]|chr1[0-9]|chr[1-9])$", bed_temp[0]):
raise IncorrectChr()
except IncorrectChr:
print("Offending Line: %s" % (line.strip()))
fd.close()
raise
else:
#Remove chr string as per specification
bed_temp[0] = bed_temp[0].replace("chr", "")
bed_temp[1] = int(bed_temp[1])
bed_temp[2] = int(bed_temp[2])
# Check if start and end coordinates are as per specification
try:
if bed_temp[1] > max_coord or bed_temp[2] > max_coord or bed_temp[1] < min_coord or \
bed_temp[2] < min_coord or bed_temp[2] < bed_temp[1]:
raise IncorrectCoord()
except IncorrectCoord:
print("Offending Line: %s" % (line.strip()))
fd.close()
raise
# Check if feature name contains acceptable characters only
try:
if re.match('^[a-zA-Z0-9-_()]+$', bed_temp[3]) is None:
raise IncorrectFeatureName()
except IncorrectFeatureName:
print("Offending Line: %s" % (line.strip()))
fd.close()
raise
# Check if strand contains acceptable characters only
try:
if bed_temp[4] not in ["-", "+"]:
raise IncorrectStrand()
except IncorrectStrand:
print("Offending Line: %s" % (line.strip()))
fd.close()
raise
#If passed all checks, add line to list
bed_list.append(bed_temp)
# Create pandas dataframe
header = ["chrom", "start", "end", "name", "strand"]
bed = pd.DataFrame(bed_list, columns=header)
fd.close()
return bed
def search_position(df, chrom, start=None, end=None):
max_coord = 2 ** 32
min_coord = 1
# Check if chromosome is correctly named. Chr1 - chr22
try:
if not re.match("^(chr2[0-2]|chr1[0-9]|chr[1-9])$", chrom):
raise IncorrectChr()
except IncorrectChr:
print("Search Input: %s" % chrom)
raise
else:
chrom = chrom.replace("chr", "")
# If start and end are provided, do positional subset
if (start is not None) and (end is not None):
# Check if input start and end coordinates are as per specification
try:
if start > max_coord or end > max_coord or start < min_coord or end < min_coord or end < start:
raise IncorrectCoord()
except IncorrectCoord:
print("Search start and end input: %s %s" % (start, end))
raise
subset = df.loc[((df['chrom'] == chrom) & (df['start'] >= start) & (df['end'] < end))].reset_index(drop=True)
if subset.empty:
return None
else:
return subset
# Otherwise, just subset on chromosome
else:
subset = df.loc[df['chrom'] == chrom].reset_index(drop=True)
if subset.empty:
return None
else:
return subset
def search_featurename(df, name):
# Check if feature name contains acceptable characters only
try:
if re.match('^[a-zA-Z0-9-_()]+$', name) is None:
raise IncorrectFeatureName()
except IncorrectFeatureName:
print("Search feature name input: %s" % name)
raise
else:
# Subset based on feature name.
subset = df.loc[df['name'] == name].reset_index(drop=True)
if subset.empty:
return None
else:
return subset
def summary_statistics(data):
# Copy data to not overwrite original dataframe
df_copy = data.copy()
# Calculate length as end - start
df_copy['length'] = df_copy['end'] - df_copy['start']
# Calculate number of features per chromosome
stats = df_copy.groupby(['chrom'])['chrom'].count()
    stats = pd.DataFrame(stats)
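# Hedged usage sketch, assuming a tab-separated file "example.bed" with columns
# chrom, start, end, name, strand; the file name and feature name are illustrative only.
if __name__ == "__main__":
    bed_df = load_file("example.bed")
    chr1_features = search_position(bed_df, "chr1", start=1000, end=500000)
    named_feature = search_featurename(bed_df, "BRCA1-region")
    print(chr1_features, named_feature, sep="\n")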
# coding: utf-8
import pandas as pd
import numpy as np
import pymorphy2 as pm2
from sklearn import preprocessing
import matplotlib.pyplot as plt
from operator import itemgetter
from itertools import groupby
import collections as clcts
import time
import re
import sys
import random as rnd
class path:
def __init__(self):
self.jams_on_month = 'traffic_jams/jams_on_month.csv'
self.jams_on_week = 'traffic_jams/jams_on_week.csv'
self.convertibility = 'traffic_jams/convertibility.csv'
path = path()
def df_to_csv(path = 'traffic_jams/', name = 'preproc_.csv'):
df.to_csv(path + name, encoding='UTF-8')
def df_from_csv(path = 'traffic_jams/', name = 'preproc_.csv'):
return pd.read_csv(path + name, encoding='UTF-8')
def get_week_day_jams(jow):
week_jams = {
'пн' : [],
'вт' : [],
'ср' : [],
'чт' : [],
'пт' : [],
'сб' : [],
'вс' : []
}
for indx, day in zip(range(0, len(jow), 24), week_jams.keys()):
week_jams[day] = list(jow[indx:indx+24].point)
return week_jams
def get_year_month_jams(joy):
year_month_jams = {
2016 : [],
2017 : [],
2018 : []
}
for indx, year in zip(range(1, len(joy), 12), year_month_jams.keys()):
year_month_jams[year] = list(joy[indx-1:indx+11].weight)
return normalize_year_month_jams(year_month_jams)
def normalize_year_month_jams(year_month_jams):
norm = preprocessing.normalize( list(year_month_jams.values()), norm='l1' )
for indx, year in enumerate(year_month_jams.keys()):
year_month_jams[year] = norm[indx]
return year_month_jams
def get_compromise_point(points):
dsp = 0.333
return np.mean([int(str(points)[0]), int(str(points)[1])]) + rnd.uniform(-dsp, dsp)
def uniq_jam_point(year, month, day, hour):
global year_month_jams
global week_day_jams
global con
if con.point[hour]:
season_weight = year_month_jams[year][month-1]
else:
dsp = 0.010
season_weight = rnd.uniform(-dsp, dsp)
week_weight = week_day_jams[day][hour]
if week_weight > 10:
week_weight = get_compromise_point(week_weight)
return week_weight * (1 + season_weight)
joy = pd.read_csv(path.jams_on_month, encoding='UTF-8')
jow = pd.read_csv(path.jams_on_week, encoding='UTF-8')
con = pd.read_csv(path.convertibility, encoding='UTF-8')
import json
import pandas as pd
import time
#################################
#
#with open('logs.json', 'r') as data:
# data = data.read()
#
#logs = json.loads(data)
#
########################
def get_data(file):
with open(file, 'r') as data:
data = data.read()
logs = json.loads(data)
#s = Sender('Test', '192.168.1.214')
#logs = s.list_logs()
    df = pd.DataFrame(columns=['acquired_time'])
from __future__ import absolute_import
from __future__ import division
import collections
import math
import os
import sys
import random
import numpy as np
from six.moves import urllib
from six.moves import xrange
import tensorflow as tf
from timeit import default_timer as timer
import pandas as pd
import re
import io
from nltk.tokenize import TweetTokenizer
import num2words
base_path = os.environ['HOMEPATH']
data_folder='data'
training_filename = os.path.join(base_path, data_folder, 'training_text.csv')
model_identifier = 'Word2Vec_Basic'
embedding_folder = os.path.join(base_path, 'vectors')
if not os.path.exists(embedding_folder):
os.makedirs(embedding_folder)
batch_size = 128 # Batch size
embedding_size = 50 # Dimension of the embedding vector.
skip_window = 2 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
# Data processing and batch preparation
# In the following code, we replace Emails, URLS, emoticons etc with special labels
pos_emoticons=["(^.^)","(^-^)","(^_^)","(^_~)","(^3^)","(^o^)","(~_^)","*)",":)",":*",":-*",":]",":^)",":}",
":>",":3",":b",":-b",":c)",":D",":-D",":O",":-O",":o)",":p",":-p",":P",":-P",":Þ",":-Þ",":X",
":-X",";)",";-)",";]",";D","^)","^.~","_)m"," ~.^","<=8","<3","<333","=)","=///=","=]","=^_^=",
"=<_<=","=>.<="," =>.>="," =3","=D","=p","0-0","0w0","8D","8O","B)","C:","d'-'","d(>w<)b",":-)",
"d^_^b","qB-)","X3","xD","XD","XP","ʘ‿ʘ","❤","💜","💚","💕","💙","💛","💓","💝","💖","💞",
"💘","💗","😗","😘","😙","😚","😻","😀","😁","😃","☺","😄","😆","😇","😉","😊","😋","😍",
"😎","😏","😛","😜","😝","😮","😸","😹","😺","😻","😼","👍"]
neg_emoticons=["--!--","(,_,)","(-.-)","(._.)","(;.;)9","(>.<)","(>_<)","(>_>)","(¬_¬)","(X_X)",":&",":(",":'(",
":-(",":-/",":-@[1]",":[",":\\",":{",":<",":-9",":c",":S",";(",";*(",";_;","^>_>^","^o)","_|_",
"`_´","</3","<=3","=/","=\\",">:(",">:-(","💔","☹️","😌","😒","😓","😔","😕","😖","😞","😟",
"😠","😡","😢","😣","😤","😥","😦","😧","😨","😩","😪","😫","😬","😭","😯","😰","😱","😲",
"😳","😴","😷","😾","😿","🙀","💀","👎"]
# Emails
emailsRegex=re.compile(r'[\w\.-]+@[\w\.-]+')
# Mentions
userMentionsRegex=re.compile(r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)')
#Urls
urlsRegex=re.compile(r'(f|ht)(tp)(s?)(://)(.*)[.|/][^ ]+') # It may not be handling all the cases like t.co without http
#Numerics
numsRegex=re.compile(r"\b\d+\b")
punctuationNotEmoticonsRegex=re.compile(r'(?<=\w)[^\s\w](?![^\s\w])')
emoticonsDict = {}
for i,each in enumerate(pos_emoticons):
emoticonsDict[each]=' POS_EMOTICON_'+num2words.num2words(i).upper()+' '
for i,each in enumerate(neg_emoticons):
emoticonsDict[each]=' NEG_EMOTICON_'+num2words.num2words(i).upper()+' '
# use these three lines to do the replacement
rep = dict((re.escape(k), v) for k, v in emoticonsDict.items())
emoticonsPattern = re.compile("|".join(rep.keys()))
def read_data(filename):
"""Read the raw tweet data from a file. Replace Emails etc with special tokens """
with open(filename, 'r') as f:
all_lines=f.readlines()
padded_lines=[]
for line in all_lines:
line = emoticonsPattern.sub(lambda m: rep[re.escape(m.group(0))], line.lower().strip())
line = userMentionsRegex.sub(' USER ', line )
line = emailsRegex.sub(' EMAIL ', line )
line=urlsRegex.sub(' URL ', line)
line=numsRegex.sub(' NUM ',line)
line=punctuationNotEmoticonsRegex.sub(' PUN ',line)
line=re.sub(r'(.)\1{2,}', r'\1\1',line)
words_tokens=[token for token in TweetTokenizer().tokenize(line)]
line= ' '.join(token for token in words_tokens )
padded_lines.append(line)
padded_data=' '.join(line for line in padded_lines)
encoded_data=tf.compat.as_str(padded_data).split()
return encoded_data
def build_dataset(words, n_words):
"""Get raw tokens and build count dictionaries etc"""
count = [['<UNK>', -1]]
count.extend(collections.Counter(words).most_common(n_words))
dictionary = {}
reverse_dictionary={}
for word, _ in count:
temp = len(dictionary)
dictionary[word]=temp
reverse_dictionary[temp]=word
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
return data, count, dictionary, reverse_dictionary
def generate_batch(data,batch_size, num_skips, skip_window ):
"""generate batches of data"""
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
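# Hedged sketch of the shapes generate_batch yields, using a tiny toy corpus of token ids
# instead of the real tweet data; data_index is the module-level cursor the function expects.
data_index = 0
toy_data = list(range(20))
toy_batch, toy_labels = generate_batch(toy_data, batch_size=8, num_skips=2, skip_window=1)
print(toy_batch.shape, toy_labels.shape)  # (8,) and (8, 1)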
# Model Training
# use device='/gpu:0' to train over gpu
def init_model(device='/cpu:0'):
""" initialize model over the input device"""
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
with tf.device(device):
# Look up embeddings for inputs.
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
return graph, init, train_inputs, train_labels, valid_dataset, loss, optimizer,similarity,normalized_embeddings
def training(graph, init, train_inputs, train_labels, valid_dataset,loss, optimizer,similarity,normalized_embeddings):
""" train model over the tweet data"""
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(data,batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
#print(log_str)
final_embeddings = normalized_embeddings.eval()
return final_embeddings
# Export Embedding
def export_embeddings(final_embeddings):
""" export embeddings to file"""
embedding_weights=pd.DataFrame(final_embeddings).round(6).reset_index()
word_indices_df=pd.DataFrame.from_dict(reverse_dictionary,orient='index').reset_index()
word_indices_df.columns=['index','word']
print (word_indices_df.shape,embedding_weights.shape)
    merged = pd.merge(word_indices_df, embedding_weights)
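# Hedged illustration of the merge above: both frames keep an 'index' column after
# reset_index(), so pd.merge with no explicit keys joins on that shared column.
# The toy frames below are illustrative, not real vocabulary or embeddings.
left = pd.DataFrame({'index': [0, 1, 2], 'word': ['<UNK>', 'the', 'cat']})
right = pd.DataFrame({'index': [0, 1, 2], 0: [0.1, 0.2, 0.3], 1: [0.4, 0.5, 0.6]})
print(pd.merge(left, right))  # inner join on 'index': each word next to its embedding values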
from snsql import *
import pandas as pd
import numpy as np
privacy = Privacy(epsilon=3.0, delta=0.1)
class TestPreAggregatedSuccess:
# Test input checks for pre_aggregated
def test_list_success(self, test_databases):
# pass in properly formatted list
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
priv = test_databases.get_private_reader(
privacy=privacy,
database="PUMS_pid",
engine="pandas"
)
if priv:
res = priv.execute(query, pre_aggregated=pre_aggregated)
assert(str(res[1][0]) == '1') # it's sorted
def test_pandas_success(self, test_databases):
# pass in properly formatted dataframe
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
colnames = pre_aggregated[0]
pre_aggregated = pd.DataFrame(data=pre_aggregated[1:], index=None)
pre_aggregated.columns = colnames
priv = test_databases.get_private_reader(
privacy=privacy,
database="PUMS_pid",
engine="pandas"
)
if priv:
query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
res = priv.execute(query, pre_aggregated=pre_aggregated)
assert(str(res[1][0]) == '1') # it's sorted
def test_pandas_success_df(self, test_databases):
# pass in properly formatted dataframe
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
colnames = pre_aggregated[0]
pre_aggregated = pd.DataFrame(data=pre_aggregated[1:], index=None)
pre_aggregated.columns = colnames
priv = test_databases.get_private_reader(
privacy=privacy,
database="PUMS_pid",
engine="pandas"
)
if priv:
query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
res = priv.execute_df(query, pre_aggregated=pre_aggregated)
assert(str(res['sex'][0]) == '1') # it's sorted
def test_np_ndarray_success(self, test_databases):
# pass in properly formatted dataframe
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
colnames = pre_aggregated[0]
pre_aggregated = pd.DataFrame(data=pre_aggregated[1:], index=None)
pre_aggregated.columns = colnames
pre_aggregated = pre_aggregated.to_numpy()
priv = test_databases.get_private_reader(
privacy=privacy,
database="PUMS_pid",
engine="pandas"
)
if priv:
query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
res = priv.execute(query, pre_aggregated=pre_aggregated)
assert(str(res[1][0]) == '1') # it's sorted
def test_np_array_success(self, test_databases):
# pass in properly formatted dataframe
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
pre_aggregated = np.array(pre_aggregated[1:])
priv = test_databases.get_private_reader(
privacy=privacy,
database="PUMS_pid",
engine="pandas"
)
if priv:
query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
res = priv.execute(query, pre_aggregated=pre_aggregated)
assert(str(res[1][0]) == '1') # it's sorted
def test_spark_df_success(self, test_databases):
# pass in properly formatted dataframe
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
priv = test_databases.get_private_reader(
privacy=privacy,
database="PUMS_pid",
engine="spark"
)
if priv:
pre_aggregated = priv.reader.api.createDataFrame(pre_aggregated[1:], pre_aggregated[0])
query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
res = priv.execute(query, pre_aggregated=pre_aggregated)
res = test_databases.to_tuples(res)
assert(str(res[1][0]) == '1') # it's sorted
def test_spark_df_success_df(self, test_databases):
# pass in properly formatted dataframe
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
priv = test_databases.get_private_reader(
privacy=privacy,
database="PUMS_pid",
engine="spark"
)
if priv:
pre_aggregated = priv.reader.api.createDataFrame(pre_aggregated[1:], pre_aggregated[0])
query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
res = priv.execute_df(query, pre_aggregated=pre_aggregated)
assert(str(res['sex'][0]) == '1') # it's sorted
def test_spark_rdd_success(self, test_databases):
# pass in properly formatted dataframe
pre_aggregated = [
('keycount', 'sex', 'count_star'),
(1000, 2, 2000),
(1000, 1, 2000)
]
priv = test_databases.get_private_reader(
privacy=privacy,
database="PUMS_pid",
engine="spark"
)
if priv:
pre_aggregated = priv.reader.api.createDataFrame(pre_aggregated[1:], pre_aggregated[0])
pre_aggregated = pre_aggregated.rdd
query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
res = priv.execute(query, pre_aggregated=pre_aggregated)
res = test_databases.to_tuples(res)
assert(str(res[1][0]) == '1') # it's sorted
class TestPreAggregatedColumnFail:
# Test input checks for pre_aggregated
def test_list_col_fail(self, test_databases):
# pass in wrongly formatted list
pre_aggregated = [
('count_star', 'sex', 'count_age'),
(1000, 2, 2000),
(1000, 1, 2000)
]
query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
priv = test_databases.get_private_reader(
privacy=privacy,
database="PUMS_pid",
engine="pandas"
)
if priv:
try:
_ = priv.execute(query, pre_aggregated=pre_aggregated)
except ValueError:
return
raise AssertionError("execute should have raised an exception")
def test_pandas_col_fail(self, test_databases):
# pass in wrongly formatted dataframe
pre_aggregated = [
('count_star', 'sex', 'count_age'),
(1000, 2, 2000),
(1000, 1, 2000)
]
colnames = pre_aggregated[0]
        pre_aggregated = pd.DataFrame(data=pre_aggregated[1:], index=None)
import json
import pandas as pd
game_data = json.load(open('game_data_raw.json', encoding='utf-8'))
game_data.extend(json.load(open('game_data_raw_appendix.json', encoding='utf-8')))
midi_db = json.load(open('midi_db.json', encoding='utf-8'))
consoles_mapping = json.load(open('tgdb_consoles_mapping.json', encoding='utf-8'))
game_data = {(x[0][0], x[0][1]): x[1] for x in game_data}
data = []
for midi in midi_db:
midi_data = {'brand': midi['brand'],
'console': midi['console'],
'game': midi['game'],
'title': midi['title'],
'file_name': midi['path'].split('/')[-1]
}
key = (consoles_mapping[midi['console']], midi['game'])
if key in game_data:
for info in ['Developer', 'ESRB', 'GameTitle', 'Overview', 'Platform', 'Players', 'Publisher', 'Rating', 'ReleaseDate', 'id']:
midi_data['tgdb_%s'%(info.lower())] = game_data[key].get(info)
if 'Genres' in game_data[key]:
genres = game_data[key]['Genres'].get('genre')
if isinstance(genres, str):
genres = [genres]
midi_data['tgdb_genres'] = genres
data.append(midi_data)
df = pd.DataFrame(data)
#!/usr/bin/env python3
"""Take SVG files of individual plots and convert them to CSV."""
import os
import click
import pandas as pd
import svgpathtools
from matplotlib import pyplot as plt
Y_AXIS_SPAN = 80 # Distance in percentage points from baseline to upper and lower lines
X_AXIS_SPAN = 42 # Distance covered by x-axis in days
@click.command()
@click.argument("INPUT_FOLDER")
@click.argument("OUTPUT_FOLDER")
@click.argument("DATES_FILE")
@click.option(
"-p",
"--plots",
is_flag=True,
default=False,
help="Enables creation and saving of additional PNG plots",
)
def main(input_folder, output_folder, dates_file, plots):
"""Turn SVG graphs into CSVs.
Given an input folder of single plot SVGs convert them into CSV files.
Args:
input_folder: Location of SVG files
output_folder: Location to store the CSVs
dates_file: Lookup from x axis steps to date
plots: Boolean flag
Set to true to create png plots from the extracted data
(used for manual inspection checks against source plots)
"""
    date_lookup_df = pd.read_csv(dates_file)
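# Hedged CLI usage sketch; the script filename "svg_to_csv.py" is illustrative, and the
# positional arguments map to INPUT_FOLDER, OUTPUT_FOLDER and DATES_FILE above:
#   python svg_to_csv.py ./svg_plots ./csv_out dates_lookup.csv --plots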
import json
import os
import unittest
from collections import OrderedDict
import numpy as np
import pandas as pd
import xarray as xr
from cate.core.workflow import Workflow, OpStep
from cate.core.workspace import Workspace, WorkspaceError, mk_op_arg, mk_op_args, mk_op_kwargs
from cate.util.undefined import UNDEFINED
from cate.util.opmetainf import OpMetaInfo
NETCDF_TEST_FILE_1 = os.path.join(os.path.dirname(__file__), '..', 'data', 'precip_and_temp.nc')
NETCDF_TEST_FILE_2 = os.path.join(os.path.dirname(__file__), '..', 'data', 'precip_and_temp_2.nc')
class WorkspaceTest(unittest.TestCase):
def test_utilities(self):
self.assertEqual(mk_op_arg(1), {'value': 1})
self.assertEqual(mk_op_arg('2'), {'value': '2'})
self.assertEqual(mk_op_arg('a'), {'value': 'a'})
self.assertEqual(mk_op_arg('@b'), {'source': 'b'})
self.assertEqual(mk_op_args(), [])
self.assertEqual(mk_op_args(1, '2', 'a', '@b'), [{'value': 1}, {'value': '2'}, {'value': 'a'}, {'source': 'b'}])
self.assertEqual(mk_op_kwargs(a=1), OrderedDict([('a', {'value': 1})]))
self.assertEqual(mk_op_kwargs(a=1, b='@c'), OrderedDict([('a', {'value': 1}), ('b', {'source': 'c'})]))
def test_workspace_is_part_of_context(self):
def some_op(ctx: dict) -> dict:
return dict(ctx)
from cate.core.op import OP_REGISTRY
try:
op_reg = OP_REGISTRY.add_op(some_op)
op_reg.op_meta_info.inputs['ctx']['context'] = True
ws = Workspace('/path', Workflow(OpMetaInfo('workspace_workflow', header=dict(description='Test!'))))
ws.set_resource(op_reg.op_meta_info.qualified_name, {}, res_name='new_ctx')
ws.execute_workflow('new_ctx')
self.assertTrue('new_ctx' in ws.resource_cache)
self.assertTrue('workspace' in ws.resource_cache['new_ctx'])
self.assertIs(ws.resource_cache['new_ctx']['workspace'], ws)
finally:
OP_REGISTRY.remove_op(some_op)
def test_workspace_can_create_new_res_names(self):
ws = Workspace('/path', Workflow(OpMetaInfo('workspace_workflow', header=dict(description='Test!'))))
res_name_1 = ws.set_resource('cate.ops.utility.identity', mk_op_kwargs(value='A'))
res_name_2 = ws.set_resource('cate.ops.utility.identity', mk_op_kwargs(value='B'))
res_name_3 = ws.set_resource('cate.ops.utility.identity', mk_op_kwargs(value='C'))
self.assertEqual(res_name_1, 'res_1')
self.assertEqual(res_name_2, 'res_2')
self.assertEqual(res_name_3, 'res_3')
self.assertIsNotNone(ws.workflow.find_node(res_name_1))
self.assertIsNotNone(ws.workflow.find_node(res_name_2))
self.assertIsNotNone(ws.workflow.find_node(res_name_3))
def test_to_json_dict(self):
def dataset_op() -> xr.Dataset:
periods = 5
temperature_data = (15 + 8 * np.random.randn(periods, 2, 2)).round(decimals=1)
temperature_attrs = {'a': np.array([1, 2, 3]), 'comment': 'hot', '_FillValue': np.nan}
precipitation_data = (10 * np.random.rand(periods, 2, 2)).round(decimals=1)
precipitation_attrs = {'x': True, 'comment': 'wet', '_FillValue': -1.0}
ds = xr.Dataset(
data_vars={
'temperature': (('time', 'lat', 'lon'), temperature_data, temperature_attrs),
'precipitation': (('time', 'lat', 'lon'), precipitation_data, precipitation_attrs)
},
coords={
'lon': np.array([12, 13]),
'lat': np.array([50, 51]),
                    'time': pd.date_range('2014-09-06', periods=periods)
                }
            )
            return ds
import numpy as np
import pandas as pd
from tqdm import tqdm
import fmow_helper
BOXCOL = 'box0 box1 box2 box3'.split()
def fakegen(pbox):
mask = np.ones(len(pbox), dtype=bool)
nbox = pbox.copy()
i0 = 100
for i in tqdm(range(2*i0), desc="generate false_detection boxes"):
if not np.any(mask):
continue
n = np.sum(mask)
pbm = pbox[mask]
for col in BOXCOL[:2]:
nbox.loc[mask, col] = np.floor(np.random.rand(n) * pbm['img_width' if col in ('box0','box2') else 'img_height']).astype(int)
mu = 0.45
sd = 0.8
sigmoid = lambda t: 1 / (1+np.exp(-t))
rs = sigmoid(np.log(mu) + sd * np.random.randn(n))
asd = 0.6
ra = np.exp(asd * np.random.randn(n) / 2)
nbox.loc[mask, 'box2'] = np.floor(rs*ra * pbm.img_width).astype(int)
nbox.loc[mask, 'box3'] = np.floor(rs/ra * pbm.img_width).astype(int)
mask2 = mask.copy() & False
mask2 |= (nbox.box2 <= 0)
mask2 |= (nbox.box3 <= 0)
mask2 |= (nbox.box0+nbox.box2 >= nbox.img_width)
mask2 |= (nbox.box1+nbox.box3 >= nbox.img_height)
if i < i0:
mask2 |= np.minimum(np.minimum(nbox.box0+nbox.box2, pbox.box0+pbox.box2) - np.maximum(nbox.box0, pbox.box0),
np.minimum(nbox.box1+nbox.box3, pbox.box1+pbox.box3) - np.maximum(nbox.box1, pbox.box1)) > 0
mask = mask2
return nbox.assign(category='false_detection')
def full_fakegen(btrain, seed):
from scipy.stats import poisson
obj_width_m = btrain.groupby('obj_id').width_m.first()
obj_rate = pd.Series([1/6, 1/3, 5/6], [500, 1500, 5000]).loc[obj_width_m].values
obj_num = pd.Series(poisson.rvs(obj_rate, random_state=seed), obj_width_m.index)
obj_first_id = 9*10**5 + obj_num.shift().fillna(0).astype(int).cumsum()
box_num = obj_num.loc[btrain.obj_id].values
jumps = np.cumsum(box_num)
    i = np.cumsum(pd.value_counts(jumps))
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
from linearmodels.iv.data import IVData
try:
import xarray as xr
MISSING_XARRAY = False
except ImportError:
MISSING_XARRAY = True
def test_numpy_2d() -> None:
x = np.empty((10, 2))
xdh = IVData(x)
assert xdh.ndim == x.ndim
assert xdh.cols == ["x.0", "x.1"]
assert xdh.rows == list(np.arange(10))
assert_equal(xdh.ndarray, x)
df = pd.DataFrame(x, columns=xdh.cols, index=xdh.rows)
assert_frame_equal(xdh.pandas, df)
assert xdh.shape == (10, 2)
assert xdh.labels == {0: xdh.rows, 1: xdh.cols}
def test_numpy_1d() -> None:
x = np.empty(10)
xdh = IVData(x)
assert xdh.ndim == 2
assert xdh.cols == ["x"]
assert xdh.rows == list(np.arange(10))
assert_equal(xdh.ndarray, x[:, None])
df = pd.DataFrame(x[:, None], columns=xdh.cols, index=xdh.rows)
assert_frame_equal(xdh.pandas, df)
assert xdh.shape == (10, 1)
def test_pandas_df_numeric() -> None:
x = np.empty((10, 2))
index = pd.date_range("2017-01-01", periods=10)
xdf = pd.DataFrame(x, columns=["a", "b"], index=index)
xdh = IVData(xdf)
assert xdh.ndim == 2
assert xdh.cols == list(xdf.columns)
assert xdh.rows == list(xdf.index)
assert_equal(xdh.ndarray, x)
df = pd.DataFrame(x, columns=xdh.cols, index=xdh.rows).asfreq("D")
assert_frame_equal(xdh.pandas, df)
assert xdh.shape == (10, 2)
def test_pandas_series_numeric() -> None:
x = np.empty(10)
index = pd.date_range("2017-01-01", periods=10)
xs = pd.Series(x, name="charlie", index=index)
xdh = IVData(xs)
assert xdh.ndim == 2
assert xdh.cols == [xs.name]
assert xdh.rows == list(xs.index)
assert_equal(xdh.ndarray, x[:, None])
df = pd.DataFrame(x[:, None], columns=xdh.cols, index=xdh.rows).asfreq("D")
assert_frame_equal(xdh.pandas, df)
assert xdh.shape == (10, 1)
@pytest.mark.skipif(MISSING_XARRAY, reason="xarray not installed")
def test_xarray_1d() -> None:
x_np = np.random.randn(10)
x = xr.DataArray(x_np)
dh = IVData(x, "some_variable")
assert_equal(dh.ndarray, x_np[:, None])
assert dh.rows == list(np.arange(10))
assert dh.cols == ["some_variable.0"]
expected = pd.DataFrame(x_np, columns=dh.cols, index=dh.rows)
assert_frame_equal(expected, dh.pandas)
    index = pd.date_range("2017-01-01", periods=10)
import os
import pandas as pd
import json
from dataV3 import getAnsNumberFromLabel
from dataV3 import getQuestionNumberFromLabel
#workflow
#import all s_iaa, concat into a single df
#import all tags, concat into a single df
#Do the operations to get the rest of the things and add that to the df
#Split the df into component parts before dependency.py
ADJUDICATED_AGREEMENT_SCORE = .8
def import_tags(old_s_iaa_dir, tags_dir, schema_dir, output_dir):
'''
old_s_iaa_dir is directory to the output of iaa before it got sent to the adjudicator
For SFU old_s_iaa_dir can be an empty folder.
tags_dir is directory to adjudicator output
schema_dir is directory to where the schemas are held, should be same location as when IAA was run
output_dir is where the updated S_IAA files will be sent to
Requires every iaa file have namespaces that are unique to each schema
if a question didn't pass IAA, it won't have its fields corrected
output file naming convention is 'S_IAA'+schema_namespace
'''
tag_files = []
for root, dir, files in os.walk(tags_dir):
for file in files:
tag_files.append(tags_dir+'/'+file)
iaa_files = []
for root, dir, files in os.walk(old_s_iaa_dir):
for file in files:
if file.endswith('.csv') and 'iaa' in file.lower():
iaa_files.append(old_s_iaa_dir + '/' + file)
schema_files = []
for root, dir, files in os.walk(schema_dir):
for file in files:
if file.endswith('.csv'):
schema_files.append(schema_dir + '/' + file)
print('schema files: ', schema_dir, schema_files)
temp_dfs = []
for i in range(len(iaa_files)):
temp_dfs.append(pd.read_csv(iaa_files[i]))
    if len(temp_dfs) >= 1:
iaa = pd.concat(temp_dfs)
else:
print("no iaa file found")
temp_dfs = []
for i in range(len(tag_files)):
temp_dfs.append(pd.read_csv(tag_files[i]))
tags = pd.concat(temp_dfs)
#Nan answer_uuid means it likely came from a triager task and we can disregard
tags = tags.dropna(subset = ['answer_uuid'])
temp_dfs = []
for i in range(len(schema_files)):
        temp_dfs.append(pd.read_csv(schema_files[i]))
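# Hedged usage sketch; the four directory names are placeholders, not paths from this project:
# import_tags('./old_s_iaa', './adjudicator_output', './schemas', './s_iaa_updated')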
"""
File: test_utils.py
Author: <NAME>
GitHub: https://github.com/PanyiDong/
Mathematics Department, University of Illinois at Urbana-Champaign (UIUC)
Project: My_AutoML
Latest Version: 0.2.0
Relative Path: /tests/test_utils/test_utils.py
File Created: Friday, 15th April 2022 7:42:15 pm
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 10th May 2022 8:10:40 pm
Modified By: <NAME> (<EMAIL>)
-----
MIT License
Copyright (c) 2022 - 2022, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import numpy as np
import pandas as pd
def test_load_data():
from My_AutoML import load_data
data = load_data().load("Appendix", "insurance")
assert isinstance(
data, dict
), "load_data should return a dict database, get {}".format(type(data))
assert isinstance(
data["insurance"], pd.DataFrame
), "load_data should return a dict database containing dataframes, get {}".format(
type(data["insurance"])
)
def test_random_guess():
from My_AutoML._utils._base import random_guess
assert random_guess(1) == 1, "random_guess(1) should be 1, get {}".format(
random_guess(1)
)
assert random_guess(0) == 0, "random_guess(0) should be 0, get {}".format(
random_guess(0)
)
assert (
random_guess(0.5) == 0 or random_guess(0.5) == 1
), "random_guess(0.5) should be either 0 or 1, get {}".format(random_guess(0.5))
def test_random_index():
from My_AutoML._utils._base import random_index
assert (
np.sort(random_index(5)) == np.array([0, 1, 2, 3, 4])
).all(), "random_index(5) should contain [0, 1, 2, 3, 4], get {}".format(
random_index(5)
)
def test_random_list():
from My_AutoML._utils._base import random_list
assert (
np.sort(random_list([0, 1, 2, 3, 4])) == np.array([0, 1, 2, 3, 4])
).all(), "random_index(5) should contain [0, 1, 2, 3, 4], get {}".format(
random_list([0, 1, 2, 3, 4])
)
def test_is_date():
from My_AutoML._utils._base import is_date
test = pd.DataFrame(
{
"col_1": [1, 2, 3, 4, 5],
"col_2": [
"2020-01-01",
"2020-01-02",
"2020-01-03",
"2020-01-04",
"2020-01-05",
],
}
)
assert is_date(test, rule="all"), "The is_date method is not correctly done."
def test_feature_rounding():
from My_AutoML._utils._base import feature_rounding
test = pd.DataFrame(
{
"col_1": [1, 2, 3, 4, 5],
"col_2": [1.2, 2.2, 3.2, 4.2, 5.2],
}
)
target_data = pd.DataFrame(
{
"col_1": [1, 2, 3, 4, 5],
"col_2": [1.0, 2.0, 3.0, 4.0, 5.0],
}
)
assert (
feature_rounding(test) == target_data
).all().all() == True, "The feature_rounding method is not correctly done."
def test_timer():
from My_AutoML._utils._base import Timer
import time
timer = Timer()
timer.start()
time.sleep(4)
timer.stop()
timer.start()
time.sleep(3)
timer.stop()
assert timer.sum() / timer.avg() == 2.0, "The timer is not correctly done."
assert timer.cumsum()[-1] == timer.sum(), "The timer is not correctly done."
def test_minloc():
from My_AutoML._utils._base import minloc
assert (
minloc([4, 2, 6, 2, 1]) == 4
), "minloc([4, 2, 6, 2, 1]) should be 5, get {}".format(minloc([4, 2, 6, 2, 1]))
def test_maxloc():
from My_AutoML._utils._base import maxloc
assert (
maxloc([4, 2, 6, 2, 1]) == 2
), "maxloc([4, 2, 6, 2, 1]) should be 5, get {}".format(maxloc([4, 2, 6, 2, 1]))
def test_True_index():
from My_AutoML._utils._base import True_index
assert True_index([True, False, 1, 0, "hello", 5]) == [
0,
2,
], "True_index([True, False, 1, 0, 'hello', 5]) should be [0, 2], get {}".format(
True_index([True, False, 1, 0, "hello", 5])
)
def test_type_of_script():
from My_AutoML._utils._base import type_of_script
assert (
type_of_script() == "terminal"
), "type_of_script() should be 'terminal', get {}".format(type_of_script())
def test_as_dataframe():
from My_AutoML._utils._data import as_dataframe
converter = as_dataframe()
_array = converter.to_array(pd.DataFrame([1, 2, 3, 4]))
_df = converter.to_df(_array)
assert isinstance(
_array, np.ndarray
), "as_dataframe.to_array should return a np.ndarray, get {}".format(type(_array))
assert isinstance(
_df, pd.DataFrame
), "as_dataframe.to_df should return a pd.DataFrame, get {}".format(type(_df))
def test_unify_nan():
from My_AutoML._utils._data import unify_nan
data = np.arange(15).reshape(5, 3)
    data = pd.DataFrame(data, columns=["column_1", "column_2", "column_3"])
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
concat,
date_range,
)
import pandas._testing as tm
class TestEmptyConcat:
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
{"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"left,right,expected",
[
# booleans
(np.bool_, np.int32, np.int32),
(np.bool_, np.float32, np.object_),
# datetime-like
("m8[ns]", np.bool_, np.object_),
("m8[ns]", np.int64, np.object_),
("M8[ns]", np.bool_, np.object_),
("M8[ns]", np.int64, np.object_),
# categorical
("category", "category", "category"),
("category", "object", "object"),
],
)
def test_concat_empty_series_dtypes(self, left, right, expected):
result = concat([Series(dtype=left), Series(dtype=right)])
assert result.dtype == expected
@pytest.mark.parametrize(
"dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
)
def test_concat_empty_series_dtypes_match_roundtrips(self, dtype):
dtype = np.dtype(dtype)
result = concat([Series(dtype=dtype)])
assert result.dtype == dtype
result = concat([Series(dtype=dtype), Series(dtype=dtype)])
assert result.dtype == dtype
def test_concat_empty_series_dtypes_roundtrips(self):
# round-tripping with self & like self
dtypes = map(np.dtype, ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"])
def int_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"i", "u", "b"}) and (
dtype.kind == "i" or dtype2.kind == "i"
):
return "i"
elif not len(typs - {"u", "b"}) and (
dtype.kind == "u" or dtype2.kind == "u"
):
return "u"
return None
def float_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"f", "i", "u"}) and (
dtype.kind == "f" or dtype2.kind == "f"
):
return "f"
return None
def get_result_type(dtype, dtype2):
result = float_result_type(dtype, dtype2)
if result is not None:
return result
result = int_result_type(dtype, dtype2)
if result is not None:
return result
return "O"
for dtype in dtypes:
for dtype2 in dtypes:
if dtype == dtype2:
continue
expected = get_result_type(dtype, dtype2)
result = concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype
assert result.kind == expected
def test_concat_empty_series_dtypes_triple(self):
assert (
concat(
[
                    Series(dtype="M8[ns]")
import pandas as pd
from libs.utils import haversine, calcWindComponents, isaDiff, getPerf, loadBook
from configuration.units import runwayUnits
# definitions
def findTakeoff(flight): # returns the row of the takeoff point
garminGround = flight[flight["OnGrnd"] == 0].index.min() # Garmin Ground indicator
startAltitude = flight.loc[garminGround, "AltGPS"]
return flight[
(flight.index > garminGround)
& (flight.AltGPS > startAltitude + 3)
& (flight.VSpd > 100)
].index.min()
def find50feet(flight): # returns the row of the takeoff point
garminGround = flight[flight["OnGrnd"] == 0].index.min() # Garmin Ground indicator
startAltitude = flight.loc[garminGround, "AltGPS"]
return flight[
(flight.index > garminGround) & (flight.AltGPS > startAltitude + 50)
].index.min()
def takeoffStability(flight, modelConfig): # returns the row of the takeoff point
garminGround = flight[flight["OnGrnd"] == 0].index.min() # Garmin Ground indicator
startAltitude = flight.loc[garminGround, "AltGPS"]
takeoff = findTakeoff(flight)
fivehundred = flight[
(flight.index > garminGround) & (flight.AltGPS > startAltitude + 500)
].index.min()
maxPitch = int(flight.loc[takeoff:fivehundred, "Pitch"].max())
minPitch = int(flight.loc[takeoff:fivehundred, "Pitch"].min())
maxRoll = int(flight.loc[takeoff:fivehundred, "Roll"].abs().max())
continuousClimb = flight.loc[takeoff:fivehundred, "VSpd"].min() > 0
bookMaxPitch = int(modelConfig.loc["takeoffMaxPitch", "Value"])
bookMinPitch = int(modelConfig.loc["takeoffMinPitch", "Value"])
bookMaxRoll = int(modelConfig.loc["takeoffMaxRoll", "Value"])
stableTable = pd.DataFrame(columns=["Actual", "Book", "Stability", "Units"])
stableTable.loc["Takeoff Max Pitch"] = [
maxPitch,
bookMaxPitch,
maxPitch > bookMaxPitch,
"degrees",
]
stableTable.loc["Takeoff Min Pitch"] = [
minPitch,
bookMinPitch,
minPitch < bookMinPitch,
"degrees",
]
stableTable.loc["Takeoff Max Roll"] = [
maxRoll,
bookMaxRoll,
maxRoll > bookMaxRoll,
"degrees",
]
stableTable.loc["Takeoff Continuous Climb"] = [
continuousClimb,
"True",
not continuousClimb,
"-",
]
stableTable["Stability"] = stableTable["Stability"].apply(
lambda x: "Unstable" if x else "Stable"
)
stableTable.loc["Takeoff Stability"] = [
"Stable" if (stableTable["Stability"] == "Stable").all() else "Unstable",
"True",
"-",
"-",
]
return stableTable
def findGroundRollStart(
groundPortion, modelConfig
): # finds the row where take off roll started. This is model dependent
takeoffPowerTreshold = float(
modelConfig.loc["takeoffPowerTreshold", "Value"]
) # indicates the POWER above which we consider the ground roll to start
takeoffPowerIndicator = modelConfig.loc["takeoffPowerIndicator", "Value"]
return groundPortion[
groundPortion[takeoffPowerIndicator] > takeoffPowerTreshold
].index.min()
def calcGroundRoll(flight, modelConfig):
garminGround = flight[flight["OnGrnd"] == 0].index.min() # Garmin Ground indicator
takeoffPoint = findTakeoff(flight)
rollStart = findGroundRollStart(flight[:takeoffPoint], modelConfig)
dist = haversine(
flight["Longitude"][rollStart],
flight["Latitude"][rollStart],
flight["Longitude"][takeoffPoint],
flight["Latitude"][takeoffPoint],
runwayUnits,
)
ais = flight.loc[takeoffPoint, "IAS"]
temp = flight.loc[rollStart, "OAT"]
pressAlt = flight.loc[rollStart, "AltPress"]
windSpeed = flight.loc[garminGround:takeoffPoint, "WndSpd"].mean()
windDirection = flight.loc[garminGround:takeoffPoint, "WndDr"].mean()
track = flight.loc[garminGround:takeoffPoint, "TRK"].mean()
return dist, ais, temp, pressAlt, windSpeed, windDirection, track
def calc50feetDistance(flight, modelConfig):
fiftyfeetPoint = find50feet(flight)
rollStart = findGroundRollStart(flight[:fiftyfeetPoint], modelConfig)
dist = haversine(
flight["Longitude"][rollStart],
flight["Latitude"][rollStart],
flight["Longitude"][fiftyfeetPoint],
flight["Latitude"][fiftyfeetPoint],
runwayUnits,
)
engineType = modelConfig.loc["engineType", "Value"]
if engineType == "piston":
bookTakeoffMAP = float(modelConfig.loc["takeoffMAP", "Value"])
bookTakeoffRPM = float(modelConfig.loc["takeoffRPM", "Value"])
bookminTakeoffFFlow = float(modelConfig.loc["minTakeoffFFlow", "Value"])
takeoffMAP = (
flight["E1 MAP"][fiftyfeetPoint - 10 : fiftyfeetPoint].mean().round(1)
)
takeoffRPM = (
flight["E1 RPM"][fiftyfeetPoint - 10 : fiftyfeetPoint].mean().round(0)
)
takeoffFFlow = (
flight["E1 FFlow"][fiftyfeetPoint - 10 : fiftyfeetPoint].mean().round(1)
)
engineInfo = pd.DataFrame(
[
[takeoffMAP, bookTakeoffMAP, "inches"],
[takeoffRPM, bookTakeoffRPM],
[takeoffFFlow, bookminTakeoffFFlow, "gph"],
],
index=["Take off MAP", "Take off RPM", "Take off Fuel Flow"],
columns=["Actual", "Book", "Units"],
)
engineInfo["Variance"] = round(100 * (engineInfo.Actual / engineInfo.Book - 1))
engineInfo = engineInfo[["Actual", "Book", "Variance", "Units"]]
else:
        engineInfo = pd.DataFrame(columns=["Actual", "Book", "Variance", "Units"])
import os
from packaging.version import Version
import numpy as np
import pandas as pd
from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box
from fiona.errors import DriverError
import geopandas
from geopandas import GeoDataFrame, GeoSeries, overlay, read_file
from geopandas import _compat
from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal
import pytest
DATA = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data", "overlay")
pytestmark = pytest.mark.skip_no_sindex
pandas_133 = Version(pd.__version__) == Version("1.3.3")
@pytest.fixture
def dfs(request):
s1 = GeoSeries(
[
Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
Polygon([(2, 2), (4, 2), (4, 4), (2, 4)]),
]
)
s2 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df1 = GeoDataFrame({"col1": [1, 2], "geometry": s1})
df2 = GeoDataFrame({"col2": [1, 2], "geometry": s2})
return df1, df2
@pytest.fixture(params=["default-index", "int-index", "string-index"])
def dfs_index(request, dfs):
df1, df2 = dfs
if request.param == "int-index":
df1.index = [1, 2]
df2.index = [0, 2]
if request.param == "string-index":
df1.index = ["row1", "row2"]
return df1, df2
@pytest.fixture(
params=["union", "intersection", "difference", "symmetric_difference", "identity"]
)
def how(request):
if pandas_133 and request.param in ["symmetric_difference", "identity", "union"]:
pytest.xfail("Regression in pandas 1.3.3 (GH #2101)")
return request.param
@pytest.fixture(params=[True, False])
def keep_geom_type(request):
return request.param
def test_overlay(dfs_index, how):
"""
Basic overlay test with small dummy example dataframes (from docs).
Results obtained using QGIS 2.16 (Vector -> Geoprocessing Tools ->
Intersection / Union / ...), saved to GeoJSON
"""
df1, df2 = dfs_index
result = overlay(df1, df2, how=how)
# construction of result
def _read(name):
expected = read_file(
os.path.join(DATA, "polys", "df1_df2-{0}.geojson".format(name))
)
expected.crs = None
return expected
if how == "identity":
expected_intersection = _read("intersection")
expected_difference = _read("difference")
expected = pd.concat(
[expected_intersection, expected_difference], ignore_index=True, sort=False
)
expected["col1"] = expected["col1"].astype(float)
else:
expected = _read(how)
# TODO needed adaptations to result
if how == "union":
result = result.sort_values(["col1", "col2"]).reset_index(drop=True)
elif how == "difference":
result = result.reset_index(drop=True)
assert_geodataframe_equal(result, expected, check_column_type=False)
# for difference also reversed
if how == "difference":
result = overlay(df2, df1, how=how)
result = result.reset_index(drop=True)
expected = _read("difference-inverse")
assert_geodataframe_equal(result, expected, check_column_type=False)
@pytest.mark.filterwarnings("ignore:GeoSeries crs mismatch:UserWarning")
def test_overlay_nybb(how):
polydf = read_file(geopandas.datasets.get_path("nybb"))
# The circles have been constructed and saved at the time the expected
# results were created (exact output of buffer algorithm can slightly
# change over time -> use saved ones)
# # construct circles dataframe
# N = 10
# b = [int(x) for x in polydf.total_bounds]
# polydf2 = GeoDataFrame(
# [
# {"geometry": Point(x, y).buffer(10000), "value1": x + y, "value2": x - y}
# for x, y in zip(
# range(b[0], b[2], int((b[2] - b[0]) / N)),
# range(b[1], b[3], int((b[3] - b[1]) / N)),
# )
# ],
# crs=polydf.crs,
# )
polydf2 = read_file(os.path.join(DATA, "nybb_qgis", "polydf2.shp"))
result = overlay(polydf, polydf2, how=how)
cols = ["BoroCode", "BoroName", "Shape_Leng", "Shape_Area", "value1", "value2"]
if how == "difference":
cols = cols[:-2]
# expected result
if how == "identity":
# read union one, further down below we take the appropriate subset
expected = read_file(os.path.join(DATA, "nybb_qgis", "qgis-union.shp"))
else:
expected = read_file(
os.path.join(DATA, "nybb_qgis", "qgis-{0}.shp".format(how))
)
# The result of QGIS for 'union' contains incorrect geometries:
# 24 is a full original circle overlapping with unioned geometries, and
# 27 is a completely duplicated row)
if how == "union":
expected = expected.drop([24, 27])
expected.reset_index(inplace=True, drop=True)
# Eliminate observations without geometries (issue from QGIS)
expected = expected[expected.is_valid]
expected.reset_index(inplace=True, drop=True)
if how == "identity":
expected = expected[expected.BoroCode.notnull()].copy()
# Order GeoDataFrames
expected = expected.sort_values(cols).reset_index(drop=True)
# TODO needed adaptations to result
result = result.sort_values(cols).reset_index(drop=True)
if how in ("union", "identity"):
# concat < 0.23 sorts, so changes the order of the columns
# but at least we ensure 'geometry' is the last column
assert result.columns[-1] == "geometry"
assert len(result.columns) == len(expected.columns)
result = result.reindex(columns=expected.columns)
# the ordering of the spatial index results causes slight deviations
# in the resultant geometries for multipolygons
# for more details on the discussion, see:
# https://github.com/geopandas/geopandas/pull/1338
# https://github.com/geopandas/geopandas/issues/1337
# Temporary workaround below:
# simplify multipolygon geometry comparison
# since the order of the constituent polygons depends on
# the ordering of spatial indexing results, we cannot
# compare symmetric_difference results directly when the
# resultant geometry is a multipolygon
# first, check that all bounds and areas are approx equal
# this is a very rough check for multipolygon equality
if not _compat.PANDAS_GE_11:
kwargs = dict(check_less_precise=True)
else:
kwargs = {}
pd.testing.assert_series_equal(
result.geometry.area, expected.geometry.area, **kwargs
)
pd.testing.assert_frame_equal(
result.geometry.bounds, expected.geometry.bounds, **kwargs
)
# There are two cases where the multipolygon have a different number
# of sub-geometries -> not solved by normalize (and thus drop for now)
if how == "symmetric_difference":
expected.loc[9, "geometry"] = None
result.loc[9, "geometry"] = None
if how == "union":
expected.loc[24, "geometry"] = None
result.loc[24, "geometry"] = None
assert_geodataframe_equal(
result,
expected,
normalize=True,
check_crs=False,
check_column_type=False,
check_less_precise=True,
)
def test_overlay_overlap(how):
"""
Overlay test with overlapping geometries in both dataframes.
Test files are created with::
import geopandas
from geopandas import GeoSeries, GeoDataFrame
from shapely.geometry import Point, Polygon, LineString
s1 = GeoSeries([Point(0, 0), Point(1.5, 0)]).buffer(1, resolution=2)
s2 = GeoSeries([Point(1, 1), Point(2, 2)]).buffer(1, resolution=2)
df1 = GeoDataFrame({'geometry': s1, 'col1':[1,2]})
df2 = GeoDataFrame({'geometry': s2, 'col2':[1, 2]})
ax = df1.plot(alpha=0.5)
df2.plot(alpha=0.5, ax=ax, color='C1')
df1.to_file('geopandas/geopandas/tests/data/df1_overlap.geojson',
driver='GeoJSON')
df2.to_file('geopandas/geopandas/tests/data/df2_overlap.geojson',
driver='GeoJSON')
and then overlay results are obtained from using QGIS 2.16
(Vector -> Geoprocessing Tools -> Intersection / Union / ...),
saved to GeoJSON.
"""
df1 = read_file(os.path.join(DATA, "overlap", "df1_overlap.geojson"))
df2 = read_file(os.path.join(DATA, "overlap", "df2_overlap.geojson"))
result = overlay(df1, df2, how=how)
if how == "identity":
raise pytest.skip()
expected = read_file(
os.path.join(DATA, "overlap", "df1_df2_overlap-{0}.geojson".format(how))
)
if how == "union":
# the QGIS result has the last row duplicated, so removing this
expected = expected.iloc[:-1]
# TODO needed adaptations to result
result = result.reset_index(drop=True)
if how == "union":
result = result.sort_values(["col1", "col2"]).reset_index(drop=True)
assert_geodataframe_equal(
result,
expected,
normalize=True,
check_column_type=False,
check_less_precise=True,
)
@pytest.mark.parametrize("other_geometry", [False, True])
def test_geometry_not_named_geometry(dfs, how, other_geometry):
# Issue #306
# Add points and flip names
df1, df2 = dfs
df3 = df1.copy()
df3 = df3.rename(columns={"geometry": "polygons"})
df3 = df3.set_geometry("polygons")
if other_geometry:
df3["geometry"] = df1.centroid.geometry
assert df3.geometry.name == "polygons"
res1 = overlay(df1, df2, how=how)
res2 = overlay(df3, df2, how=how)
assert df3.geometry.name == "polygons"
if how == "difference":
# in case of 'difference', column names of left frame are preserved
assert res2.geometry.name == "polygons"
if other_geometry:
assert "geometry" in res2.columns
assert_geoseries_equal(
res2["geometry"], df3["geometry"], check_series_type=False
)
res2 = res2.drop(["geometry"], axis=1)
res2 = res2.rename(columns={"polygons": "geometry"})
res2 = res2.set_geometry("geometry")
# TODO if existing column is overwritten -> geometry not last column
if other_geometry and how == "intersection":
res2 = res2.reindex(columns=res1.columns)
assert_geodataframe_equal(res1, res2)
df4 = df2.copy()
df4 = df4.rename(columns={"geometry": "geom"})
df4 = df4.set_geometry("geom")
if other_geometry:
df4["geometry"] = df2.centroid.geometry
assert df4.geometry.name == "geom"
res1 = overlay(df1, df2, how=how)
res2 = overlay(df1, df4, how=how)
assert_geodataframe_equal(res1, res2)
def test_bad_how(dfs):
df1, df2 = dfs
with pytest.raises(ValueError):
overlay(df1, df2, how="spandex")
def test_duplicate_column_name(dfs, how):
if how == "difference":
pytest.skip("Difference uses columns from one df only.")
df1, df2 = dfs
df2r = df2.rename(columns={"col2": "col1"})
res = overlay(df1, df2r, how=how)
assert ("col1_1" in res.columns) and ("col1_2" in res.columns)
def test_geoseries_warning(dfs):
df1, df2 = dfs
# Issue #305
with pytest.raises(NotImplementedError):
overlay(df1, df2.geometry, how="union")
def test_preserve_crs(dfs, how):
df1, df2 = dfs
result = overlay(df1, df2, how=how)
assert result.crs is None
crs = "epsg:4326"
df1.crs = crs
df2.crs = crs
result = overlay(df1, df2, how=how)
assert result.crs == crs
def test_crs_mismatch(dfs, how):
df1, df2 = dfs
df1.crs = 4326
df2.crs = 3857
with pytest.warns(UserWarning, match="CRS mismatch between the CRS"):
overlay(df1, df2, how=how)
def test_empty_intersection(dfs):
df1, df2 = dfs
polys3 = GeoSeries(
[
Polygon([(-1, -1), (-3, -1), (-3, -3), (-1, -3)]),
Polygon([(-3, -3), (-5, -3), (-5, -5), (-3, -5)]),
]
)
df3 = GeoDataFrame({"geometry": polys3, "col3": [1, 2]})
expected = GeoDataFrame([], columns=["col1", "col3", "geometry"])
result = overlay(df1, df3)
assert_geodataframe_equal(result, expected, check_dtype=False)
def test_correct_index(dfs):
# GH883 - case where the index was not properly reset
df1, df2 = dfs
polys3 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df3 = GeoDataFrame({"geometry": polys3, "col3": [1, 2, 3]})
i1 = Polygon([(1, 1), (1, 3), (3, 3), (3, 1), (1, 1)])
i2 = Polygon([(3, 3), (3, 5), (5, 5), (5, 3), (3, 3)])
expected = GeoDataFrame(
[[1, 1, i1], [3, 2, i2]], columns=["col3", "col2", "geometry"]
)
result = overlay(df3, df2, keep_geom_type=True)
assert_geodataframe_equal(result, expected)
def test_warn_on_keep_geom_type(dfs):
df1, df2 = dfs
polys3 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df3 = GeoDataFrame({"geometry": polys3})
with pytest.warns(UserWarning, match="`keep_geom_type=True` in overlay"):
overlay(df2, df3, keep_geom_type=None)
@pytest.mark.parametrize(
"geom_types", ["polys", "poly_line", "poly_point", "line_poly", "point_poly"]
)
def test_overlay_strict(how, keep_geom_type, geom_types):
"""
Test of mixed geometry types on input and output. Expected results initially
generated using following snippet.
polys1 = gpd.GeoSeries([Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)])])
df1 = gpd.GeoDataFrame({'col1': [1, 2], 'geometry': polys1})
polys2 = gpd.GeoSeries([Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)])])
df2 = gpd.GeoDataFrame({'geometry': polys2, 'col2': [1, 2, 3]})
lines1 = gpd.GeoSeries([LineString([(2, 0), (2, 4), (6, 4)]),
LineString([(0, 3), (6, 3)])])
df3 = gpd.GeoDataFrame({'col3': [1, 2], 'geometry': lines1})
points1 = gpd.GeoSeries([Point((2, 2)),
Point((3, 3))])
df4 = gpd.GeoDataFrame({'col4': [1, 2], 'geometry': points1})
params=["union", "intersection", "difference", "symmetric_difference",
"identity"]
stricts = [True, False]
for p in params:
for s in stricts:
exp = gpd.overlay(df1, df2, how=p, keep_geom_type=s)
if not exp.empty:
exp.to_file('polys_{p}_{s}.geojson'.format(p=p, s=s),
driver='GeoJSON')
for p in params:
for s in stricts:
exp = gpd.overlay(df1, df3, how=p, keep_geom_type=s)
if not exp.empty:
exp.to_file('poly_line_{p}_{s}.geojson'.format(p=p, s=s),
driver='GeoJSON')
for p in params:
for s in stricts:
exp = gpd.overlay(df1, df4, how=p, keep_geom_type=s)
if not exp.empty:
exp.to_file('poly_point_{p}_{s}.geojson'.format(p=p, s=s),
driver='GeoJSON')
"""
polys1 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df1 = GeoDataFrame({"col1": [1, 2], "geometry": polys1})
polys2 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df2 = GeoDataFrame({"geometry": polys2, "col2": [1, 2, 3]})
lines1 = GeoSeries(
[LineString([(2, 0), (2, 4), (6, 4)]), LineString([(0, 3), (6, 3)])]
)
df3 = GeoDataFrame({"col3": [1, 2], "geometry": lines1})
points1 = GeoSeries([Point((2, 2)), Point((3, 3))])
df4 = GeoDataFrame({"col4": [1, 2], "geometry": points1})
if geom_types == "polys":
result = overlay(df1, df2, how=how, keep_geom_type=keep_geom_type)
elif geom_types == "poly_line":
result = overlay(df1, df3, how=how, keep_geom_type=keep_geom_type)
elif geom_types == "poly_point":
result = overlay(df1, df4, how=how, keep_geom_type=keep_geom_type)
elif geom_types == "line_poly":
result = overlay(df3, df1, how=how, keep_geom_type=keep_geom_type)
elif geom_types == "point_poly":
result = overlay(df4, df1, how=how, keep_geom_type=keep_geom_type)
try:
expected = read_file(
os.path.join(
DATA,
"strict",
"{t}_{h}_{s}.geojson".format(t=geom_types, h=how, s=keep_geom_type),
)
)
# the order depends on the spatial index used
# so we sort the resultant dataframes to get a consistent order
# independently of the spatial index implementation
assert all(expected.columns == result.columns), "Column name mismatch"
cols = list(set(result.columns) - set(["geometry"]))
expected = expected.sort_values(cols, axis=0).reset_index(drop=True)
result = result.sort_values(cols, axis=0).reset_index(drop=True)
assert_geodataframe_equal(
result,
expected,
normalize=True,
check_column_type=False,
check_less_precise=True,
check_crs=False,
check_dtype=False,
)
except DriverError: # fiona >= 1.8
assert result.empty
except OSError: # fiona < 1.8
assert result.empty
def test_mixed_geom_error():
polys1 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df1 = GeoDataFrame({"col1": [1, 2], "geometry": polys1})
mixed = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
LineString([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
dfmixed = GeoDataFrame({"col1": [1, 2], "geometry": mixed})
with pytest.raises(NotImplementedError):
overlay(df1, dfmixed, keep_geom_type=True)
def test_keep_geom_type_error():
gcol = GeoSeries(
GeometryCollection(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
LineString([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
)
dfcol = GeoDataFrame({"col1": [2], "geometry": gcol})
polys1 = GeoSeries(
[
Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
]
)
df1 = GeoDataFrame({"col1": [1, 2], "geometry": polys1})
with pytest.raises(TypeError):
overlay(dfcol, df1, keep_geom_type=True)
def test_keep_geom_type_geometry_collection():
# GH 1581
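# keep_geom_type=None warns and keeps only the input geometry type, True filters to the
# input type silently, and False returns the raw result (a GeometryCollection here).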
df1 = read_file(os.path.join(DATA, "geom_type", "df1.geojson"))
df2 = read_file(os.path.join(DATA, "geom_type", "df2.geojson"))
with pytest.warns(UserWarning, match="`keep_geom_type=True` in overlay"):
intersection = overlay(df1, df2, keep_geom_type=None)
assert len(intersection) == 1
assert (intersection.geom_type == "Polygon").all()
intersection = overlay(df1, df2, keep_geom_type=True)
assert len(intersection) == 1
assert (intersection.geom_type == "Polygon").all()
intersection = overlay(df1, df2, keep_geom_type=False)
assert len(intersection) == 1
assert (intersection.geom_type == "GeometryCollection").all()
def test_keep_geom_type_geometry_collection2():
polys1 = [
box(0, 0, 1, 1),
box(1, 1, 3, 3).union(box(1, 3, 5, 5)),
]
polys2 = [
box(0, 0, 1, 1),
box(3, 1, 4, 2).union(box(4, 1, 5, 4)),
]
df1 = GeoDataFrame({"left": [0, 1], "geometry": polys1})
df2 = GeoDataFrame({"right": [0, 1], "geometry": polys2})
result1 = overlay(df1, df2, keep_geom_type=True)
expected1 = GeoDataFrame(
{
"left": [0, 1],
"right": [0, 1],
"geometry": [box(0, 0, 1, 1), box(4, 3, 5, 4)],
}
)
assert_geodataframe_equal(result1, expected1)
result1 = overlay(df1, df2, keep_geom_type=False)
expected1 = GeoDataFrame(
{
"left": [0, 1, 1],
"right": [0, 0, 1],
"geometry": [
box(0, 0, 1, 1),
Point(1, 1),
GeometryCollection([box(4, 3, 5, 4), LineString([(3, 1), (3, 2)])]),
],
}
)
assert_geodataframe_equal(result1, expected1)
def test_keep_geom_type_geomcoll_different_types():
polys1 = [box(0, 1, 1, 3), box(10, 10, 12, 12)]
polys2 = [
Polygon([(1, 0), (3, 0), (3, 3), (1, 3), (1, 2), (2, 2), (2, 1), (1, 1)]),
box(11, 11, 13, 13),
]
df1 = GeoDataFrame({"left": [0, 1], "geometry": polys1})
df2 = GeoDataFrame({"right": [0, 1], "geometry": polys2})
result1 = overlay(df1, df2, keep_geom_type=True)
expected1 = GeoDataFrame(
{
"left": [1],
"right": [1],
"geometry": [box(11, 11, 12, 12)],
}
)
assert_geodataframe_equal(result1, expected1)
result2 = overlay(df1, df2, keep_geom_type=False)
expected2 = GeoDataFrame(
{
"left": [0, 1],
"right": [0, 1],
"geometry": [
GeometryCollection([LineString([(1, 2), (1, 3)]), Point(1, 1)]),
box(11, 11, 12, 12),
],
}
)
assert_geodataframe_equal(result2, expected2)
def test_keep_geom_type_geometry_collection_difference():
# GH 2163
polys1 = [
box(0, 0, 1, 1),
box(1, 1, 2, 2),
]
# the tiny sliver in the second geometry may be converted to a
# linestring during the overlay process due to floating point errors
# on some platforms
polys2 = [
box(0, 0, 1, 1),
box(1, 1, 2, 3).union(box(2, 2, 3, 2.00000000000000001)),
]
df1 = GeoDataFrame({"left": [0, 1], "geometry": polys1})
df2 = GeoDataFrame({"right": [0, 1], "geometry": polys2})
result1 = overlay(df2, df1, keep_geom_type=True, how="difference")
expected1 = GeoDataFrame(
{
"right": [1],
"geometry": [box(1, 2, 2, 3)],
},
)
assert_geodataframe_equal(result1, expected1)
@pytest.mark.parametrize("make_valid", [True, False])
def test_overlap_make_valid(make_valid):
bowtie = Polygon([(1, 1), (9, 9), (9, 1), (1, 9), (1, 1)])
assert not bowtie.is_valid
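# buffer(0) is the usual shapely way to repair a self-intersecting polygon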
fixed_bowtie = bowtie.buffer(0)
assert fixed_bowtie.is_valid
df1 = GeoDataFrame({"col1": ["region"], "geometry": GeoSeries([box(0, 0, 10, 10)])})
df_bowtie = GeoDataFrame(
{"col1": ["invalid", "valid"], "geometry": GeoSeries([bowtie, fixed_bowtie])}
)
if make_valid:
df_overlay_bowtie = overlay(df1, df_bowtie, make_valid=make_valid)
assert df_overlay_bowtie.at[0, "geometry"].equals(fixed_bowtie)
assert df_overlay_bowtie.at[1, "geometry"].equals(fixed_bowtie)
else:
with pytest.raises(ValueError, match="1 invalid input geometries"):
overlay(df1, df_bowtie, make_valid=make_valid)
def test_empty_overlay_return_non_duplicated_columns():
nybb = geopandas.read_file(geopandas.datasets.get_path("nybb"))
nybb2 = nybb.copy()
nybb2.geometry = nybb2.translate(20000000)
result = geopandas.overlay(nybb, nybb2)
expected = GeoDataFrame(
columns=[
"BoroCode_1",
"BoroName_1",
"Shape_Leng_1",
"Shape_Area_1",
"BoroCode_2",
"BoroName_2",
"Shape_Leng_2",
"Shape_Area_2",
"geometry",
],
crs=nybb.crs,
)
assert_geodataframe_equal(result, expected, check_dtype=False)
def test_non_overlapping(how):
p1 = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
p2 = Polygon([(3, 3), (5, 3), (5, 5), (3, 5)])
df1 = GeoDataFrame({"col1": [1], "geometry": [p1]})
df2 = GeoDataFrame({"col2": [2], "geometry": [p2]})
result = overlay(df1, df2, how=how)
if how == "intersection":
expected = GeoDataFrame(
{
"col1": np.array([], dtype="int64"),
"col2": np.array([], dtype="int64"),
"geometry": [],
},
index=pd.Index([], dtype="object"),
)
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day, prev_market_trade_day
from qteasy.utilfuncs import next_market_trade_day
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator
from qteasy.history import stack_dataframes
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.database import DataSource
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
class TestCost(unittest.TestCase):
def setUp(self):
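# Fixture values (semantics inferred from the assertions below): `prices` are unit prices,
# `op` holds trade signals (0 = hold, 1 = buy, a negative fraction = sell that share of the
# holding), and `amounts` are the cash or holding quantities the signals act on.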
self.amounts = np.array([10000, 20000, 10000])
self.op = np.array([0, 1, -0.33333333])
self.prices = np.array([10, 20, 10])
self.r = qt.Cost()
def test_rate_creation(self):
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Cost')
def test_rate_operations(self):
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong')
def test_rate_fee(self):
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""Test transaction cost calculated by rate with min_fee"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-9:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal [0, 0, -3333.3333]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal 33298.88855591')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal 34.44444409')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = list(zip(extracted_int_list4, [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)]))
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# all points have been extracted; build ten sub-spaces around 10 of them
# check that each sub-space is a Space contained in s, extract a point set with interval 32, and verify the generated count
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
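# Axis type is inferred from its bounds: a float pair -> 'conti', an int pair -> 'discr',
# any longer tuple -> 'enum' (each case checked below).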
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
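# from_point builds a sub-space of radius `distance` around p, keeping each axis type
# (expected boes (1, 5) on both axes for p = (3, 3) and distance 2).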
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
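# CashPlan(dates, amounts, rate): investment dates, cash injected on each date, and a rate
# (assumed annual) used for the closing-value figures asserted below.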
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
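# ResultPool(5) keeps at most the 5 best (item, perf) pairs when cut() is called;
# cut(keep_largest=False) keeps the 5 smallest instead (both paths exercised below).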
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple diameters:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
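# Worked example for the edge case above: the third axis is centred at 7 with
# radius 3.9, giving (3.1, 10.9), which is clipped to the parent bound 10.0;
# likewise the second axis (3.2 - 3.9 = -0.7) is clipped to 0.0, hence (0.0, 7.1).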
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
def test_time_string_format(self):
print('Testing time_str_format() function:')
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
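# The assertions above cover the three output styles: the long form spells out
# units ('2hrs 3min 55s 1.4ms'), the short form packs them as D/H/'/" with a
# millisecond suffix (2H3'55"001), and estimation=True keeps only the largest
# unit in either style.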
def test_get_stock_pool(self):
print(f'start test building stock pool function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板", and list date on or before "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list dates are on or before "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list dates are on or before "1998-01-01", and industries/areas are within the given lists\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
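# As the assertions above show, get_stock_pool() filters combine with logical
# AND, the date argument keeps stocks whose list_date falls on or before the
# given date, and unsupported argument names or types raise KeyError.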
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
# The manually calculated results below are kept in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
# Build a test series of 500 data points to test the evaluation process when there are more than 250 data points
self.long_data = pd.DataFrame([ 9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4 , 10.87 ,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19 , 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97 ,
12.178, 11.95 , 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64 ,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3 , 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82 , 12.67 , 12.876, 12.986, 13.271, 13.606, 13.82 ,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34 , 12.141, 11.687,
11.992, 12.458, 12.131, 11.75 , 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56 , 12.879, 12.861,
12.973, 13.235, 13.53 , 13.531, 13.137, 13.166, 13.31 , 13.103,
13.007, 12.643, 12.69 , 12.216, 12.385, 12.046, 12.321, 11.9 ,
11.772, 11.816, 11.871, 11.59 , 11.518, 11.94 , 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16 , 11.741, 11.26 , 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62 , 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89 ,
10.728, 11.191, 11.646, 11.62 , 11.195, 11.178, 11.18 , 10.956,
11.205, 10.87 , 11.098, 10.639, 10.487, 10.507, 10.92 , 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77 , 11.225, 10.92 , 10.824, 11.096, 11.542,
11.06 , 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55 , 9.008,
9.138, 9.088, 9.434, 9.156, 9.65 , 9.431, 9.654, 10.079,
10.411, 10.865, 10.51 , 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72 , 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11 , 13.53 ,
13.123, 13.138, 13.57 , 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86 , 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11 , 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32 , 16.59 , 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06 , 17.36 , 17.108,
17.348, 17.596, 17.46 , 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64 ,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67 , 15.911,
16.077, 16.17 , 15.722, 15.258, 14.877, 15.138, 15. , 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71 , 16.327, 16.605, 16.486, 16.846,
16.935, 17.21 , 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43 , 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([ 9.7 , 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59 , 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55 ,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91 ,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97 , 14.228,
13.84 , 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41 , 14.74 , 15.03 , 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86 , 15.097, 15.178, 15.293, 15.238, 15. , 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81 , 17.192, 16.86 , 16.745, 16.707,
16.552, 16.133, 16.301, 16.08 , 15.81 , 15.75 , 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57 , 16.778, 16.928, 16.932, 17.22 , 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95 ,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36 , 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79 , 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72 , 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12 , 15.442, 15.476, 15.789,
15.36 , 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2 , 15.994, 15.86 , 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49 , 17.768, 17.509,
17.795, 18.147, 18.63 , 18.945, 19.021, 19.518, 19.6 , 19.744,
19.63 , 19.32 , 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3 ,
17.894, 17.744, 17.5 , 17.083, 17.092, 16.864, 16.453, 16.31 ,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93 , 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67 , 14.797, 14.42 , 14.681, 15.16 , 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32 ,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71 , 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39 , 11.723, 12.084, 11.8 , 11.471,
11.33 , 11.504, 11.295, 11.3 , 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94 ,
10.521, 10.36 , 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72 , 10.54 , 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54 , 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39 , 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4 ,
9.332, 9.34 , 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63 , 8.831, 8.957, 9.18 , 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85 , 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06 , 10.188, 10.095, 9.739, 9.881,
9.7 , 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
# test max drawdown == 0:
# TODO: investigate how a division by zero in the drawdown ratio should be handled in this case
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
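# The assertions above imply that eval_max_drawdown() returns a 4-tuple:
# (max drawdown ratio, index of the preceding peak, index of the trough,
# index of recovery back to the peak value), with NaN as the recovery index
# when the series never recovers within the data.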
def test_info_ratio(self):
reference = self.test_data1
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
# Test volatility calculation on the long data series
expected_volatility = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514 ,
0.40710639, 0.40708157, 0.40609006, 0.4073625 , 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593 , 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768 , 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592 , 0.42615335, 0.42526286,
0.4248906 , 0.42368986, 0.4232565 , 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645 , 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991 , 0.405011 , 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969 , 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559 , 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634 , 0.36539259, 0.36428672, 0.36502487,
0.3647148 , 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685 , 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
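# Note the pattern used for the long-series checks here and in the tests below:
# the first 250 entries of the expected rolling series are NaN (a
# 250-observation rolling window over the 500-point long_data), the evaluation
# function returns the nanmean of the rolling series, and the rolling values
# are apparently written back into the input frame ('volatility' here, 'sharp',
# 'beta' and 'alpha' in the following tests).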
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
# Test Sharpe ratio calculation on the long data series
expected_sharp = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281 , -0.02416067, -0.02763238,
-0.027579 , -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633 , -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756 , -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062 ,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977 ,
0.0474047 , 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686 , 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441 , 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094 ,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544 , 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123 , 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174 , 0.05051288, 0.0564852 , 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782 , 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908 , 0.08562706,
0.0839014 , 0.0849072 , 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
# Test beta calculation on the long data series
expected_beta = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347 , -0.0460858 , -0.0416761 , -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583 ,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841 , -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915 , -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592 , -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058 , -0.04533641, -0.0461183 , -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414 ,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265 , -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383 , -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499 , -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632 , -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571 ,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486 , -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195 , -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
# Test alpha calculation on the long data series
expected_alpha = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678 ,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565 , -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743 ,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428 ,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789 , -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945 , -0.04672356, -0.03581408, -0.0439215 ,
-0.03429495, -0.0260362 , -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908 , 0.11302115,
0.0909566 , 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445 , 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807 , 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069 , 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612 , 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943 ,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336 , 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809 , 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061 , 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356 , 0.70912003,
0.60328917, 0.6395092 , 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216 , 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253 , 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
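# All six cases above expect identical results because every call uses the same
# reference (test_data1): eval_benchmark() evidently reports the total and
# annualised return of the reference series, independent of the first argument.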
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
from warnings import warn as Warn
from collections import OrderedDict
from . import utils
import numpy as np
import copy
import pandas as pd
try:
import matplotlib.pyplot as plt
except ImportError:
class RaiseForMissingMPL(object):
def __getattribute__(*_, **__):
raise ImportError("No module named matplotlib")
plt = RaiseForMissingMPL()
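# plt is only a stand-in here: the ImportError is raised lazily, on first use
# of any matplotlib attribute, rather than at import time.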
class GIGOError(Exception):
"""
You're trying to do something that will significantly harm
the validity of your inference.
This exception will tell you that.
"""
pass
def _raiseGIGO(msg):
raise GIGOError(msg)
class Preprocessor(object):
"""
"""
@classmethod
def from_arrays(cls, voteshares,
turnout,
years,
redistrict,
district_ids,
covariates=None,
missing='drop',
uncontested=None,
break_on_GIGO=True
):
"""
Method mimicking default Preprocessor() construction, but
with more limited functionality to construct directly from arrays.
Specifically, this has less input checking, so to avoid GIGO errors,
all input arrays must:
1. have no nan observations
2. have no uncontested elections, or at least uncontesteds are resolved to 0,1 and assumed correct.
3. all arrays are long (n*t,k) where k is the number of relevant covariates for the attribute.
refer to Preprocessor for more information.
"""
if covariates is None:
covariates = dict()
frame = pd.DataFrame.from_dict(
dict(vote_share=voteshares,
turnout=turnout,
year=years,
redistrict=redistrict,
district_id=district_ids,
**covariates
))
return cls(frame,
share_column='vote_share',
weight_column='turnout',
covariates=list(covariates.keys()),
years='year',
redistrict='redistrict',
district_id='district_id',
missing=missing,
uncontested=uncontested,
break_on_GIGO=break_on_GIGO)
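# A minimal usage sketch for from_arrays() (illustrative only; the toy values
# below are assumptions, not data from this project). All arrays are in long
# format, one entry per district-year, and must have equal lengths:
#
#   prep = Preprocessor.from_arrays(
#       voteshares=[0.55, 0.48, 0.61, 0.52],
#       turnout=[1000., 1200., 900., 1100.],
#       years=[2016, 2016, 2018, 2018],
#       redistrict=[0, 0, 1, 1],
#       district_ids=['d1', 'd2', 'd1', 'd2'],
#       covariates={'incumbency': [1, 0, 1, -1]},
#   )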
def __init__(self, frame,
share_column='vote_share',
covariates=None,
weight_column=None,
year_column='year',
redistrict_column=None,
district_id='district_id',
missing='drop',
uncontested=None,
break_on_GIGO=True):
"""
frame : pd.DataFrame
long dataframe in which the elections data is stored.
share_column : str
column of dataframe for which the two-party vote shares are stored.
covariates : list of str
columns in `frame` to use to predict `share_column`.
weight_column : str
turnout or other weight to assign each district in computing weighted
averages for vote shares
year_column : str
name of column holding the year in which each contest occurs
redistrict_column : str
name of column holding information about when redistricting occurs
in the dataset. Should be a binary indicator or a boolean
(True if redistricting occurred between that row's `year` and the previous year)
district_id : str
name of column which contains the district's unique id (stable over years)
missing : str
missing policy. Can be either:
drop : drop entries that are missing any data
impute : take the mean of the column as the value in the data
ignore : ignore the missing values
(default: drop)
uncontested : str
uncontested policy. Can be either:
censor : clip the data to a given vote share
winsor : clip the data to a given percentage
drop : drop uncontested elections
ignore : do nothing about uncontested elections.
impute : impute vote shares from the available data in each year
on other vote shares
impute_recursive: impute vote shares from available data in each year
and the previous year's (possibly imputed) vote share.
impute_singlepass: impute vote shares from available data in each year and
the previous year's vote share. Imputations are not carried forward.
(default : censor)
break_on_GIGO : bool
whether or not to break or proceed when a computation is determined
to yield meaningless but possibly non-null results. This may occur when
imputation is requested but covariates are not provided, when simulations
are requested for data with incorrect scope or structure, among others.
NOTE: this only catches a few common structural issues in imputation,
simulation, and prediction. It does not guarantee that results are "valid."
DO NOT change this unless you are sure you know why you need to change this.
"""
super().__init__()
if break_on_GIGO:
self._GIGO = _raiseGIGO
else:
            # GIGOError is not a Warning subclass, so don't pass it as the category
            self._GIGO = lambda x: Warn(x, stacklevel=2)
self.elex_frame = frame.copy()
if covariates is None:
self._covariate_cols = []
else:
self._covariate_cols = list(covariates)
provided = [x for x in (share_column, *self._covariate_cols,
weight_column,
year_column, redistrict_column,
district_id) if x is not None]
        self.elex_frame = self.elex_frame[provided]
self.elex_frame.rename(columns={
share_column: 'vote_share',
district_id: 'district_id',
year_column: 'year',
weight_column: 'weight',
redistrict_column: 'redistrict'
}, inplace=True)
try:
assert len(self.elex_frame.columns) == len(
set(self.elex_frame.columns))
except AssertionError:
raise AssertionError('Election frame contains '
'duplicated columns: {}'.format(
self.elex_frame.columns))
if weight_column is None:
self.elex_frame['weight'] = 1
if uncontested is None:
uncontested = dict(method='censor')
elif isinstance(uncontested, str):
uncontested = dict(method=uncontested)
if (uncontested['method'].lower().startswith('imp') or
uncontested['method'].lower() in ('recursive', 'singlepass')):
uncontested['covariates'] = copy.deepcopy(self._covariate_cols)
if year_column is not None:
try:
self.elex_frame['year'] = self.elex_frame.year.astype(int)
except KeyError:
raise KeyError("The provided year column is not "
"found in the dataframe."
" Provided: {}".format(self._year_column))
if redistrict_column is not None:
try:
self.elex_frame.redistrict = self.elex_frame.redistrict.astype(
int)
except KeyError:
raise KeyError("The provided year column is "
"not found in the dataframe."
"\n\tProvided: {}".format(
self._redistrict_column))
self._resolve_missing(method=missing)
self._resolve_uncontested(**uncontested)
if uncontested.get('ordinal', True):
if uncontested['method'].lower() != 'drop':
self._covariate_cols.append('uncontested')
else:
dummies = pd.get_dummies(self.elex_frame.uncontested)
dummies.columns = ['uncontested_R',
'contested',
'uncontested_D']
self.elex_frame = pd.concat((self.elex_frame, dummies), axis=1)
self.elex_frame.drop('uncontested', axis=1, inplace=True)
if uncontested['method'].lower() != 'drop':
self._covariate_cols.extend(dummies.columns.tolist())
self.wide = utils.make_designs(self.elex_frame,
years=self.elex_frame.year,
redistrict=self.elex_frame.get(
'redistrict'),
district_id='district_id')
self.long = pd.concat(self.wide, axis=0, sort=True)
@staticmethod
def _impute_turnout_from_voteshare_and_state(df, turnout_col='turnout',
state_col='state'):
"""
Impute turnout from the voteshare and state category. This specifies:
turnout ~ vote_share + vote_share**2 + StateFixedEffect
"""
        complete = df.dropna(subset=[turnout_col], inplace=False)
        missing_data = df[['vote_share', state_col]].isnull().any(axis=0)
        if missing_data.any():
            missing_cols = missing_data.index[missing_data].tolist()
            # this is a staticmethod, so warn directly rather than via self._GIGO
            Warn("Missing data in imputation of turnout "
                 "for column(s): {}".format(missing_cols), stacklevel=2)
        import statsmodels.formula.api as smf
        model = smf.ols('{turnout} ~ vote_share + I(vote_share**2)'
                        ' + C({state}, Treatment)'.format(turnout=turnout_col,
                                                          state=state_col),
                        data=complete).fit()
        incomplete_ix = df[df[turnout_col].isnull()].index
        incomplete_X = df.loc[incomplete_ix, ['vote_share', state_col]]
        preds = model.predict(incomplete_X)
        df.loc[incomplete_ix, turnout_col] = preds
        return df[turnout_col]
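    # Hedged sketch of the imputation model above on a toy frame (assumed
    # column names `turnout`, `vote_share`, `state`; not part of the module):
    #
    #     import pandas as pd
    #     import statsmodels.formula.api as smf
    #     toy = pd.DataFrame({'turnout': [.61, .55, .70, .64, None, .58],
    #                         'vote_share': [.52, .48, .65, .71, .59, .44],
    #                         'state': ['GA', 'GA', 'TX', 'TX', 'TX', 'GA']})
    #     fit = smf.ols('turnout ~ vote_share + I(vote_share**2) + C(state, Treatment)',
    #                   data=toy.dropna()).fit()
    #     toy.loc[toy.turnout.isnull(), 'turnout'] = fit.predict(toy[toy.turnout.isnull()])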
def _resolve_missing(self, method='drop'):
"""
Resolve missing data issues using a given method:
drop : drop entries that are missing any data
impute : take the mean of the column as the value in the data
ignore : ignore the missing values
"""
targets = self._covariate_cols + ['weight']
if (method.lower() == 'drop'):
self.elex_frame.dropna(subset=targets, inplace=True)
elif (method.lower() == 'impute'):
for i, year in self.elex_frame.groupby('year'):
colmeans = year[targets].mean(axis=0)
                self.elex_frame.loc[year.index,
                                    targets] = year[targets].fillna(colmeans)
elif (method.lower() == 'ignore'):
return
else:
raise KeyError("Method to resolve missing data not clear."
"\n\tRecieved: {}\n\t Supported: 'drop'"
"".format(method))
        any_in_column = self.elex_frame[targets].isnull().any(axis=0)
        if any_in_column.any():
            still_missing = any_in_column.index[any_in_column].tolist()
            self._GIGO('After resolving missing data '
                       'using {}, the following columns '
                       'still have missing data: {}'.format(method, still_missing))
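    # Illustration of the 'impute' policy above on a toy frame (assumed toy
    # data, not from the package): within each year, missing covariate/weight
    # values are replaced by that year's column means.
    #
    #     import pandas as pd
    #     toy = pd.DataFrame({'year': [2010, 2010, 2012, 2012],
    #                         'weight': [1.0, None, 2.0, 4.0]})
    #     for _, grp in toy.groupby('year'):
    #         toy.loc[grp.index, ['weight']] = grp[['weight']].fillna(grp[['weight']].mean())
    #     # the 2010 NaN becomes 1.0 (the 2010 mean); the 2012 rows are unchanged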
def _resolve_uncontested(self, method='censor',
floor=None, ceil=None,
**special):
"""
Resolve uncontested elections' vote shares using a specific method
censor : clip the data to a given vote share
winsor : clip the data to a given percentage
drop : drop uncontested elections
impute : impute vote shares from the available data in each year
on other vote shares
impute_recursive: impute vote shares from available data in each year
and the previous year's (possibly imputed) vote share.
impute_singlepass: impute vote shares from available data in each year and
the previous year's vote share. Imputations are not carried forward.
"""
if method.lower() == 'singlepass':
method = 'impute_singlepass'
elif method.lower() == 'recursive':
method = 'impute_recursive'
elif (method.lower().startswith('winsor') or
method.lower().startswith('censor')):
floor, ceil = .1, .9
elif (method.lower() in ('shift', 'drop')):
floor, ceil = .05, .95
elif method.lower().startswith('imp'):
if (special.get('covariates') == []
or special.get('covariates') is None):
self._GIGO("Imputation selected but no covariates "
"provided. Shifting uncontesteds to the "
"mean is likely to harm the validity "
"of inference. Provide a list to "
"coviarate_cols to fix.")
if 'year' not in self.elex_frame:
self._GIGO("Imputation pools over each year. No "
"years were provided in the input "
"dataframe. Provide a year variate "
"in the input dataframe to fix")
floor, ceil = .01, .99
if method.endswith('recursive') or method.endswith('singlepass'):
# to do the stronger imputation, you need to get the redistricting vector
if self.elex_frame.get('redistrict') is None:
Warn('computing redistricting from years vector')
                self.elex_frame['redistrict'] = utils.census_redistricting(
                    pd.Series(self.elex_frame.year))
elif method.lower() == 'ignore':
floor, ceil = .05, .95
self.elex_frame['uncontested'] = ((self.elex_frame.vote_share > ceil).astype(int)
+ (self.elex_frame.vote_share < floor).astype(int)*-1)
return
else:
raise KeyError("Uncontested method not understood."
"Recieved: {}"
"Supported: 'censor', 'winsor', "
"'shift', 'drop', 'impute', 'ignore',"
" 'impute_recursive', 'impute_singlepass',"
"'singlepass'".format(method))
# if self.elex_frame.vote_share.isnull().any():
# raise self._GIGO("There exists a null vote share with full "
# "covariate information. In order to impute,"
# "the occupancy of the seat should be known. "
# "Go through the data and assign records with "
# "unknown vote share a 0 if the seat was "
# "awarded to the opposition and 1 if the seat "
# "was awarded to the reference party to fix.")
design = self.elex_frame.copy(deep=True)
self._prefilter = self.elex_frame.copy(deep=True)
self.elex_frame = _unc[method](design,
floor=floor, ceil=ceil,
**special)
def _extract_data(self, t=-1, year=None):
"""
        Get the essential statistics from the `t`th election.
        Arguments
---------
t : int
index of time desired. This should be a valid index to self.models
year : int
index of year desired. This should be some year contained in the index of self.params
Returns
----------
a tuple of observed data:
turnout : (n,1) vector of the turnouts over n districts in election t
vote_shares : (n,p) the share of votes won by party j, j = 1, 2, ... p
party_vote_shares : (p,) the share of overall votes won by party j
seats : (n,p) the binary indicators showing whether party j won each seat
party_seat_share : (p,) the share of overall seats won by party j
"""
if year is not None:
t = list(self.years).index(year)
obs_refparty_shares = self.wide[t].vote_share[:, None]
obs_vote_shares = np.hstack(
(obs_refparty_shares, 1-obs_refparty_shares))
obs_seats = (obs_vote_shares > .5).astype(int)
obs_turnout = self.wide[t].weight
obs_party_vote_shares = np.average(obs_vote_shares,
weights=obs_turnout, axis=0)
obs_party_seat_shares = np.mean(obs_seats, axis=0)
return (obs_turnout, obs_vote_shares, obs_party_vote_shares,
obs_seats, obs_party_seat_shares)
def _extract_data_in_model(self, t=-1, year=None):
"""
Extract an election from the models
"""
if year is not None:
t = list(self.years).index(year)
obs_refparty_shares = self.models[t].model.endog[:, None]
obs_vote_shares = np.hstack(
(obs_refparty_shares, 1-obs_refparty_shares))
obs_seats = (obs_refparty_shares > .5).astype(int)
obs_turnout = self.models[t].model.weights
obs_party_vote_shares = np.average(
obs_vote_shares, weights=obs_turnout, axis=0)
obs_party_seat_shares = np.mean(obs_seats, axis=0)
return (obs_turnout, obs_vote_shares, obs_party_vote_shares,
obs_seats, obs_party_seat_shares)
def _extract_election(self, t=-1, year=None):
return self._extract_data_in_model(t=t, year=year)
class Plotter(object):
"""
    Class to provide plotting capabilities to various seats-votes simulation methods.
"""
def __init__(self):
super().__init__()
@property
def years(self):
raise NotImplementedError("'years' must be implemented on child class {}"
"In order to be used.".format(type(self)))
def _extract_data(self, *args, **kwargs):
raise NotImplementedError("'_extract_data' must be implemented on child class {}"
" in order to be used.".format(type(self)))
def simulate_elections(self, *args, **kwargs):
raise NotImplementedError("'simulate_elections' must be implemented on child class {}"
" in order to be used.".format(type(self)))
def plot_rankvote(self, t=-1, year=None, normalize=False, mean_center=False,
ax=None, fig_kw=dict(), scatter_kw=dict(c='k')):
"""
Plot the rankvote curve for the given time period.
Arguments
---------
t : int
time index
year: int
year to plot. Overrides t
normalize : bool
flag denoting whether to normalize ranks to [0,1]
mean_center : bool
flag denoting whether to center the rankvote to the
party vote share. If both normalize and mean_center,
the plot is actually the seats-votes curve.
ax : matplotlib.AxesSubPlot
an axis to plot the data on. If None, will create a new
figure.
fig_kw : dict
keyword arguments for the plt.figure() call, if applicable.
scatter_kw : dict
keyword arguments for the ax.scatter call, if applicable.
Returns
-------
figure and axis of the rank vote plot
"""
from scipy.stats import rankdata
turnout, vshares, pvshares, *rest = self._extract_data(t=t, year=year)
vshares = vshares[:, 0]
if ax is None:
f = plt.figure(**fig_kw)
ax = plt.gca()
else:
f = plt.gcf()
ranks = rankdata(1-vshares, method='max').astype(float)
if normalize:
ranks = ranks / len(ranks)
if mean_center:
plotshares = np.clip((1 - vshares) + (vshares.mean() - .5),0,1)
else:
plotshares = 1 - vshares
ax.scatter(plotshares, ranks, **scatter_kw)
if normalize and mean_center:
title = 'Seats-Votes Curve ({})'
elif normalize:
title = 'Normalized Rank-Votes Curve ({})'
elif mean_center:
title = 'Centered Rank-Votes Curve ({})'
else:
title = 'Rank-Votes Curve ({})'
if year is None:
year = self.years[t]
ax.set_title(title.format(year))
return f, ax
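    # Toy illustration of the rank-votes transform used above (assumed
    # numbers, not from the package): with normalize=True and
    # mean_center=True the scatter traces an empirical seats-votes curve.
    #
    #     import numpy as np
    #     from scipy.stats import rankdata
    #     vshares = np.array([.40, .55, .70])              # reference-party shares
    #     ranks = rankdata(1 - vshares, method='max') / 3  # normalized ranks
    #     plotshares = np.clip((1 - vshares) + (vshares.mean() - .5), 0, 1)
    #     # plotting plotshares (x) against ranks (y) gives the centered curve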
def plot_empirical_seatsvotes(self, *args, **kwargs):
"""
This is plot_rankvote with normalize and mean_center forced to be true.
"""
kwargs['normalize'] = True
kwargs['mean_center'] = True
return self.plot_rankvote(*args, **kwargs)
def plot_simulated_seatsvotes(self, n_sims=10000, swing=0, Xhyp=None,
target_v=None, t=-1, year=None, predict=False,
ax=None, fig_kw=dict(),
scatter_kw=dict(),
mean_center=True, normalize=True,
silhouette=True,
q=[5, 50, 95],
band=False,
env_kw=dict(), median_kw=dict(),
return_sims=False):
"""
This plots the full distribution of rank-votes for simulated voteshares.
Arguments
n_sims
swing
Xhyp
target_v
t
year
predict
ax
fig_kw
scatter_kw
mean_center
normalize
silhouette
band
q
env_kw
median_kw
return_sims
"""
from scipy.stats import rankdata
if year is not None:
t = list(self.years).index(year)
sims = self.simulate_elections(t=t, year=year, n_sims=n_sims, swing=swing,
target_v=target_v, fix=False, predict=predict)
ranks = np.vstack([rankdata(1-sim, method='max').astype(float)
for sim in sims])
N = len(sims[0])
if ax is None:
f = plt.figure(**fig_kw)
ax = plt.gca()
else:
f = plt.gcf()
if mean_center:
target_v = np.average(self.wide[t].vote_share,
weights=self.wide[t].weight)
shift = (target_v - .5) if mean_center else 0
rescale = N if normalize else 1
if silhouette:
# force silhouette aesthetics
scatter_kw['alpha'] = scatter_kw.get('alpha', .01)
scatter_kw['color'] = scatter_kw.get('color', 'k')
scatter_kw['linewidth'] = scatter_kw.get('linewidth', 0)
scatter_kw['marker'] = scatter_kw.get('marker', 'o')
ptiles = []
for ri in range(1,N+1):
win_by_rank = np.hstack([sim[np.where(rank==ri)]
for sim,rank in zip(sims,ranks)])
ptile = np.nanpercentile(win_by_rank, q=q, keepdims=True).flatten()
if np.isnan(ptile).all(): # when win_by_rank is empty, ptile is numpy.nan
ptile = [np.nan] * len(q) # so it needs to match the size of q
if len(q) == 2:
lo,hi = ptile
med = np.nanmedian(win_by_rank)
ptile = [lo, med, hi]
ptiles.append(ptile)
lo, med, hi = np.vstack(ptiles).T
else:
# suggest these otherwise, if user doesn't provide alternatives
scatter_kw['alpha'] = scatter_kw.get('alpha', .2)
scatter_kw['color'] = scatter_kw.get('color', 'k')
scatter_kw['linewidth'] = scatter_kw.get('linewidth', 0)
scatter_kw['marker'] = scatter_kw.get('marker', 'o')
for sim, rank in zip(sims, ranks):
ax.scatter(np.clip((1-sim)+shift,0,1),
rank/rescale, **scatter_kw)
if silhouette:
env_kw['linestyle'] = env_kw.get('linestyle', '-')
env_kw['color'] = env_kw.get('color', '#FD0E35')
env_kw['linewidth'] = env_kw.get('linewidth', 1)
median_kw['linestyle'] = median_kw.get('linestyle', '-')
median_kw['color'] = median_kw.get('color', '#FD0E35')
if band:
env_kw['alpha'] = .4
ax.fill_betweenx(np.arange(1, N+1)/rescale,
np.clip((1-lo)+shift,0,1),
np.clip((1-hi)+shift,0,1), **env_kw)
else:
ax.plot(np.clip((1-lo)+shift,0,1),
np.arange(1, N+1)/rescale, **env_kw)
ax.plot(np.clip((1-hi)+shift,0,1),
np.arange(1, N+1)/rescale, **median_kw)
ax.plot((1-med)+shift, np.arange(1, N+1)/rescale, **median_kw)
if return_sims:
return f, ax, sims, ranks
return f, ax
class AlwaysPredictPlotter(Plotter):
def plot_simulated_seatsvotes(self, n_sims=10000, swing=0, Xhyp=None,
target_v=None, t=-1, year=None,
ax=None, fig_kw=dict(), predict=True,
scatter_kw=dict(),
mean_center=True, normalize=True,
silhouette=True,
q=[5, 50, 95],
band=False,
env_kw=dict(), median_kw=dict(),
return_sims=False):
if predict is False:
self._GIGO(
"Prediction should always be enabled for {}".format(self.__class__))
return Plotter.plot_simulated_seatsvotes(**vars())
class AdvantageEstimator(object):
@staticmethod
def _do_statistic(sims, *additional_parameters, **named_params):
# do advantage algorithm using simulations & knowns explicitly provided
return
    def statistic(self, *additional_parameters, sim_kws={}, stat_kws={}):
        sims = self.simulate(**sim_kws)
        return self._do_statistic(sims, *additional_parameters, **stat_kws)
def get_swing_ratio(self, n_sims=1000, t=-1,
Xhyp=None,
predict=False, use_sim_swing=True):
"""
Generic method to either compute predictive or counterfactual elections.
        See also: predict, counterfactual
Arguments
---------
n_sims : int
number of simulations to conduct
t : int
the target year to use for the counterfactual simulations
swing : float
arbitrary shift in vote means
Xhyp : (n,k)
artificial data to use in the simulation
target_v : float
target mean vote share to peg the simulations to. Will
ensure that the average of all simulations conducted is
this value.
fix : bool
flag to denote whether each simulation is pegged exactly
to `target_v`, or if it's only the average of all
simulations pegged to this value.
predict : bool
whether or not to use the predictive distribution or the
counterfactual distribution
use_sim_swing: bool
whether to use the instantaneous change observed in
simulations around the observed seatshare/voteshare
point, or to use the aggregate slope of the seats-votes
curve over all simulations as the swing ratio
"""
# Simulated elections
simulations = self.simulate_elections(n_sims=n_sims, t=t,
swing=None, Xhyp=Xhyp,
target_v=.5, fix=False, predict=predict)
turnout = 1/self.models[t].model.weights
ref_voteshares = np.average(simulations, weights=turnout, axis=1)
grand_ref_voteshare = ref_voteshares.mean()
ref_seatshares = (simulations > .5).mean(axis=1)
grand_ref_seatshare = ref_seatshares.mean()
# chose to do this via tuples so that we can use the method elsewhere
obs_turnout, *rest = self._extract_election(t=t)
obs_voteshares, obs_party_voteshares, *rest = rest
obs_seats, obs_party_seatshares = rest
# Swing Around Median
party_voteshares = np.hstack((ref_voteshares.reshape(-1, 1),
1-ref_voteshares.reshape(-1, 1)))
party_seatshares = np.hstack((ref_seatshares.reshape(-1, 1),
1-ref_seatshares.reshape(-1, 1)))
swing_near_median = est.swing_about_pivot(party_seatshares,
party_voteshares,
np.ones_like(obs_party_voteshares)*.5)
# Swing near observed voteshare
shift_simulations = simulations + (obs_party_voteshares[0] - .5)
shift_ref_voteshares = np.average(shift_simulations,
weights=turnout, axis=1)
shift_ref_seatshares = (shift_simulations > .5).mean(axis=1)
shift_party_voteshares = np.hstack((shift_ref_voteshares.reshape(-1, 1),
1-shift_ref_voteshares.reshape(-1, 1)))
shift_party_seatshares = np.hstack((shift_ref_seatshares.reshape(-1, 1),
1-shift_ref_seatshares.reshape(-1, 1)))
swing_at_observed = est.swing_about_pivot(shift_party_seatshares,
shift_party_voteshares,
obs_party_voteshares)
# Sanity Check
if not np.isfinite(swing_near_median).all():
Warn('The computation returned an infinite swing ratio. Returning for'
' debugging purposes...', stacklevel=2)
return party_seatshares, party_voteshares, obs_party_voteshares
elif not np.isfinite(swing_at_observed).all():
Warn('The computation returned an infinite swing ratio. Returning for'
' debugging purposes...', stacklevel=2)
return (shift_party_seatshares, shift_party_voteshares, obs_party_voteshares)
median_conints = est.intervals(party_seatshares, party_voteshares)
observed_conints = est.intervals(shift_party_seatshares,
shift_party_voteshares)
swing_lm, swing_lm_resid = est.swing_slope(shift_party_seatshares,
shift_party_voteshares)
self._swing_ratios_emp = swing_at_observed[0]
self._swing_ratios_med = swing_near_median[0]
self._swing_ratios_lm = swing_lm.mean() # pool the parties in a 2party system
self._swing_CIs = observed_conints
self._swing_CIs_med = median_conints
self._use_sim_swing = use_sim_swing
return swing_at_observed[0] if use_sim_swing else swing_lm
def _median_bonus_from_simulations(self, sims, q=[5, 50, 95], return_all=False):
"""
Compute the bonus afforded to the reference party using:
B = 2*E[s|v=.5] - 1
where s is the seat share won by the reference party and v is the average vote share won by the reference party.
"""
expected_seatshare = 2*(np.mean((sims > .5), axis=1)-.5)
point_est = np.mean(expected_seatshare)
point_est_std = np.std(expected_seatshare)
if not return_all:
return np.array([point_est - point_est_std*2,
point_est,
point_est + point_est_std*2])
else:
return expected_seatshare
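    # Worked toy example of the median bonus B = 2*E[s | v=.5] - 1 used above
    # (assumed numbers, not from the package):
    #
    #     import numpy as np
    #     sims = np.array([[.52, .61, .47, .55],   # each row: one simulated election
    #                      [.49, .58, .51, .62],
    #                      [.45, .53, .48, .57]])
    #     per_sim = 2 * (np.mean(sims > .5, axis=1) - .5)  # -> [0.5, 0.5, 0.0]
    #     per_sim.mean()                                   # -> ~0.33 seat-share bonus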
def _observed_bonus_from_simulations(self, sims,
q=[5, 50, 95], return_all=False):
"""
Compute the bonus afforded to the reference party by using:
E[s | v=v_{obs}] - (1 - E[s | v = (1 - v_{obs})])
        where s is the seat share won by the reference party and v_{obs} is the observed share of the vote won by the reference party. This reduces to the difference in performance between the reference party and the opponent when the opponent does as well as the reference.
"""
raise NotImplementedError
if year is not None:
t = self.years.tolist().index(year)
turnout, votes, observed_pvs, *rest = self._extract_election(t=t)
observed_ref_share = observed_pvs[0]
return self.winners_bonus_from_(n_sims=n_sims,
target_v=observed_ref_share,
t=t, Xhyp=Xhyp,
predict=predict, q=q, return_all=return_all)
def estimate_winners_bonus(self, n_sims=1000, t=-1, year=None,
target_v=.5, Xhyp=None, predict=False, q=[5, 50, 95], return_all=False):
"""
Compute the bonus afforded to the reference party by using:
E[s | v=v_i] - (1 - E[s | v = (1 - v_i)])
where s is the seat share won by the reference party and v_i is an arbitrary target vote share. This reduces to a difference in performance between the reference party and the opponent when the opponent and the reference win `target_v` share of the vote.
"""
raise NotImplementedError
if year is not None:
t = self.years.tolist().index(year)
sims = self.simulate_elections(n_sims=n_sims, t=t, Xhyp=Xhyp, predict=predict,
target_v=target_v, fix=True)
complement = self.simulate_elections(n_sims=n_sims, t=t, Xhyp=Xhyp,
predict=predict, target_v=1-target_v,
fix=True)
weights = 1/self.models[t].model.weights
observed_expected_seats = np.mean(sims > .5, axis=1) # what you won
# what your oppo wins when you do as well as they did
complement_opponent_seats = np.mean(1 - (complement > .5), axis=1)
point_est = np.mean(observed_expected_seats -
complement_opponent_seats)
point_est_std = np.std(observed_expected_seats -
complement_opponent_seats)
if not return_all:
return np.array([point_est - 2*point_est_std,
point_est,
point_est + 2*point_est_std])
else:
return observed_expected_seats - complement_opponent_seats
def get_attainment_gap(self, t=-1, year=None, return_all=True):
"""
        Get the empirically-observed attainment gap, computed as the minimum vote share required to win a majority of the seats.
G_a = ((.5 - s)/\hat{r} + v) - .5
where s is the observed seat share, r is the estimated responsiveness in time t, and v is the party vote share in time t. Thus, the core of this statistic is a projection of the line with the responsiveness as the slope through the observed (v,s) point to a point (G_a,.5).
Inherently unstable, this estimate is contingent on the estimate of the swing ratio.
"""
raise NotImplementedError
if not return_all:
self._GIGO(
'This cannot return all values, since it does not rely on simulation')
if year is not None:
t = list(self.years).index(year)
try:
return self._attainment_gap[t]
except AttributeError:
turnout, voteshare, *_ = self._extract_election(t)
sr = self.get_swing_ratio(t=t)
return est.attainment_gap(turnout, voteshare, sr)[0][0]
def simulate_attainment_gap(self, t=-1, year=None, Xhyp=None, predict=False, q=[5, 50, 95],
n_sim_batches=1000, sim_batch_size=None,
best_target=None, return_all=False, **optimize_kws
):
"""
Estimate the attainment gap through simulation. Given a target vote share `best_target`,
find the q'th quantiles (5,50,95 by default) of (.5 - minV) where minV is the smallest vote
        share in the batch (of size `sim_batch_size`) where the party still retains a majority of the
house. If this simulation is centered at the "optimal" attainment gap value from `optimal_attainment_gap`,
this should estimate percentile bounds on the smallest attainment gaps at that vote share.
For example, if best_target = .5, then this means `n_sim_batches` of simulations would be conducted
where the average vote share over the entire batch was .5. Over these batches (each one of size `sim_batch_size`),
all realizations where the party wins a majority are retained. Then, the minimum average vote share in these
batches is computed and stored.
After all these minima are computed, the qth quantiles of these minima are returned.
        They represent the typical minimum vote share required by the party to win a majority.
`best_target`, then, simply represents a target for the search space. It should
be small enough that the party occasionally wins very small majorities, but large enough that
they win at least one majority per `sim_batch_size`.
Arguments
----------
t, year, Xhyp, predict (refer to self.simulate_elections)
q : iterable
quantiles to use to summarize the minima
n_sim_batches: int
number of batches with which to simulate minima
sim_batch_size: int
number of elections to simulate within each batch
best_target: float
vote share to center the batches
**optimize_kws: keyword argument dictionary
passed to self.optimal_attainment_gap if no target
is provided.
"""
raise NotImplementedError
if year is None:
year = self._years[t]
elif year is not None:
t = self._years.tolist().index(year)
if sim_batch_size is None:
sim_batch_size = n_sim_batches // 10
if best_target is None:
best_target = .5 + -1 * self.optimal_attainment_gap(t=t, year=year, Xhyp=Xhyp,
predict=predict, q=[
50],
**optimize_kws)
agaps = []
weights = 1/self.models[t].model.weights
counter = 0
retry = 0
for _ in tqdm(range(n_sim_batches),
desc='simulating with target={}'.format(best_target)):
batch = self.simulate_elections(target_v=best_target, t=t, predict=predict,
Xhyp=Xhyp, n_sims=sim_batch_size, fix=False)
majorities = np.asarray(
[((sim > .5).mean() > .5) for sim in batch])
if not majorities.any():
retry += 1
continue
candidate = np.average(
batch[majorities], weights=weights, axis=1).min()
agaps.append(candidate)
if retry > 0:
Warn('no majorities found in {} simulations! Configuration is: '
'\n\t target: \t{} '
'\n\t Xhyp is None: \t{}'
'\n\t batch_size: \t{}'
'\n\t n_batches: \t{}'
''.format(retry, best_target, Xhyp is None,
sim_batch_size, n_sim_batches))
if not return_all:
return np.percentile(.5 - np.asarray(agaps), q=q)
else:
return .5 - agaps
def optimal_attainment_gap(self, t=-1, year=None,
Xhyp=None, predict=False, q=[5, 50, 95],
n_batches=1000, batch_size=None,
loss='mad', return_all=False):
"""
Returns the `q`th percentiles (5,50,95 by default) for (.5 - v*), where
v* is the optimal statewide average vote share that minimizes a loss
function:
loss(.5 - E[s|v*])
Where loss(.) may be mean absolute deviation or squared error loss.
In plain language, this is the excess statewide vote share (v* - .5)
that a party wins when it wins a *bare majority* (its share of seats is
the smallest possible value above 50%) of the representative
        body. If this is negative, the party must typically win more than
        50% of the votes to win 50% of the seats. If this is positive, the
        party can win 50% of the seats with less than 50% of the votes.
Arguments
---------
t : int
index of the time period to compute the attainment gap.
year : int
the year to compute the attainment gap. Supersedes `t`
Xhyp : np.ndarray
a matrix of hypothetical electoral conditions under which
to estimate the optimal attainment gap.
predict : bool
whether to use the predictive form or counterfactual form
of the election simulators
q : iterable (tuple,list,array)
            set of quantiles (percentile points) passed to numpy.percentile
n_batches : int
number of times to estimate the optimal attainment gap. Since
the gap is estimated many times over a stochastic objective,
this governs how many replications of the optimization problem
are conducted.
batch_size : int
size of each simulation batch in the optimization problem.
The total amount of simulated elections will be
n_batches * (batch_size * nfev_per_batch), where nfev_per_batch
is the unknown number of times scipy.optimize.minimize_scalar
will evaluate the objective function. So, if this function is
very slow, batch_size is likely the critical path.
loss : string
the option for loss function type, either 'mad', the mean
absolute deviation, or 'ssd', the sum of squared deviations.
If a callable, it must return a single scalar that represents
some distance metric about how far the seat shares in simulations
from the model in time t fall from having a bare majority.
"""
        raise NotImplementedError
if year is None:
year = self._years[t]
if batch_size is None:
batch_size = n_batches // 10
elif year is not None:
t = self._years.tolist().index(year)
try:
from scipy.optimize import minimize_scalar
except ImportError:
raise ImportError(
'scipy.optimize is required to use this functionality')
if isinstance(loss, str):
if loss.lower() == 'mad':
def seatgap(target):
"""
the mean absolute gap between the observed seatshare and .5
"""
sims = self.simulate_elections(target_v=target, t=t, n_sims=batch_size,
predict=predict, Xhyp=Xhyp)
seats = np.asarray([(sim > .5).mean()
for sim in sims]).reshape(-1, 1)
mad = np.abs(seats - .5).mean()
return mad.item()
elif loss.lower() == 'ssd':
def seatgap(target):
"""
The sum of squared deviations between the observed seatshare
and .5
"""
sims = self.simulate_elections(target_v=target, t=t, n_sims=batch_size,
predict=predict, Xhyp=Xhyp)
seats = np.asarray([(sim > .5).mean()
for sim in sims]).reshape(-1, 1)
ssd = (seats - .5).T.dot(seats - .5)
return ssd.item()
else:
raise KeyError('Form of seatgap loss function ({}) is not '
'("mad","ssd").'.format(loss.lower()))
elif callable(loss):
seatgap = loss
else:
raise TypeError('loss parameter not recognized as string ("mad", "ssd")'
' or callable')
best_targets = []
for _ in tqdm(range(n_batches), desc='optimizing'):
best_targets.append(minimize_scalar(seatgap,
tol=1e-4,
bounds=(.05, .95),
method='bounded'))
best_xs = np.asarray([op.x for op in best_targets if op.success])
if not return_all:
return np.percentile(.5 - best_xs, q=q)
else:
return .5 - best_xs
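    # Sketch of the bounded scalar-optimization pattern used above, with a
    # stand-in objective (assumed toy function, not the real seatgap loss):
    #
    #     from scipy.optimize import minimize_scalar
    #     def toy_seatgap(target):
    #         return abs(target - .47)          # pretend .47 yields a bare majority
    #     res = minimize_scalar(toy_seatgap, tol=1e-4, bounds=(.05, .95), method='bounded')
    #     res.x        # -> ~0.47, so the "attainment gap" is .5 - res.x = .03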
def get_efficiency_gap(self, t=-1, year=None, voteshares=None, turnout=True, return_all=True):
"""
Compute the percentage difference of wasted votes in a given election
        G_e = (W_1 - W_2) / \sum_i^n m_i
        where W_k is the total wasted votes for party k, the number cast in excess of victory:
        W_k = \sum_i^n (V_{ik} - m_i/2)_+
        Where V_{ik} is the raw vote cast in district i for party k and m_i is the total number of votes cast for all parties in district i, so m_i/2 is the threshold needed for victory.
"""
raise NotImplementedError
if not return_all:
self._GIGO(
'This function has no ability to return all of its results, because it does not rely on simulations.')
tvec, vshares, a, b, c = self._extract_election(t=t, year=year)
vshares = voteshares if voteshares is not None else vshares
if not isinstance(turnout, bool):
return est.efficiency_gap(vshares[:, 0], turnout)
elif turnout:
return est.efficiency_gap(vshares[:, 0], tvec)
else:
return est.efficiency_gap(vshares[:, 0], turnout=None)
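    # Toy hand computation of the wasted-votes arithmetic in the docstring
    # above (illustration only; `est.efficiency_gap` may differ in details,
    # e.g. by also counting all of a losing party's votes as wasted):
    #
    #     two districts, 100 votes each (victory threshold m_i/2 = 50)
    #     district 1: party 1 wins 70-30  -> wasted: party 1 = 20, party 2 = 0
    #     district 2: party 2 wins 60-40  -> wasted: party 1 = 0,  party 2 = 10
    #     G_e = (W_1 - W_2) / total votes = (20 - 10) / 200 = 0.05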
def estimate_efficiency_gap(self, t=-1, year=None,
Xhyp=None, predict=False, n_sims=1000,
q=[5, 50, 95], turnout=True, return_all=False):
"""
Compute the efficiency gap expectation over many simulated elections.
This uses the same estimator as `get_efficiency_gap`,
but computes the efficiency gap over many simulated elections.
"""
raise NotImplementedError
tvec, *rest = self._extract_election(t=t, year=year)
if not isinstance(turnout, bool):
tvec = turnout
elif not turnout:
tvec = None
sims = self.simulate_elections(
t=t, Xhyp=Xhyp, predict=predict, n_sims=n_sims)
gaps = [est.efficiency_gap(sim.reshape(-1, 1), turnout=tvec)
for sim in sims]
if not return_all:
return np.percentile(gaps, q=q)
else:
return gaps
def district_sensitivity(self, t=-1, Xhyp=None, predict=False, fix=False,
swing=None, n_sims=1000,
batch_size=None, n_batches=None,
reestimate=False, seed=2478879,
**jackknife_kw):
"""
This computes the deletion simulations.
t, Xhyp, predict, fix, swing, n_sims are all documented in simulate_elections.
batch_size and n_batches refer to arguments to optimal_attainment_gap
jackknife_kw refer to cvtools.jackknife arguments
"""
raise NotImplementedError
np.random.seed(seed)
if n_batches is None:
n_batches = n_sims
if batch_size is None:
batch_size = n_batches // 10
original = copy.deepcopy(self.models[t])
leverage = cvt.leverage(original)
resid = np.asarray(original.resid).reshape(-1, 1)
del_models = cvt.jackknife(original, full=True, **jackknife_kw)
del_params = pd.DataFrame(np.vstack([d.params.reshape(1, -1) for d in del_models]),
columns=original.params.index)
if not reestimate: # Then build the deleted models from copies of the original
mods = (copy.deepcopy(original) for _ in range(int(original.nobs)))
del_models = []
for i, mod in enumerate(mods):
mod.model.exog = np.delete(mod.model.exog, i, axis=0)
mod.model.endog = np.delete(mod.model.endog, i)
mod.model.weights = np.delete(mod.model.weights, i)
del_models.append(mod)
rstats = []
# First, estimate full-map statistics
full_mbon = self.estimate_median_bonus(t=t, Xhyp=Xhyp)
full_obon = self.estimate_observed_bonus(t=t, Xhyp=Xhyp)
full_egap_T = self.estimate_efficiency_gap(
t=t, Xhyp=Xhyp, turnout=True)
full_egap_noT = self.estimate_efficiency_gap(
t=t, Xhyp=Xhyp, turnout=False)
full_obs_egap_T = self.get_efficiency_gap(t=t, turnout=True)
full_obs_egap_noT = self.get_efficiency_gap(t=t, turnout=False)
full_agap = self.optimal_attainment_gap(t=t, Xhyp=Xhyp,
batch_size=batch_size,
n_batches=n_batches)
        # Then, iterate through the deleted models and compute
        # district sensitivities in the target year (t).
for idx, mod in tqdm(list(enumerate(del_models)), desc='jackknifing'):
self.models[t] = mod
del_vs = mod.model.endog[:, None]
del_w = mod.model.weights
del_X = mod.model.exog
# make sure the hypothetical gets deleted as well
del_Xhyp = np.delete(
Xhyp, idx, axis=0) if Xhyp is not None else None
# Compute various bias measures:
# the observed efficiency gap (with/without turnout)
obs_egap_t = est.efficiency_gap(del_vs, del_w)
obs_egap_not = self.get_efficiency_gap(t=t, voteshares=del_vs,
turnout=False)
# The median bonus
mbon = self.estimate_median_bonus(t=t, Xhyp=del_Xhyp,
n_sims=n_sims)
# The observed bonus
obon = self.estimate_observed_bonus(t=t, Xhyp=del_Xhyp,
n_sims=n_sims)
# The estimated (simulated) efficiency gap (with/without turnout)
egap_T = self.estimate_efficiency_gap(t=t, Xhyp=del_Xhyp,
n_sims=n_sims,
turnout=mod.model.weights)
egap_noT = self.estimate_efficiency_gap(t=t, Xhyp=del_Xhyp,
n_sims=n_sims,
turnout=False)
agap = self.optimal_attainment_gap(t=t, Xhyp=del_Xhyp,
n_batches=n_batches,
batch_size=batch_size)
rstats.append(np.hstack((obs_egap_t, obs_egap_not,
mbon, obon, egap_T, egap_noT, agap)))
# Reset the model for the time period back to the original model
self.models[t] = original
# prepare to ship everything by building columns & dataframe
rstats = np.vstack(rstats)
cols = (['EGap_eT', 'EGap_enoT']
+ ['{}_{}'.format(name, ptile)
for name in ['MBonus', 'OBonus', 'EGap_T', 'EGap_noT', 'AGap']
for ptile in (5, 50, 95)])
rstats = pd.DataFrame(rstats, columns=cols)
# and the leverage
leverage = pd.DataFrame(np.hstack((np.diag(leverage).reshape(-1, 1),
resid)),
columns=['leverage', 'residual'])
dnames = self._designs[t].district_id
# and the statewide estimates
full_biases = pd.Series(np.hstack((full_obs_egap_T, full_obs_egap_noT,
full_mbon, full_obon,
full_egap_T, full_egap_noT, full_agap))).to_frame().T
full_biases.columns = cols
full_ests = pd.concat(
(self.models[t].params.to_frame().T, full_biases), axis=1)
full_ests['district_id'] = 'statewide'
return pd.concat((full_ests, # stack statewide on top of
pd.concat((dnames.reset_index(drop=True), # district-level results
del_params,
leverage,
rstats),
axis=1, ignore_index=False)),
ignore_index=True, axis=0)
###################################
# Dispatch Table for Uncontesteds #
###################################
def _censor_unc(design, floor=.25, ceil=.75):
"""
    This will clip vote shares to the given floor and ceiling, and flag the clipped rows as uncontested.
"""
indicator = ((design.vote_share > ceil).astype(int) +
(design.vote_share < floor).astype(int) * -1)
design['uncontested'] = indicator
design['vote_share'] = np.clip(design.vote_share,
a_min=floor, a_max=ceil)
return design
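# A minimal sketch of what ``_censor_unc`` does to a toy design (assumed toy
# data, not part of the original module):
#
#     import pandas as pd
#     toy = pd.DataFrame({'vote_share': [0.02, 0.50, 0.99]})
#     toy = _censor_unc(toy, floor=.25, ceil=.75)
#     # toy.uncontested -> [-1, 0, 1]; toy.vote_share -> [0.25, 0.50, 0.75]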
def _shift_unc(design, floor=.05, ceil=.95, lower_to=.25, ceil_to=.75):
"""
This replicates the "uncontested.default" method from JudgeIt, which replaces
the uncontested elections (those outside of the (.05, .95) range) to (.25,.75).
"""
indicator = ((design.vote_share > ceil).astype(int) +
(design.vote_share < floor).astype(int) * -1)
design['uncontested'] = indicator
lowers = design.query('vote_share < @floor').index
ceils = design.query('vote_share > @ceil').index
    design.loc[lowers, 'vote_share'] = lower_to
    design.loc[ceils, 'vote_share'] = ceil_to
return design
def _winsor_unc(design, floor=.25, ceil=.75):
"""
This winsorizes vote shares to a given percentile.
"""
indicator = ((design.vote_share > ceil).astype(int) +
(design.vote_share < floor).astype(int) * -1)
design['uncontested'] = indicator
try:
from scipy.stats.mstats import winsorize
except ImportError:
Warn('Cannot import scipy.stats.mstats.winsorize, censoring instead.',
stacklevel=2)
return _censor_unc(design, floor=floor, ceil=ceil)
# WARNING: the winsorize function here is a little counterintuitive in that
# it requires the ceil limit to be stated as "from the right,"
# so it should be less than .5, just like "floor"
design['vote_share'] = np.asarray(winsorize(design.vote_share,
limits=(floor, 1-ceil)))
return design
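# Note on the ``limits`` quirk above: ``winsorize(x, limits=(l, u))`` clips the
# lowest fraction ``l`` and the highest fraction ``u`` of observations, so
# ``limits=(floor, 1 - ceil)`` with floor=.25, ceil=.75 winsorizes the bottom
# and top quarters of the vote shares. Hedged call sketch (not in the module):
#
#     import numpy as np
#     from scipy.stats.mstats import winsorize
#     clipped = np.asarray(winsorize(np.random.uniform(size=100), limits=(.25, .25)))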
def _drop_unc(design, floor=.05, ceil=.95):
"""
This drops uncontested votes that are more extreme than the provided
floor or ceil.
"""
design['uncontested'] = 0
mask = (design.vote_share < floor) + (design.vote_share > (ceil))
return design[~mask]
def _impute_unc(design, covariates, floor=.25, ceil=.75, fit_params=dict()):
"""
This imputes the uncontested seats according
to the covariates supplied for the model. Notably, this does not
use the previous years' voteshare to predict the imputed voteshare.
"""
try:
import statsmodels.api as sm
except ImportError:
Warn("Must have statsmodels installed to conduct imputation",
category=ImportError, stacklevel=2)
raise
indicator = ((design.vote_share > ceil).astype(int) +
(design.vote_share < floor).astype(int) * -1)
design['uncontested'] = indicator
imputed = []
for yr, contest in design.groupby("year"):
mask = (contest.vote_share < floor) | (contest.vote_share > (ceil))
mask |= contest.vote_share.isnull()
contested = contest[~mask]
uncontested = contest[mask]
unc_ix = uncontested.index
imputor = sm.WLS(contested.vote_share,
sm.add_constant(
contested[covariates], has_constant='add'),
weights=contested.weight).fit(**fit_params)
        contest.loc[unc_ix, 'vote_share'] = imputor.predict(
            sm.add_constant(
                uncontested[covariates],
                has_constant='add'))
imputed.append(contest)
return pd.concat(imputed, axis=0)
def _impute_singlepass(design, covariates, floor=.01, ceil=.99, fit_params=dict()):
"""
Impute the uncontested vote shares using a single-pass strategy. This means that
a model is fit on mutually-contested elections in each year, and then elections
that are uncontested are predicted out of sample. Critically, imputed values
are *not* propagated forward, so that imputation in time t does not affect estimates
for t+1.
"""
try:
import statsmodels.api as sm
except ImportError:
Warn("Must have statsmodels installed to conduct imputation",
category=ImportError, stacklevel=2)
raise
indicator = ((design.vote_share > ceil).astype(int) +
(design.vote_share < floor).astype(int) * -1)
design['uncontested'] = indicator
wide = utils.make_designs(design,
years=design.year,
redistrict=design.get('redistrict'),
district_id='district_id')
results = []
for i, elex in enumerate(wide):
uncontested = elex.query('vote_share in (0,1)')
contested = elex[~elex.index.isin(uncontested.index)]
covs = copy.deepcopy(covariates)
if 'vote_share__prev' in elex.columns:
covs.append('vote_share__prev')
X = contested[covs].values
Xc = sm.add_constant(X, has_constant='add')
Y = contested[['vote_share']]
model = sm.WLS(endog=Y, exog=Xc, weights=contested.weight,
missing='drop').fit(**fit_params)
OOSXc = sm.add_constant(uncontested[covs].values, has_constant='add')
out = model.predict(OOSXc)
        elex.loc[uncontested.index, 'vote_share'] = out
results.append(elex)
results =
|
pd.concat(results, axis=0)
|
pandas.concat
|
from fbprophet import Prophet
from dill import dump, load
import pandas as pd
import os
import json
import datetime
from src.models.model import ModelStrategy
from src.visualization.visualize import plot_prophet_components, plot_prophet_forecast
class ProphetModel(ModelStrategy):
'''
A class representing a Prophet model and standard operations on it
'''
def __init__(self, hparams, log_dir=None):
univariate = True
name = 'Prophet'
self.changepoint_prior_scale = hparams.get('CHANGEPOINT_PRIOR_SCALE', 0.05)
self.seasonality_prior_scale = hparams.get('SEASONALITY_PRIOR_SCALE', 10)
self.holidays_prior_scale = hparams.get('HOLIDAYS_PRIOR_SCALE', 10)
self.seasonality_mode = hparams.get('SEASONALITY_MODE', 'additive')
self.changepoint_range = hparams.get('CHANGEPOINT_RANGE', 0.95)
self.country = hparams.get('COUNTRY', 'CA')
self.future_prediction = None
# Build DataFrame of local holidays
if hparams.get('HOLIDAYS', None) is None:
self.local_holidays = None
else:
holiday_dfs = []
for holiday in hparams.get('HOLIDAYS', []):
holiday_dfs.append(pd.DataFrame({
'holiday': holiday,
'ds':
|
pd.to_datetime(hparams['HOLIDAYS'][holiday])
|
pandas.to_datetime
|
import argparse
import os
import sys
from datetime import date
import numpy
import pandas as pd
from sklearn.externals import joblib
from sklearn.model_selection import cross_validate
from plotting import plot_learning_curve
from utils import load_data, load_data_and_split, load_prediction_data, make_sgd_classifier, one_hot
def main(args):
path = os.getcwd()
parent = os.path.dirname(path)
k = 5
logreg = make_sgd_classifier()
if args.plot:
x, y, x_transformer = load_data(os.path.join(parent, 'data', args.training_data))
# Use n_jobs=-1 to make use of all cores.
plt = plot_learning_curve(
logreg, 'Logistic regression: Accuracy / Training example', x, y.argmax(axis=1), cv=k, n_jobs=-1)
plt.show()
elif args.test_learning_rate:
x, y, x_transformer = load_data(os.path.join(parent, 'data', args.training_data))
eta0s = [0.00001, 0.00003, 0.0001, 0.0003, 0.001, 0.003, 0.01, 0.03]
for eta0 in eta0s:
logreg_eta = make_sgd_classifier(eta0=eta0)
results = cross_validate(logreg_eta, x, y.argmax(axis=1), cv=k, n_jobs=-1, return_train_score=True)
train_score = numpy.mean(results['train_score'])
test_score = numpy.mean(results['test_score'])
print(f'Eta0 {eta0}; Train score {train_score}; Test score {test_score}')
elif args.test_k_fold:
x, y, x_transformer = load_data(os.path.join(parent, 'data', args.training_data))
ks = [3, 5, 7, 10]
for k in ks:
logreg = make_sgd_classifier()
results = cross_validate(logreg, x, y.argmax(axis=1), cv=k, n_jobs=-1, return_train_score=True)
train_score = numpy.mean(results['train_score'])
test_score = numpy.mean(results['test_score'])
print(f'K {k}; Train score {train_score}; Test score {test_score}')
else:
train_x, train_y_non_one_hot, validation_x, validation_y, x_transformer = load_data_and_split(
os.path.join(parent, 'data', args.training_data), k=k)
train_y = one_hot(train_y_non_one_hot)
if args.load_model:
logreg = joblib.load(args.load_model)
else:
logreg.fit(train_x, train_y.argmax(axis=1))
print('Train score: {}'.format(logreg.score(train_x, train_y.argmax(axis=1))))
print('Validation score: {}'.format(logreg.score(validation_x, validation_y.argmax(axis=1))))
if args.predict or args.predict_proba:
predict_data, y, timestamps = load_prediction_data(args.training_data, args.predict, x_transformer)
if args.predict:
predictions = logreg.predict(predict_data)
results = pd.DataFrame(data={'label': y, 'prediction': predictions}, index=timestamps)
print(results)
if args.predict_proba:
predictions = logreg.predict_proba(predict_data)
results =
|
pd.DataFrame(data=predictions, index=timestamps)
|
pandas.DataFrame
|
import copy
import numpy as np
import pandas as pd
from switchwrapper.helpers import (
branch_indices_to_bus_tuple,
match_variables,
recover_branch_indices,
recover_plant_indices,
recover_storage_buses,
split_plant_existing_expansion,
)
def construct_grids_from_switch_results(grid, results):
"""Using the original Grid and Switch expansion results, construct expanded Grid(s).
:param powersimdata.input.grid.Grid grid: Grid instance.
:param pyomo.opt.results.results_.SolverResults results: results from Switch.
:return: (*dict*) -- keys are integers representing the expansion year, values are
Grid objects.
"""
# Extract the upgrade information from the Switch results
build_gen, build_tx, build_storage_energy = extract_build_decisions(results)
# Add this information to the existing grid to create new grids
all_grids = create_upgraded_grids(grid, build_gen, build_tx, build_storage_energy)
return all_grids
def create_upgraded_grids(grid, build_gen, build_tx, build_storage_energy):
"""Add upgrades to existing Grid.
:param powersimdata.input.grid.Grid grid: Grid instance.
:param pandas.DataFrame build_gen: generation expansion decisions.
:param pandas.DataFrame build_tx: transmission expansion decisions.
:param pandas.DataFrame build_storage_energy: storage energy expansion decisions.
:return: (*dict*) -- keys are integers representing the expansion year, values are
Grid objects.
"""
# Build a Grid for each investment year
all_grids = {}
for year in build_tx.year.unique():
# Start with a copy of the original grid (except data_loc, no longer applies)
output_grid = copy.deepcopy(grid)
output_grid.data_loc = None
# Then make additions based on each year's upgrade results
add_tx_upgrades_to_grid(output_grid, build_tx, year)
add_storage_upgrades_to_grid(output_grid, build_gen, build_storage_energy, year)
add_gen_upgrades_to_grid(output_grid, build_gen, year)
# Finally, save
all_grids[year] = output_grid
return all_grids
def add_tx_upgrades_to_grid(grid, build_tx, year):
"""Add transmission upgrades to existing Grid. Note: modifies the grid inplace.
:param powersimdata.input.grid.Grid grid: Grid instance.
:param pandas.DataFrame build_tx: transmission expansion decisions.
:param int year: upgrades year to apply upgrades from.
"""
# Create mapping between Switch branch indices and Grid branch indices
ac_branch_ids, dc_branch_ids = recover_branch_indices(build_tx["tx_id"])
ac_id_unmapping = pd.Series(ac_branch_ids.index, index=ac_branch_ids)
sorted_branch_to_from = [
tuple(sorted(t))
for t in grid.branch[["to_bus_id", "from_bus_id"]].to_numpy().tolist()
]
# Calculate total branch capacity per combination of to/from bus ID
to_from_ac, _ = branch_indices_to_bus_tuple(grid)
to_from_capacity = grid.branch.groupby(["to_bus_id", "from_bus_id"]).rateA.sum()
to_from_capacity.index = to_from_capacity.index.map(lambda x: tuple(sorted(x)))
to_from_capacity = to_from_capacity.groupby(to_from_capacity.index).sum()
# Filter to upgrades in each year, and separate transmission by AC or DC
ac_upgrades = build_tx.query(
"year == @year and tx_id in @ac_branch_ids and capacity > 0"
)
dc_upgrades = build_tx.query(
"year == @year and tx_id in @dc_branch_ids and capacity > 0"
)
# Calculate AC upgrades (total path upgrade / total path starting capacity)
original_index_upgrades = pd.Series(
ac_upgrades.capacity.to_numpy(),
index=ac_upgrades.tx_id.map(ac_id_unmapping),
)
# Ignore upgrades to lines with unlimited original capacity
original_index_upgrades = original_index_upgrades.loc[grid.branch.rateA > 0]
sorted_upgrade_indices = original_index_upgrades.index.map(
lambda x: tuple(sorted(to_from_ac.loc[x]))
).tolist()
to_from_ac_upgrades = pd.Series(
original_index_upgrades.tolist(),
index=sorted_upgrade_indices,
dtype=float,
)
to_from_ac_upgrades = to_from_ac_upgrades.groupby(to_from_ac_upgrades.index).sum()
to_from_ac_upgrade_ratios = 1 + to_from_ac_upgrades / to_from_capacity
with pd.option_context("mode.use_inf_as_na", True):
to_from_ac_upgrade_ratios.fillna(1, inplace=True)
ac_branch_upgrade_ratios = pd.Series(
to_from_ac_upgrade_ratios.loc[sorted_branch_to_from].tolist(),
index=grid.branch.index.tolist(),
)
# Apply AC upgrades (no new branches, scale up rateA and scale down impedance)
grid.branch.rateA.update(grid.branch.rateA * ac_branch_upgrade_ratios)
impedance_updates = grid.branch.x / ac_branch_upgrade_ratios
# Don't update impedance for lines with unlimited original capacity
impedance_updates = impedance_updates.loc[grid.branch.rateA > 0]
grid.branch.x.update(impedance_updates)
# Apply DC upgrades (no new lines, add to Pmax and subtract from Pmin)
dc_upgrades = dc_upgrades.reindex(grid.dcline.index).fillna(0)
grid.dcline.Pmax += dc_upgrades.capacity
grid.dcline.Pmin -= dc_upgrades.capacity
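# Toy illustration of the AC scaling logic above (assumed numbers, not from
# Switch or powersimdata): if a to/from bus pair has 300 MW of existing
# capacity across its branches and Switch builds 150 MW more along that path,
# the upgrade ratio is 1 + 150/300 = 1.5, so every branch on the path gets
# rateA * 1.5 and reactance x / 1.5.
#
#     import pandas as pd
#     existing = pd.Series({('A', 'B'): 300.0})   # MW per bus pair
#     built = pd.Series({('A', 'B'): 150.0})      # MW added by Switch
#     ratio = 1 + built / existing                # -> 1.5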
def add_gen_upgrades_to_grid(grid, build_gen, year):
"""Add generation upgrades to existing Grid. Note: modifies the grid inplace.
:param powersimdata.input.grid.Grid grid: Grid instance.
:param pandas.DataFrame build_gen: generation expansion decisions
(including storage).
:param int year: upgrades year to apply upgrades from.
"""
# Extract indices
plant_ids, _ = recover_plant_indices(build_gen["gen_id"], grid.plant.index.max())
existing_plant_ids, expansion_plant_ids = split_plant_existing_expansion(plant_ids)
num_original_plants = len(existing_plant_ids)
new_plant_ids = plant_ids.iloc[num_original_plants:]
new_plant_id_unmapping =
|
pd.Series(new_plant_ids.index, index=new_plant_ids)
|
pandas.Series
|
import json
import plotly
import pandas as pd
import joblib
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.base import BaseEstimator, TransformerMixin
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sqlalchemy import create_engine
app = Flask(__name__)
def tokenize(text):
"""
Passed string is normalized, lemmatized, and tokenized
Parameters
-----------
text : str
text to be tokenized
Returns
----------
clean_tokens : list
Contains generated tokens
"""
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
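# Hedged usage sketch for tokenize() (illustration only; assumes the NLTK
# punkt and wordnet data are installed, output may vary slightly by version):
#
#     tokenize("Flooding is causing damages in the cities")
#     # -> ['flooding', 'is', 'causing', 'damage', 'in', 'the', 'city']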
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
"""
This transformer class extract the starting verb of a sentence
"""
def starting_verb(self, text):
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
            pos_tags = nltk.pos_tag(tokenize(sentence))
            if not pos_tags:  # skip sentences with no tokens
                continue
            first_word, first_tag = pos_tags[0]
            if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return True
return False
def fit(self, X, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.starting_verb)
return
|
pd.DataFrame(X_tagged)
|
pandas.DataFrame
|
#!/usr/bin/env python
""" Quartet DNAseq Report plugin module """
from __future__ import print_function
from collections import OrderedDict
import logging
import os
import pandas as pd
import numpy as np
import seaborn as sns
from multiqc import config
from multiqc.plots import table, scatter
from multiqc.modules.base_module import BaseMultiqcModule
# Initialise the main MultiQC logger
log = logging.getLogger('multiqc')
class MultiqcModule(BaseMultiqcModule):
def __init__(self):
# Halt execution if we've disabled the plugin
if config.kwargs.get('disable_plugin', True):
return None
# Initialise the parent module Class object
super(MultiqcModule, self).__init__(
name='Variant Calling Quality Control',
target='Variant calling QC',
            info=' is a report module to show quality assessment of the variant calling.'
)
# Find and load input files
## historical batches' performance
ref_fn = "history.txt"
reference_path = os.path.join(os.path.dirname(__file__), 'reference_datasets', ref_fn)
history_df = pd.read_csv(reference_path, sep='\t')
if len(history_df) == 0:
log.debug('No file matched: variant_calling_qc/reference_datasets - history.txt')
### SUMMARY TABLE 1
snv_indel_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
from collections import Counter, OrderedDict
from itertools import product
import os
import re
import sys
import string
import time
import geopandas
import matplotlib.pyplot as plt
from itertools import permutations
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import numpy as np
import pandas as pd
from palettable.colorbrewer.qualitative import Paired_12, Set1_9, Dark2_8
from palettable.colorbrewer.diverging import RdYlGn_6, RdYlGn_11
from palettable.cmocean.sequential import Haline_16, Thermal_10
import seaborn as sns
from scipy.spatial.distance import jensenshannon
from scipy.stats import linregress, pearsonr
from shapely.geometry.point import Point
import biases
import graphs
import utils
mpl.rcParams['text.usetex'] = True
mpl.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}']
BASE_DIR = '/home/johnmcbride/projects/Scales/Data_compare/'
SRC_DIR = "/home/johnmcbride/projects/Scales/Data_compare/Src"
DATA_DIR = '/home/johnmcbride/projects/Scales/Data_compare/Data_for_figs/'
REAL_DIR = '/home/johnmcbride/projects/Scales/Data_compare/Processed/Real'
FIG_DIR = '/home/johnmcbride/Dropbox/phd/LaTEX/Scales/Figures'
###########################
### FIG 1 ###########
###########################
### See Distinguishability folder for code
###########################
### FIG 3 ###########
###########################
def sensitivity_all(df1, df2, X='logq', Y1='JSD'):
fig = plt.figure(figsize=(12,12))
gs = gridspec.GridSpec(5,3, width_ratios=[1, .3, 1], height_ratios=[1, 1, .4, 1, 1])
gs.update(wspace=0.00 ,hspace=0.20)
ax = [fig.add_subplot(gs[0,0]), fig.add_subplot(gs[1,0]),
fig.add_subplot(gs[0,2]), fig.add_subplot(gs[1,2]),
fig.add_subplot(gs[3,0]), fig.add_subplot(gs[4,0]),
fig.add_subplot(gs[3,2]), fig.add_subplot(gs[4,2])]
col = [(0,0,0)] + list(np.array(Thermal_10.mpl_colors)[[2,5,8]])
al = 0.7
# df.loc[df.bias=='Nhs_n1_w10', 'bias'] = 'hs_n1_w10'
# df.loc[df.bias=='Nhs_n1_w20', 'bias'] = 'hs_n1_w20'
# if df.JSD.max() < 0.1:
# df['JSD'] = df['JSD'] * 1000.
# if df1.JSD.max() < 0.1:
# df1['JSD'] = df1['JSD'] * 1000.
# if df2.JSD.max() < 0.1:
# df2['JSD'] = df2['JSD'] * 1000.
ddf = df1.loc[(df1.n_notes==7)&(df1.bias_group=='TRANS')&(df1.min_int==80)&(df1.max_int==1200)].reset_index(drop=True)
biases = [f"TRANS_{i}" for i in range(1,4)]
all_b = biases
lbl = [r'n={0}'.format(n) for n in range(1,4)]
for i, bias in enumerate(biases):
for j, Y in enumerate(['JSD', 'fr_10']):
if j:
sns.scatterplot(x=X, y=Y, data=ddf.loc[(ddf.bias==bias)], ax=ax[j+4], c=col[i])
# x_fit, y_fit, popt = graphs.simple_fit(ddf.loc[(ddf.bias==bias), X], ddf.loc[(ddf.bias==bias), Y])
else:
sns.scatterplot(x=X, y=Y, data=ddf.loc[(ddf.bias==bias)], ax=ax[j+4], label=lbl[i], c=col[i])
fn = lambda x, a, b, c: a*x**2 + b*x + c
# x_fit, y_fit, popt = graphs.simple_fit(ddf.loc[(ddf.bias==bias), X], ddf.loc[(ddf.bias==bias), Y], fit_fn=fn)
# ax[j+4].plot(x_fit, y_fit, c=col[i], alpha=al)
ddf = df2.loc[(df2.n_notes==7)&(df2.bias_group=='HAR')&(df2.min_int==80)&(df2.max_int==1200)&(df2.fName.apply(lambda x: len(x.split('_'))==10))].reset_index(drop=True)
biases = [f"HAR_{w}_1" for w in [5,10,15,20]]
all_b += biases
lbl = [r'w={0}'.format(w*2) for w in range(5,25,5)]
for i, bias in enumerate(biases):
for j, Y in enumerate(['JSD', 'fr_10']):
if j:
sns.scatterplot(x=X, y=Y, data=ddf.loc[(ddf.bias==bias)], ax=ax[j], c=col[i])
else:
sns.scatterplot(x=X, y=Y, data=ddf.loc[(ddf.bias==bias)], ax=ax[j], label=lbl[i], c=col[i])
# x_fit, y_fit, popt = graphs.simple_fit(ddf.loc[(ddf.bias==bias), X], ddf.loc[(ddf.bias==bias), Y])
# ax[j].plot(x_fit, y_fit, c=col[i], alpha=al)
ddf = df2.loc[(df2.n_notes==7)&(df2.bias_group=='FIF')&(df2.min_int==80)&(df2.max_int==1200)].reset_index(drop=True)
biases = [f"FIF_{w}" for w in [5,10,15,20]]
all_b += biases
for i, bias in enumerate(biases):
for j, Y in enumerate(['JSD', 'fr_10']):
if j:
sns.scatterplot(x=X, y=Y, data=ddf.loc[(ddf.bias==bias)], ax=ax[j+2], c=col[i])
else:
sns.scatterplot(x=X, y=Y, data=ddf.loc[(ddf.bias==bias)], ax=ax[j+2], label=lbl[i], c=col[i])
# x_fit, y_fit, popt = graphs.simple_fit(ddf.loc[(ddf.bias==bias), X], ddf.loc[(ddf.bias==bias), Y])
# ax[j+2].plot(x_fit, y_fit, c=col[i], alpha=al)
df = pd.concat([df1, df2], ignore_index=True).reset_index(drop=True)
df = df.loc[(df.logq > -5.5)].reset_index(drop=True)
df = df.loc[(df.n_notes==7)&(df.bias.apply(lambda x: x in all_b))&(df.max_int==1200)&(df.logq>-5)].reset_index(drop=True)
    sns.boxenplot(x='min_int', y='JSD', data=df, ax=ax[-2])
    sns.boxenplot(x='min_int', y='fr_10', data=df, ax=ax[-1])
ax = np.array(ax)
txt = ['HAR', 'FIF', 'TRANS']
for a in ax[:6]:
a.set_xlim(-5.5, 0)
for a in ax[[0, 2, 4, 6]]:
a.set_ylabel('JSD')
for a in ax[[1, 3, 5, 7]]:
a.set_xlabel(r'$\log_{10}q$')
a.set_ylabel(r'$f_\textrm{D}$')
for i, a in enumerate(ax[[0, 2, 4]]):
# a.set_ylim(0.0, 0.5)
a.set_title(txt[i], fontsize=16)
for a in ax[[0,2]]:
a.legend(bbox_to_anchor=(0.90, 0.80), frameon=False, ncol=2, handletextpad=0, columnspacing=0)
ax[4].legend(bbox_to_anchor=(1.00, 0.40), frameon=False, ncol=2, handletextpad=0, columnspacing=0)
for a in ax[[0, 2, 4, 6]]:
a.set_xticks([])
a.set_xlabel('')
for a in ax[[1, 3, 5]]:
a.set_ylim(0, 0.40)
ax[7].set_xlabel(r'$I_{\textrm{min}}$')
# X = [-0.11, -0.11, -0.27, -0.17]
# Y = [1.05, 1.05, 1.02, 1.02]
for i, a in enumerate(ax[[0,2,4,6]]):
a.text(-.17, 1.05, string.ascii_uppercase[i], transform=a.transAxes, weight='bold', fontsize=16)
fig.savefig(os.path.join(FIG_DIR, 'sensitivity.pdf'), bbox_inches='tight')
###########################
### FIG 4 ###########
###########################
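# Fig 4: two-interval statistics. For n-note scales, classify adjacent intervals as
# small / medium / large relative to the equidistant step 1200/n (within a window w),
# count the categories of neighbouring-interval pairs, and compare the MIN, TRANS, HAR
# and FIF models and the data (DAT) against the expectation for independent intervals.
# Output: mixing_categories.pdf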
def plot_2note_probabilities(df_real, paths, n=7, w=0.2):
fig, ax = plt.subplots(2,5, figsize=(10,6))
plt.subplots_adjust(hspace=0.80) #wspace=0.3, hspace=0.2)
xxx_1 = np.arange(3)
xxx_2 = np.arange(4)
width_1 = 0.8
width_2 = 0.4
lbls_1 = ['S', 'M', 'L']
lbls_2 = ['M\nM', 'M\nX', 'X\nE', 'X\nO']
lbls_3 = ['MIN', 'TRANS', 'HAR', 'FIF', 'DAT']
df_list = [pd.read_feather(paths[l][n]) for l in lbls_3[:4]] + [df_real]
hist_1 = np.zeros(3)
hist_2 = np.zeros(4)
hist_3 = np.zeros(4)
col1 = RdYlGn_6.hex_colors
col2 = Paired_12.mpl_colors
col = [col2[6], col2[0], col2[2], col2[4], 'k']
    for i, df in enumerate(df_list):
        # re-zero the histograms for each model so counts do not carry over between panels
        hist_1 = np.zeros(3)
        hist_2 = np.zeros(4)
        hist_3 = np.zeros(4)
pair_ints = np.array([np.array([float(x) for x in y.split(';')]) for y in df.loc[df.n_notes==n,'pair_ints']])
bins = [0] + [1200./n * (1. + i * w) for i in [-1, 1]]
total_scales = len(pair_ints)
for y in pair_ints:
for j in range(len(y)):
if y[j-1] < bins[1]:
hist_1[0] += 1
elif bins[1] <= y[j-1] < bins[2]:
hist_1[1] += 1
else:
hist_1[2] += 1
k = j-1
if k == -1:
k = n-1
if y[k] < bins[1] and y[j] < bins[1]:
hist_2[2] += 1
elif y[k] >= bins[2] and y[j] >= bins[2]:
hist_2[2] += 1
elif y[j] < bins[1] and y[k] >= bins[2]:
hist_2[3] += 1
elif y[k] < bins[1] and y[j] >= bins[2]:
hist_2[3] += 1
elif bins[1] <= y[k] < bins[2] and bins[1] <= y[j] < bins[2]:
hist_2[0] += 1
elif bins[1] <= y[k] < bins[2] and not bins[1] <= y[j] < bins[2]:
hist_2[1] += 1
elif bins[1] <= y[j] < bins[2] and not bins[1] <= y[k] < bins[2]:
hist_2[1] += 1
hist_1 /= float(total_scales * n)
hist_2 /= float(total_scales * n)
hist_3[0] = hist_1[1]**2
hist_3[1] = hist_1[1] * sum(hist_1[[0,2]]) * 2
hist_3[2] = hist_1[0]**2 + hist_1[2]**2
hist_3[3] = hist_1[0]*hist_1[2]*2
ax[0,i].bar(xxx_1, hist_1, width_1, color=col[i], edgecolor='grey')
ax[0,i].set_xticks(xxx_1)
ax[0,i].set_xticklabels(lbls_1)
ax[1,i].bar(xxx_2, hist_2, width_2, color=col[i], label=lbls_3[i], edgecolor='grey')
ax[1,i].bar(xxx_2+width_2, hist_3, width_2, label='rand', color='w', edgecolor='grey', hatch='///')
ax[1,i].set_xticks(xxx_2)
ax[1,i].set_xticklabels(lbls_2, rotation=00)
ax[1,i].legend(bbox_to_anchor=(1.1, 1.50), frameon=False)
### 2gram distribution
# dist = utils.get_2grams_dist(df.loc[df.n_notes==7], dI=60)
# sns.heatmap(np.log(dist+0.1), label=str(n), ax=ax[0,i])
# ax[0,i].invert_yaxis()
# ax[0,i].set_title(lbls_3[i])
for a in ax[0,:]:
a.set_ylim(0, 0.58)
for a in ax[1,:]:
a.set_ylim(0, 0.52)
for a in ax[:,1:].ravel():
a.set_yticks([])
ax[0,0].set_ylabel('Probability')
ax[1,0].set_ylabel('Probability')
for i, a in enumerate(ax[:,0]):
a.text(-.50, 1.05, string.ascii_uppercase[i], transform=a.transAxes, weight='bold', fontsize=16)
plt.savefig(os.path.join(FIG_DIR, 'mixing_categories.pdf'), bbox_inches='tight')
###########################
### FIG 5 ###########
###########################
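# Fig 5: effect of interval mixing. For the HAR, FIF and TRANS biases, compare the
# fraction of real scales reproduced (fr_10) with the fraction reproduced by the
# well-mixed counterparts (mfr_10), with a linear regression per bias family.
# Output: mixing.pdf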
def plot_mixing_effects(df1, df2, n=7):
df = pd.concat([df1, df2], ignore_index=True).reset_index(drop=True)
biases = [[f"HAR_{w:2d}" for w in [5,10,15,20]] + [f"HAR_{w:2d}_1" for w in [5,10,15,20]]]
biases.append([f"FIF_{w:2d}" for w in [5,10,15,20]])
biases.append([f"TRANS_{i}" for i in range(1,4)])
fig, ax = plt.subplots(figsize=(10,10))
lbl = ['HAR', 'FIF', 'TRANS']
col = np.array(Paired_12.mpl_colors)[[3,5,1]]
ax.plot([0,.40], [0,.40], '-', c='k')
for i, bias in enumerate(biases):
print(n, bias)
ddf = utils.return_beta_below_optimum(df, bias, n)
print(len(ddf))
print(ddf.mfr_10.sum() / ddf.fr_10.sum())
sns.scatterplot(x='fr_10', y='mfr_10', data=ddf, label=lbl[i], alpha=0.5, c=col[i])
if len(ddf):
reg = linregress(ddf['fr_10'].values.astype(float), ddf['mfr_10'].values.astype(float))
X2 = np.arange(0,0.5, 0.1)
ax.plot(X2, reg.intercept + reg.slope * X2, '-', c=col[i])
ax.annotate(r"$y={0:4.2f} + {1:4.2f}x$".format(reg.intercept, reg.slope), (0.30, 0.05+i*0.03), color=col[i])
ax.set_xlabel(r'Original $f_\textrm{D}$')
ax.set_ylabel(r'Well-mixed scales $f_\textrm{D}$')
ax.legend(loc='best', frameon=False)
ax.set_xlim(0, 0.4)
ax.set_ylim(0, 0.4)
plt.savefig(os.path.join(FIG_DIR, 'mixing.pdf'), bbox_inches='tight')
###########################
### FIG 6 ###########
###########################
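# Fig 6: harmonicity metrics. Left: |Pearson r| for published harmonicity models and
# FIF (loaded from gill_vs_model_correlations.npy). Right: heatmap of the correlation
# between the FIF score (fraction of intervals within w cents of 702) and the HAR
# score over a grid of w and m. Output: har_metric_correlations.pdf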
def harmonic_model_correlation(df, corr_mat='None', confidence='None'):
n_arr = np.array([1, 2, 3, 5, 10])
w_arr = np.array([5, 10, 15, 20])
if isinstance(corr_mat, str):
corr_mat = np.zeros((w_arr.size, n_arr.size), dtype=float)
confidence = np.zeros((w_arr.size, n_arr.size), dtype=float)
for i, w in enumerate(w_arr):
FIF = [float(len([z for z in y.split(';') if abs(702-int(z)) <= w]) / len(y.split(';'))) for y in df.all_ints2]
for j, n in enumerate(n_arr):
att = utils.get_attractors(n, diff=w)
HAR = df.all_ints2.apply(lambda x: np.mean([utils.get_similarity_of_nearest_attractor(int(y), att[1], att[3]) for y in x.split(';')]))
corr = pearsonr(FIF, HAR)
corr_mat[i,j] = corr[0]
confidence[i,j] = corr[1]
xi, yi = np.meshgrid(n_arr, w_arr)
df_heat = pd.DataFrame(data={r'$m$':xi.ravel(), r'$w$':yi.ravel(), "R":corr_mat.ravel()})
df_heat[r'$w$'] = df_heat[r'$w$'] * 2
fig = plt.figure(figsize=(10,5))
gs = gridspec.GridSpec(1,2, width_ratios=[1, 1])
gs.update(wspace=0.40 ,hspace=0.00)
ax = [fig.add_subplot(gs[0]), fig.add_subplot(gs[1])]
sns.set(font_scale=1.5)
sns.set_style('white')
sns.set_style("ticks", {'ytick.major.size':4})
models = np.load("/home/johnmcbride/projects/Scales/Vocal/gill_vs_model_correlations.npy")
fif = 0.419
corr = list(np.array(models[1], dtype=float)) + [fif]
lbls = ["Harrison 2018", "Milne 2013", "Pancutt 1988", "Parncutt 1994", "Stolzenburg 2015", "FIF"]
ax[0].bar(range(len(corr)), np.abs(corr), color='white', edgecolor='k', ecolor='k')
for i in range(len(corr)):
ax[0].annotate(f"{abs(corr[i]):4.2f}", (i-0.29, abs(corr[i])+0.05), fontsize=14)
ax[0].spines['top'].set_visible(False)
ax[0].spines['right'].set_visible(False)
ax[0].set_xticks(range(len(corr)))
ax[0].set_xticklabels(lbls, rotation=90)
ax[0].set_ylim(0,1.3)
ax[0].set_ylabel(r"Pearson's $r$")
for i, a in enumerate(ax):
a.text(-.15, 1.05, string.ascii_uppercase[i], transform=a.transAxes, weight='bold', fontsize=16)
sns.heatmap(df_heat.pivot(r'$m$', r'$w$', 'R'), ax=ax[1], annot=corr_mat.T, cbar_kws={'label':r"Pearson's $r$"})
ax[1].invert_yaxis()
plt.savefig(os.path.join(FIG_DIR, 'har_metric_correlations.pdf'), bbox_inches='tight')
return corr_mat, confidence
###########################
### FIG 7 ###########
###########################
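# Fig 7 (SI): which empirical scales are reproduced by the TRANS, FIF and HAR models
# (scale indices taken from the ss_w10 columns for N = 4-9). Panels: (A) counts per
# scale cluster for each pair of models, (B) theory vs measured scales, (C) counts by
# region; scales with min_int < 70 or a non-octave span are marked 'prohibited'.
# Output: si_found_scales.pdf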
def found_scales(paths, df):
fig = plt.figure(figsize=(10,18))
gs = gridspec.GridSpec(3,3, width_ratios=[1, 1, 1], height_ratios=[1.2, 0.2, 0.6])
gs.update(wspace=0.20 ,hspace=0.60)
ax = np.array([[fig.add_subplot(gs[i,j]) for j in range(3)] for i in range(3)])
theoryA = "TRANS"
theoryB = "FIF"
theoryC = "HAR"
idx1 = list(set([int(i) for n in range(4,10) for ss in pd.read_feather(paths[theoryA][n])["ss_w10"].values for i in ss.split(';') if len(i)]))
idx2 = list(set([int(i) for n in range(4,10) for ss in pd.read_feather(paths[theoryB][n])["ss_w10"].values for i in ss.split(';') if len(i)]))
idx3 = list(set([int(i) for n in range(4,10) for ss in pd.read_feather(paths[theoryC][n])["ss_w10"].values for i in ss.split(';') if len(i)]))
truth_table = [lambda x, idx1, idx2: x not in idx1 and x not in idx2,
lambda x, idx1, idx2: x in idx1 and x not in idx2,
lambda x, idx1, idx2: x not in idx1 and x in idx2,
lambda x, idx1, idx2: x in idx1 and x in idx2,
lambda x, idx1, idx2: x in idx1 or x in idx2]
lbls = [['neither', a, b, 'both'] for a, b in zip([theoryA, theoryA, theoryC], [theoryB, theoryC, theoryB])]
cat = 'cl_16'
c_lbls = 'abcdefghijklmnop'
ft = 12
width = 0.80
###############################
### Stacked bar chart
###############################
new_red = [min(1, x) for x in np.array(Paired_12.mpl_colors[5])*1.0]
new_blu = [min(1, x) for x in np.array(Paired_12.mpl_colors[1])*1.0]
new_gre = [min(1, x) for x in np.array(Paired_12.mpl_colors[3])*1.0]
col = [[0.8]*3, new_blu, new_red, [.5]*3]
al = [1] + [0.7]*2 + [1]
tots = {k:float(len(df.loc[(df.n_notes<=9)&(df.n_notes>=4)&(df[cat]==k)])) for k in df[cat].unique()}
uniq = sorted([x for x in df.loc[df[cat].notnull(),cat].unique()])
idx = [i for i in df.index if truth_table[-1](i, idx1, idx2)]
parts = {k:float(len(df.loc[(df.n_notes<=9)&(df.n_notes>=4)&(df[cat]==k)&([True if x in idx else False for x in df.index])])) for k in df[cat].unique()}
fracs = [parts[k] / tots[k] for k in uniq]
# idxsort = [9, 4, 10, 6, 8, 3, 11, 5, 0, 15, 1, 14, 7, 13, 2, 12]
idxsort = [14, 15, 3, 13, 2, 7, 11, 0, 1, 6, 4, 12, 8, 10, 9, 5]
idx_list = zip([idx1, idx1, idx3], [idx2, idx3, idx2])
cols = [[[0.8]*3, a, b, [.5]*3] for a, b in zip([new_blu, new_blu, new_gre], [new_red, new_gre, new_red])]
for j, idx_set in enumerate(idx_list):
base = np.zeros(len(c_lbls))
for i, tt in enumerate(truth_table[:4]):
idx = [i for i in df.index if tt(i, idx_set[0], idx_set[1])]
cnts = df.loc[idx, cat].value_counts()
Y = np.array([cnts[k] if k in cnts.keys() else 0 for k in uniq])
# print(f"{lbls[j][i]} total: {Y.sum()}")
X = np.arange(1,len(uniq)+1)[::-1]
Y = Y[idxsort]
ax[0,j].barh(X, Y, width, left=base, color=cols[j][i], label=lbls[j][i], alpha=al[i])
base = Y + base
ax[0,j].set_ylim(0.5, 16.5)
ax[0,j].set_yticks(range(1,17)[::-1])
ax[0,j].set_xticks(np.arange(0,125,25))
ax[0,j].set_xticklabels(np.arange(0,125,25), fontsize=ft+4)
ax[0,j].set_xlabel(f"scales found", fontsize=ft+4)
ax[0,j].set_yticklabels(list(c_lbls), fontsize=ft+4, ha="center")
ax[0,j].tick_params(axis='y', which='major', pad=8, width=0.5)
ax[0,j].spines['top'].set_visible(False)
ax[0,j].spines['right'].set_visible(False)
handles = [mpatches.Patch(color=c, label=l) for c,l in zip([[0.8]*3, new_blu, new_red, new_gre, [0.5]*3], ['neither', 'TRANS', 'FIF', 'HAR', 'both'])]
ax[0,1].legend(loc='center right', bbox_to_anchor=( 2.00, 1.10), handles=handles, frameon=False, ncol=5, fontsize=ft+2, columnspacing=2.2)
cols = [['k']*3, [[0.8]*3]*3, [new_blu, new_red, new_gre]]
lbls = [theoryA, theoryB, theoryC]
width = 0.5
al = [1, 1, 0.7]
X = range(2)[::-1]
for i, idx in enumerate([idx1, idx2, idx3]):
base = np.array([len(df.loc[((df.min_int<70)|(np.abs(df.octave-1200)>10))&(df.Theory==s)]) for s in ['Y', 'N']])
ax[1,i].barh(X, base, width, color=cols[0][i], alpha=0.9)
for j, tt in enumerate([False, True]):
count = np.array([df.loc[(df.min_int>=70)&(np.abs(df.octave-1200)<=10)&([(i in idx)==tt for i in df.index]), 'Theory'].value_counts()[s] for s in ['Y', 'N']])
ax[1,i].barh(X, count, width, left=base, color=cols[j+1][i], alpha=al[j+1])
base += count
ax[1,i].set_yticks([])
ax[1,i].set_xticks(np.arange(0,310,100))
ax[1,i].set_xticklabels(np.arange(0,310,100), fontsize=ft+4)
ax[1,i].set_xlabel("Number of scales", fontsize=ft+4)
ax[1,0].set_yticks(X)
ax[1,0].set_yticklabels(['Theory', 'Measured'], fontsize=ft+4)
handles = [mpatches.Patch(color=c, label=l) for c,l in zip(['k', [0.8]*3, new_blu, new_red, new_gre], ['prohibited', 'not found', 'TRANS', 'FIF', 'HAR'])]
ax[1,2].legend(loc='center right', bbox_to_anchor=(1.00, 1.40), handles=handles, frameon=False, ncol=5, fontsize=ft+2, columnspacing=2.2)
Cont = ['Western', 'Middle East', 'South Asia', 'East Asia', 'South East Asia', 'Africa', 'Oceania', 'South America']
X = np.arange(len(Cont))[::-1]
width = 0.5
for i, idx in enumerate([idx1, idx2, idx3]):
base = np.array([len(df.loc[((df.min_int<70)|(np.abs(df.octave-1200)>10))&(df.Continent==s)]) for s in Cont])
ax[2,i].barh(X, base, width, color=cols[0][i], alpha=0.9)
for j, tt in enumerate([False, True]):
count = np.array([len(df.loc[(df.min_int>=70)&(np.abs(df.octave-1200)<=10)&([(i in idx)==tt for i in df.index])&(df.Continent==s)]) for s in Cont])
ax[2,i].barh(X, count, width, left=base, color=cols[j+1][i], alpha=al[j+1])
base += count
ax[2,i].set_yticks([])
ax[2,i].set_xticks(np.arange(0,150,50))
ax[2,i].set_xticklabels(np.arange(0,150,50), fontsize=ft+4)
ax[2,i].set_xlabel("Number of scales", fontsize=ft+4)
ax[2,0].set_yticks(X)
ax[2,0].set_yticklabels(Cont, fontsize=ft+4)
handles = [mpatches.Patch(color=c, label=l) for c,l in zip(['k', [0.8]*3, new_blu, new_red, new_gre], ['prohibited', 'not found', 'TRANS', 'FIF', 'HAR'])]
ax[2,2].legend(loc='center right', bbox_to_anchor=(1.00, 1.15), handles=handles, frameon=False, ncol=5, fontsize=ft+2, columnspacing=2.2)
ax[0,0].text(-0.50, 1.15, "A", transform=ax[0,0].transAxes, fontsize=ft+10)
ax[1,0].text(-0.50, 1.15, "B", transform=ax[1,0].transAxes, fontsize=ft+10)
ax[2,0].text(-0.50, 1.15, "C", transform=ax[2,0].transAxes, fontsize=ft+10)
fig.savefig(os.path.join(FIG_DIR, 'si_found_scales.pdf'), bbox_inches='tight')
###########################
### FIG 8 ###########
###########################
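# Fig 8: correlation between the TRANS cost (distI_2_0) and transformed FIF and HAR
# scores (1/(1 + Nim5_r0.0_w10) and 1/hs_n1_w10) for MIN-model scales and the data,
# with linear regressions and Pearson r annotations. Output: trans_fif_corr.pdf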
def trans_fif_correlation(df_min, df_real, X='distI_2_0'):
fig, ax = plt.subplots(1,2, figsize=(10,5))
plt.subplots_adjust(wspace=0.3)#hspace=0.2)
lbls = ['MIN', 'DAT']
cols = [RdYlGn_6.hex_colors[1], 'k']
C = [1.0, 0]
df_min2 = pd.concat(df_min)
X2 = np.arange(-0.02, 0.10, 0.01)
al = 0.2
idx = np.random.randint(len(df_min2), size=10000)
for i, Y in enumerate(['Nim5_r0.0_w10', 'hs_n1_w10']):
print(df_min2.loc[idx,X].values)
ax[i].scatter(df_min2.loc[idx, X].values, 1./(C[i] + np.array(df_min2.loc[idx, Y].values)), alpha=al, color='w', edgecolor=cols[0])
ax[i].scatter(df_real[X], 1./(C[i] + df_real[Y]), alpha=al, color='w', edgecolor=cols[1])
reg_min = linregress(df_min2[X], 1./(C[i] + df_min2[Y]))
reg_dat = linregress(df_real[X], 1./(C[i] + df_real[Y]))
ax[i].plot(X2, reg_min.intercept + reg_min.slope * X2, '-', c=cols[0])
ax[i].plot(X2, reg_dat.intercept + reg_dat.slope * X2, '-', c=cols[1])
ax[i].annotate(r"$r={0:5.2f}$".format(reg_min.rvalue), (0.65, 0.10), xycoords="axes fraction", color=cols[0])
ax[i].annotate(r"$r={0:5.2f}$".format(reg_dat.rvalue), (0.65, 0.20), xycoords="axes fraction", color=cols[1])
print(f"Correlation between {X} and {Y} in {lbls[0]}:\t{pearsonr(df_min2[X], 1./(C[i]+df_min2[Y]))[0]}")
print(f"Correlation between {X} and {Y} in {lbls[1]}:\t{pearsonr(df_real[X], 1./(C[i]+df_real[Y]))[0]}")
print(reg_min)
print(reg_dat)
ax[i].set_xticks(np.arange(0,0.10, 0.02))
ax[i].set_xlabel(r"$C_{\textrm{TRANS}}(n=2)$")
ax[0].set_yticks([.8, .9, 1])
ax[1].set_yticks(np.arange(.02, .1, .02))
ax[1].set_ylim(0.010, 0.085)
ax[0].set_ylabel(r"$C_{\textrm{FIF}}(w=20)$")
ax[1].set_ylabel(r"$C_{\textrm{HAR}}(w=20)$")
fig.savefig(os.path.join(FIG_DIR, 'trans_fif_corr.pdf'), bbox_inches='tight')
###########################
### FIG 9 ###########
###########################
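# Fig 9 (SI): percentage of intervals within `diff` cents of the tritone (600 cents)
# as a function of scale size N, for the data (DAT) and the RAN, MIN, FIF, HAR and
# TRANS models. Output: tritone.pdf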
def diabolo_in_music_SI(paths, df_real, diff=20, ver=2, dia=600):
fig, ax = plt.subplots(figsize=(8,5))
nnn = range(4,10)
idx = [64, 56, 152, 62, 23]
lbls = ['RAN', 'MIN', 'FIF', 'HAR', 'TRANS']
count = []
pat = ['--'] + ['-']*4
col1 = RdYlGn_6.hex_colors
col2 = Paired_12.mpl_colors
cols = ['k', col1[1], col1[0], col2[3], col2[1]]
if ver == 2:
df_real = utils.get_all_ints(df_real)
for i, n in enumerate(nnn):
if ver == 2:
all_ints = utils.extract_floats_from_string(df_real.loc[df_real.n_notes==n,'all_ints2'])
else:
all_ints = utils.extract_floats_from_string(df_real.loc[df_real.n_notes==n,'all_ints'])
count.append(len([1 for x in all_ints if dia-diff<x<dia+diff]) / len(all_ints))
ax.plot(nnn, np.array(count)*100, '-', label="DAT", c='k')
for i in range(len(idx)):
count = []
for j, n in enumerate(nnn):
df = pd.read_feather(paths[lbls[i]][n])
if ver == 2:
# df = utils.get_all_ints(df)
all_ints = utils.extract_floats_from_string(df.all_ints2)
else:
all_ints = utils.extract_floats_from_string(df.all_ints)
count.append(len([1 for x in all_ints if dia-diff<x<dia+diff]) / len(all_ints))
ax.plot(nnn, np.array(count)*100, pat[i], label=lbls[i], c=cols[i])
ax.legend(loc='best', frameon=False, ncol=3, fontsize=12)
ax.set_xlabel(r'$N$')
ax.set_ylabel("Percentage of tritone intervals")
ax.set_ylim(1.7, 8)
# ax.set_yticks([])
plt.savefig(os.path.join(FIG_DIR, 'tritone.pdf'), bbox_inches='tight')
############################
### FIG 10 ###########
############################
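# Fig 10: occurrence of the n_att most harmonic attractor intervals (least harmonic
# if inv=True) in 5-7 note scales, with a regression of the counts against the
# harmonicity score. Output: harmonic_intervals.pdf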
def count_harmonic_intervals(df_real, att, n_att=20, real=True, inv=False):
fig, ax = plt.subplots(2,1, figsize=(10,7))
plt.subplots_adjust(hspace=0.40) #wspace=0.3, hspace=0.2)
if inv:
att_idx = np.argsort(att[3])[:n_att]
else:
att_idx = np.argsort(att[3])[::-1][2:n_att+2]
int_cents = att[1][att_idx]
count = {}
for n in range(5,8):
if real:
count[n] = utils.count_ints(df_real.loc[df_real.n_notes==n], int_cents)
else:
count[n] = utils.count_ints(df_real[n], int_cents)
ax[0].plot(count[n], label=f"N={n}")
sns.regplot(np.array(att[3])[att_idx], count[n], ax=ax[1], label=f"N={n}", scatter_kws={'alpha':0.5})
print(n)
print(linregress(np.array(att[3])[att_idx], count[n]))
# ax[1].plot(np.array(att[3])[att_idx], count[n])
ratios = [r"$\frac{{{0:d}}}{{{1:d}}}$".format(*[int(x) for x in att[2][i]]) for i in att_idx]
ax[0].set_xticks(range(n_att))
ax[0].set_xticklabels(ratios)
ax[0].tick_params(axis='x', which='major', pad=8)
ax[1].set_xticks(np.arange(10, 80, 10))
ax[0].set_xlabel('Interval frequency ratio')
ax[1].set_xlabel('Harmonicity Score')
for a in ax:
a.set_ylabel('Frequency')
ax[0].legend(loc='upper right', frameon=False, ncol=2)
ax[1].legend(loc='upper left', frameon=False, ncol=1)
for i, a in enumerate(ax):
a.text(-.10, 1.05, string.ascii_uppercase[i], transform=a.transAxes, weight='bold', fontsize=16)
plt.savefig(os.path.join(FIG_DIR, 'harmonic_intervals.pdf'), bbox_inches='tight')
############################
### FIG 11 ###########
############################
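# Fig 11: scale variability. Top row: distributions of the TRANS, FIF and HAR costs
# for 7-note scales, all data vs Pelog. Lower rows: adjacent-interval and scale-degree
# distributions for Thai, Slendro and Pelog scales, with equidistant reference lines.
# Output: scale_variability.pdf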
def scale_variability(df):
fig, ax = plt.subplots(3,3, figsize=( 8,12))
plt.subplots_adjust(hspace=0.50) #wspace=0.3, hspace=0.2)
lbls = [r"$C_{{\textrm{{{0}}}}}$".format(c) for c in ["TRANS", "FIF", "HAR"]]
C = [0, 1., 0]
for i, bias in enumerate(['distI_2_0', 'Nim5_r0.0_w10', 'hs_n1_w10']):
if i >=1:
sns.distplot(1./(C[i]+df.loc[df.n_notes==7, bias]), ax=ax[0,i], label='DAT')
sns.distplot(1./(C[i]+df.loc[(df.n_notes==7)&(df.Culture=='Gamelan'), bias]), ax=ax[0,i], label='Pelog')
else:
sns.distplot(df.loc[df.n_notes==7, bias], ax=ax[0,i], label='DAT')
sns.distplot(df.loc[(df.n_notes==7)&(df.Culture=='Gamelan'), bias], ax=ax[0,i], label='Pelog')
ax[0,i].set_xlabel(lbls[i])
ax[0,0].legend(loc='upper right', bbox_to_anchor=(2.8, 1.35), frameon=False, ncol=2)
cat = ['pair_ints', 'scale']
indices = [df.loc[df.Culture=='Thai'].index,
df.loc[(df.n_notes==5)&(df.Culture=='Gamelan')].index,
df.loc[(df.n_notes==7)&(df.Culture=='Gamelan')].index]
labels = ['Thai', 'Slendro', 'Pelog']
bins = [np.arange(0, 350, 20), np.arange(0, 1270, 20), np.arange(0, 1270, 20)]
e5 = 1200./5.
e7 = 1200./7.
e9 = 1200./9.
X_arr = [[e7], np.arange(e7, 1100, e7),
[e5], np.arange(e5, 1100, e5),
[e9, e9*2], np.arange(e9, 1100, e9)]
Y_arr = [.03, .03, .012, .004, .005, .003]
for i, idx in enumerate(indices):
for j in range(2):
sns.distplot(utils.extract_floats_from_string(df.loc[idx, cat[j]]), ax=ax[j+1,i], bins=bins[j], kde=False, norm_hist=True)
X = X_arr[i*2+j]
for x in X:
ax[j+1,i].plot([x]*2, [0, Y_arr[j*3+i]], '-', color='k')
ax[1,i].set_title(labels[i])
ax[1,i].set_xlabel(r'$I_A$ / cents')
ax[2,i].set_xlabel("Notes in scale / cents")
ax[1,i].set_xticks(range(0,300,100))
ax[2,i].set_xticks(range(0,1300,400))
for a in ax.ravel():
a.set_yticks([])
for a in ax[:,0]:
a.set_ylabel("Probability")
ax[1,0].text( 80, 0.020, r'$\frac{1200}{7}$')
ax[1,1].text(150, 0.020, r'$\frac{1200}{5}$')
ax[1,2].text( 40, 0.008, r'$\frac{1200}{9}$')
ax[1,2].text(180, 0.008, r'$\frac{2400}{9}$')
for i, a in enumerate(ax[:,0]):
a.text(-.20, 1.05, string.ascii_uppercase[i], transform=a.transAxes, weight='bold', fontsize=16)
plt.savefig(os.path.join(FIG_DIR, 'scale_variability.pdf'), bbox_inches='tight')
############################
### FIG 12 ###########
############################
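# Fig 12: robustness to database composition. For each model, plot mean JSD against
# fr_10 with confidence intervals from resampled databases (theory-only, measured-only
# and 40/60/80% subsamples), alongside the full-database values (open circles).
# Output: database_sensitivity.pdf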
def database_sensitivity(paths, resamp_conf, X='JSD', Y='fr_10', mean='euc'):
# df = pd.read_feather(os.path.join(BASE_DIR,'Processed/database_sensitivity.feather'))
col1 = RdYlGn_11.hex_colors
col2 = Paired_12.hex_colors
col = [col2[5], col1[4], col1[6], col2[3], col2[1], col2[7], 'k']
leg_lbls = ['FIF', r'$\text{HAR}^{3}$', r'$\text{HAR}^{2}$', r'$\text{HAR}$', 'TRANS', 'MIN', 'RAN']
lbls = ['FIF', 'HAR3', 'HAR2', 'HAR', 'TRANS', 'MIN', 'RAN']
bias_group = ['im5', 'HAR3', 'HAR2', 'HAR', 'distI', 'none', 'none']
titles = [r'Theory', r'Measured', r'$n=0.4S$', r'$n=0.6S$', r'$n=0.8S$']
resamp_keys = ['theory', 'measured', 'frac0.4', 'frac0.6', 'frac0.8']
lbls2 = ['FIF', 'HAR3', 'HAR2', 'HAR', 'TRANS', 'MIN', 'RAN']
fig, ax = plt.subplots(3,2, sharex=True, sharey=True, figsize=(11,15))
plt.subplots_adjust(wspace=0.10, hspace=0.30)
ax = ax.reshape(ax.size)
samples = np.array(['theory', 'instrument'] + [f"sample_f{frac:3.1f}_{i:02d}" for frac in [0.4, 0.6, 0.8] for i in range(10)])
idx = [[0], [1]] + [list(range(2+i*10,2+(i+1)*10)) for i in range(3)]
for i, l in enumerate(lbls):
for j in range(5):
Xval = [resamp_conf[resamp_keys[j]][l]['jsd_int']['mean']['mean']]
Xerr = [[Xval[0] - resamp_conf[resamp_keys[j]][l]['jsd_int']['mean']['lo']],
[resamp_conf[resamp_keys[j]][l]['jsd_int']['mean']['hi'] - Xval[0]]]
Yval = [resamp_conf[resamp_keys[j]][l]['fD']['mean']['mean']]
Yerr = [[Yval[0] - resamp_conf[resamp_keys[j]][l]['fD']['mean']['lo']],
[resamp_conf[resamp_keys[j]][l]['fD']['mean']['hi'] - Yval[0]]]
ax[j].errorbar(Xval, Yval, xerr=Xerr, yerr=Yerr, color=col[i], fmt='o', label=leg_lbls[i], mec='k', alpha=0.7, ecolor='k', ms=10)
ax[j].plot(paths[lbls2[i]][X][0], paths[lbls2[i]][Y][0], 'o', color='w', mec=col[i], label=leg_lbls[i], ms=10)
fig.delaxes(ax[5])
for i, a in enumerate(ax[:5]):
a.set_title(titles[i])
a.set_xlabel(r'$\textrm{JSD}$')
a.set_ylabel(r'$f_{\textrm{D}}$')
a.tick_params(axis='both', which='major', direction='in', length=6, width=2, pad=8)
# a.set_xlim(3.0, 9.8)
a.set_xticks(np.arange(.1,.5,.1))
a.set_xticklabels([round(x,1) for x in np.arange(.1,.5,.1)])
# a.set_xticklabels(np.arange(4, 10, 2))
# a.set_yticks(np.arange(0, 0.5, 0.2))
for tk in a.get_xticklabels():
tk.set_visible(True)
ax[4].text(0.51, 0.6, r"$\textsc{\larger{dat}}$")
ax[4].text(0.63, 0.6, r"Subsample")
ax[4].legend(bbox_to_anchor=(2.1, 0.9), frameon=False, ncol=2)
fig.savefig(os.path.join(FIG_DIR, 'database_sensitivity.pdf'), bbox_inches='tight')
############################
### FIG 13 ###########
############################
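# Fig 13: note-count statistics for the Essen folksong collection.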
def essen_collection():
fig, ax = plt.subplots()
all_df, europe_df = [], []
    df = pd.read_feather("/home/johnmcbride/projects/ABCnotation/Data/Essen/n_notes.feather")
import pandas as pd
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.preprocessing import LabelEncoder
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
import hashlib
import math
import numpy as np
import pprint
import pytest
import random
import re
import subprocess
import sys
import tempfile
import json
from catboost import (
CatBoost,
CatBoostClassifier,
CatBoostRegressor,
CatBoostError,
EFstrType,
FeaturesData,
Pool,
cv,
sum_models,
train,)
from catboost.eval.catboost_evaluation import CatboostEvaluation, EvalType
from catboost.utils import eval_metric, create_cd, get_roc_curve, select_threshold
from catboost.utils import DataMetaInfo, TargetStats, compute_training_options
import os.path
from pandas import read_table, DataFrame, Series, Categorical
from six import PY3
from six.moves import xrange
from catboost_pytest_lib import (
DelayedTee,
binary_path,
data_file,
local_canonical_file,
permute_dataset_columns,
remove_time_from_json,
test_output_path,
generate_random_labeled_set
)
if sys.version_info.major == 2:
import cPickle as pickle
else:
import _pickle as pickle
pytest_plugins = "list_plugin",
fails_on_gpu = pytest.mark.fails_on_gpu
EPS = 1e-5
BOOSTING_TYPE = ['Ordered', 'Plain']
OVERFITTING_DETECTOR_TYPE = ['IncToDec', 'Iter']
NONSYMMETRIC = ['Lossguide', 'Depthwise']
TRAIN_FILE = data_file('adult', 'train_small')
TEST_FILE = data_file('adult', 'test_small')
CD_FILE = data_file('adult', 'train.cd')
NAN_TRAIN_FILE = data_file('adult_nan', 'train_small')
NAN_TEST_FILE = data_file('adult_nan', 'test_small')
NAN_CD_FILE = data_file('adult_nan', 'train.cd')
CLOUDNESS_TRAIN_FILE = data_file('cloudness_small', 'train_small')
CLOUDNESS_TEST_FILE = data_file('cloudness_small', 'test_small')
CLOUDNESS_CD_FILE = data_file('cloudness_small', 'train.cd')
QUERYWISE_TRAIN_FILE = data_file('querywise', 'train')
QUERYWISE_TEST_FILE = data_file('querywise', 'test')
QUERYWISE_CD_FILE = data_file('querywise', 'train.cd')
QUERYWISE_CD_FILE_WITH_GROUP_WEIGHT = data_file('querywise', 'train.cd.group_weight')
QUERYWISE_CD_FILE_WITH_GROUP_ID = data_file('querywise', 'train.cd.query_id')
QUERYWISE_CD_FILE_WITH_SUBGROUP_ID = data_file('querywise', 'train.cd.subgroup_id')
QUERYWISE_TRAIN_PAIRS_FILE = data_file('querywise', 'train.pairs')
QUERYWISE_TRAIN_PAIRS_FILE_WITH_PAIR_WEIGHT = data_file('querywise', 'train.pairs.weighted')
QUERYWISE_TEST_PAIRS_FILE = data_file('querywise', 'test.pairs')
AIRLINES_5K_TRAIN_FILE = data_file('airlines_5K', 'train')
AIRLINES_5K_TEST_FILE = data_file('airlines_5K', 'test')
AIRLINES_5K_CD_FILE = data_file('airlines_5K', 'cd')
SMALL_CATEGORIAL_FILE = data_file('small_categorial', 'train')
SMALL_CATEGORIAL_CD_FILE = data_file('small_categorial', 'train.cd')
BLACK_FRIDAY_TRAIN_FILE = data_file('black_friday', 'train')
BLACK_FRIDAY_TEST_FILE = data_file('black_friday', 'test')
BLACK_FRIDAY_CD_FILE = data_file('black_friday', 'cd')
OUTPUT_MODEL_PATH = 'model.bin'
OUTPUT_COREML_MODEL_PATH = 'model.mlmodel'
OUTPUT_CPP_MODEL_PATH = 'model.cpp'
OUTPUT_PYTHON_MODEL_PATH = 'model.py'
OUTPUT_JSON_MODEL_PATH = 'model.json'
OUTPUT_ONNX_MODEL_PATH = 'model.onnx'
PREDS_PATH = 'predictions.npy'
PREDS_TXT_PATH = 'predictions.txt'
FIMP_NPY_PATH = 'feature_importance.npy'
FIMP_TXT_PATH = 'feature_importance.txt'
OIMP_PATH = 'object_importances.txt'
JSON_LOG_PATH = 'catboost_info/catboost_training.json'
TARGET_IDX = 1
CAT_FEATURES = [0, 1, 2, 4, 6, 8, 9, 10, 11, 12, 16]
model_diff_tool = binary_path("catboost/tools/model_comparator/model_comparator")
np.set_printoptions(legacy='1.13')
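# Context manager that redirects stdout to the given file inside the with-block and
# restores the original stream (and closes the file) on exit.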
class LogStdout:
def __init__(self, file):
self.log_file = file
def __enter__(self):
self.saved_stdout = sys.stdout
sys.stdout = self.log_file
return self.saved_stdout
def __exit__(self, exc_type, exc_value, exc_traceback):
sys.stdout = self.saved_stdout
self.log_file.close()
def compare_canonical_models(model, diff_limit=0):
return local_canonical_file(model, diff_tool=[model_diff_tool, '--diff-limit', str(diff_limit)])
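# Convert a feature matrix to nested lists, casting values in categorical columns to str.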
def map_cat_features(data, cat_features):
result = []
for i in range(data.shape[0]):
result.append([])
for j in range(data.shape[1]):
result[i].append(str(data[i, j]) if j in cat_features else data[i, j])
return result
def _check_shape(pool, object_count, features_count):
return np.shape(pool.get_features()) == (object_count, features_count)
def _check_data(data1, data2):
return np.all(np.isclose(data1, data2, rtol=0.001, equal_nan=True))
def _count_lines(afile):
with open(afile, 'r') as f:
num_lines = sum(1 for line in f)
return num_lines
def _generate_nontrivial_binary_target(num, seed=20181219, prng=None):
'''
Generate binary vector with non zero variance
:param num:
:return:
'''
if prng is None:
prng = np.random.RandomState(seed=seed)
def gen():
return prng.randint(0, 2, size=num)
if num <= 1:
return gen()
y = gen() # 0/1 labels
while y.min() == y.max():
y = gen()
return y
def _generate_random_target(num, seed=20181219, prng=None):
if prng is None:
prng = np.random.RandomState(seed=seed)
return prng.random_sample((num,))
def set_random_weight(pool, seed=20181219, prng=None):
if prng is None:
prng = np.random.RandomState(seed=seed)
pool.set_weight(prng.random_sample(pool.num_row()))
if pool.num_pairs() > 0:
pool.set_pairs_weight(prng.random_sample(pool.num_pairs()))
def verify_finite(result):
inf = float('inf')
for r in result:
assert(r == r)
assert(abs(r) < inf)
def append_param(metric_name, param):
return metric_name + (':' if ':' not in metric_name else ';') + param
# returns (features DataFrame, cat_feature_indices)
def load_pool_features_as_df(pool_file, cd_file, target_idx):
data = read_table(pool_file, header=None, dtype=str)
data.drop([target_idx], axis=1, inplace=True)
return (data, Pool(pool_file, column_description=cd_file).get_cat_feature_indices())
# Test cases begin here ########################################################
def test_load_file():
assert _check_shape(Pool(TRAIN_FILE, column_description=CD_FILE), 101, 17)
def test_load_list():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cat_features = pool.get_cat_feature_indices()
data = map_cat_features(pool.get_features(), cat_features)
label = pool.get_label()
assert _check_shape(Pool(data, label, cat_features), 101, 17)
def test_load_ndarray():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cat_features = pool.get_cat_feature_indices()
data = np.array(map_cat_features(pool.get_features(), cat_features))
label = np.array(pool.get_label())
assert _check_shape(Pool(data, label, cat_features), 101, 17)
@pytest.mark.parametrize('dataset', ['adult', 'adult_nan', 'querywise'])
def test_load_df_vs_load_from_file(dataset):
train_file, cd_file, target_idx, other_non_feature_columns = {
'adult': (TRAIN_FILE, CD_FILE, TARGET_IDX, []),
'adult_nan': (NAN_TRAIN_FILE, NAN_CD_FILE, TARGET_IDX, []),
'querywise': (QUERYWISE_TRAIN_FILE, QUERYWISE_CD_FILE, 2, [0, 1, 3, 4])
}[dataset]
pool1 = Pool(train_file, column_description=cd_file)
data = read_table(train_file, header=None)
labels = DataFrame(data.iloc[:, target_idx], dtype=np.float32)
data.drop([target_idx] + other_non_feature_columns, axis=1, inplace=True)
cat_features = pool1.get_cat_feature_indices()
pool2 = Pool(data, labels, cat_features)
assert _check_data(pool1.get_features(), pool2.get_features())
assert _check_data([float(label) for label in pool1.get_label()], pool2.get_label())
def test_load_series():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
data = read_table(TRAIN_FILE, header=None)
labels = Series(data.iloc[:, TARGET_IDX])
data.drop([TARGET_IDX], axis=1, inplace=True)
data = Series(list(data.values))
cat_features = pool.get_cat_feature_indices()
pool2 = Pool(data, labels, cat_features)
assert _check_data(pool.get_features(), pool2.get_features())
assert [int(label) for label in pool.get_label()] == pool2.get_label()
def test_pool_cat_features():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
assert np.all(pool.get_cat_feature_indices() == CAT_FEATURES)
def test_pool_cat_features_as_strings():
df = DataFrame(data=[[1, 2], [3, 4]], columns=['col1', 'col2'])
pool = Pool(df, cat_features=['col2'])
assert np.all(pool.get_cat_feature_indices() == [1])
data = [[1, 2, 3], [4, 5, 6]]
pool = Pool(data, feature_names=['col1', 'col2', 'col3'], cat_features=['col2', 'col3'])
assert np.all(pool.get_cat_feature_indices() == [1, 2])
data = [[1, 2, 3], [4, 5, 6]]
with pytest.raises(CatBoostError):
Pool(data, cat_features=['col2', 'col3'])
def test_load_generated():
pool_size = (100, 10)
prng = np.random.RandomState(seed=20181219)
data = np.round(prng.normal(size=pool_size), decimals=3)
label = _generate_nontrivial_binary_target(pool_size[0], prng=prng)
pool = Pool(data, label)
assert _check_data(pool.get_features(), data)
assert _check_data(pool.get_label(), label)
def test_load_dumps():
pool_size = (100, 10)
prng = np.random.RandomState(seed=20181219)
data = prng.randint(10, size=pool_size)
labels = _generate_nontrivial_binary_target(pool_size[0], prng=prng)
pool1 = Pool(data, labels)
lines = []
for i in range(len(data)):
line = [str(labels[i])] + [str(x) for x in data[i]]
lines.append('\t'.join(line))
text = '\n'.join(lines)
with open('test_data_dumps', 'w') as f:
f.write(text)
pool2 = Pool('test_data_dumps')
assert _check_data(pool1.get_features(), pool2.get_features())
assert pool1.get_label() == [int(label) for label in pool2.get_label()]
def test_dataframe_with_pandas_categorical_columns():
df = DataFrame()
df['num_feat_0'] = [0, 1, 0, 2, 3, 1, 2]
df['num_feat_1'] = [0.12, 0.8, 0.33, 0.11, 0.0, 1.0, 0.0]
df['cat_feat_2'] = Series(['A', 'B', 'A', 'C', 'A', 'A', 'A'], dtype='category')
df['cat_feat_3'] = Series(['x', 'x', 'y', 'y', 'y', 'x', 'x'])
df['cat_feat_4'] = Categorical(
['large', 'small', 'medium', 'large', 'small', 'small', 'medium'],
categories=['small', 'medium', 'large'],
ordered=True
)
df['cat_feat_5'] = [0, 1, 0, 2, 3, 1, 2]
labels = [0, 1, 1, 0, 1, 0, 1]
model = CatBoostClassifier(iterations=2)
model.fit(X=df, y=labels, cat_features=[2, 3, 4, 5])
pred = model.predict(df)
preds_path = test_output_path(PREDS_TXT_PATH)
np.savetxt(preds_path, np.array(pred), fmt='%.8f')
return local_canonical_file(preds_path)
# feature_matrix is (doc_count x feature_count)
def get_features_data_from_matrix(feature_matrix, cat_feature_indices, order='C'):
object_count = len(feature_matrix)
feature_count = len(feature_matrix[0])
cat_feature_count = len(cat_feature_indices)
num_feature_count = feature_count - cat_feature_count
result_num = np.empty((object_count, num_feature_count), dtype=np.float32, order=order)
result_cat = np.empty((object_count, cat_feature_count), dtype=object, order=order)
for object_idx in xrange(object_count):
num_feature_idx = 0
cat_feature_idx = 0
for feature_idx in xrange(len(feature_matrix[object_idx])):
if (cat_feature_idx < cat_feature_count) and (cat_feature_indices[cat_feature_idx] == feature_idx):
# simplified handling of transformation to bytes for tests
result_cat[object_idx, cat_feature_idx] = (
feature_matrix[object_idx, feature_idx]
if isinstance(feature_matrix[object_idx, feature_idx], bytes)
else str(feature_matrix[object_idx, feature_idx]).encode('utf-8')
)
cat_feature_idx += 1
else:
result_num[object_idx, num_feature_idx] = float(feature_matrix[object_idx, feature_idx])
num_feature_idx += 1
return FeaturesData(num_feature_data=result_num, cat_feature_data=result_cat)
def get_features_data_from_file(data_file, drop_columns, cat_feature_indices, order='C'):
data_matrix_from_file = read_table(data_file, header=None, dtype=str)
data_matrix_from_file.drop(drop_columns, axis=1, inplace=True)
return get_features_data_from_matrix(np.array(data_matrix_from_file), cat_feature_indices, order)
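# Check that a Pool built from a flat feature matrix and one built from FeaturesData
# contain the same values (FeaturesData stores numerical features first, then categorical).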
def compare_flat_index_and_features_data_pools(flat_index_pool, features_data_pool):
assert flat_index_pool.shape == features_data_pool.shape
cat_feature_indices = flat_index_pool.get_cat_feature_indices()
num_feature_count = flat_index_pool.shape[1] - len(cat_feature_indices)
flat_index_pool_features = flat_index_pool.get_features()
features_data_pool_features = features_data_pool.get_features()
for object_idx in xrange(flat_index_pool.shape[0]):
num_feature_idx = 0
cat_feature_idx = 0
for flat_feature_idx in xrange(flat_index_pool.shape[1]):
if (
(cat_feature_idx < len(cat_feature_indices))
and (cat_feature_indices[cat_feature_idx] == flat_feature_idx)
):
# simplified handling of transformation to bytes for tests
assert (flat_index_pool_features[object_idx][flat_feature_idx] ==
features_data_pool_features[object_idx][num_feature_count + cat_feature_idx])
cat_feature_idx += 1
else:
assert np.isclose(
flat_index_pool_features[object_idx][flat_feature_idx],
features_data_pool_features[object_idx][num_feature_idx],
rtol=0.001,
equal_nan=True
)
num_feature_idx += 1
@pytest.mark.parametrize('order', ['C', 'F'], ids=['order=C', 'order=F'])
def test_from_features_data_vs_load_from_files(order):
pool_from_files = Pool(TRAIN_FILE, column_description=CD_FILE)
features_data = get_features_data_from_file(
data_file=TRAIN_FILE,
drop_columns=[TARGET_IDX],
cat_feature_indices=pool_from_files.get_cat_feature_indices(),
order=order
)
pool_from_features_data = Pool(data=features_data)
compare_flat_index_and_features_data_pools(pool_from_files, pool_from_features_data)
def test_features_data_with_empty_objects():
fd = FeaturesData(
cat_feature_data=np.empty((0, 4), dtype=object)
)
assert fd.get_object_count() == 0
assert fd.get_feature_count() == 4
assert fd.get_num_feature_count() == 0
assert fd.get_cat_feature_count() == 4
assert fd.get_feature_names() == [''] * 4
fd = FeaturesData(
num_feature_data=np.empty((0, 2), dtype=np.float32),
num_feature_names=['f0', 'f1']
)
assert fd.get_object_count() == 0
assert fd.get_feature_count() == 2
assert fd.get_num_feature_count() == 2
assert fd.get_cat_feature_count() == 0
assert fd.get_feature_names() == ['f0', 'f1']
fd = FeaturesData(
cat_feature_data=np.empty((0, 2), dtype=object),
num_feature_data=np.empty((0, 3), dtype=np.float32)
)
assert fd.get_object_count() == 0
assert fd.get_feature_count() == 5
assert fd.get_num_feature_count() == 3
assert fd.get_cat_feature_count() == 2
assert fd.get_feature_names() == [''] * 5
def test_features_data_names():
# empty specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
)
assert fd.get_feature_names() == [''] * 5
# full specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
cat_feature_names=['shop', 'search'],
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
num_feature_names=['weight', 'price', 'volume']
)
assert fd.get_feature_names() == ['weight', 'price', 'volume', 'shop', 'search']
# partial specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
num_feature_names=['weight', 'price', 'volume']
)
assert fd.get_feature_names() == ['weight', 'price', 'volume', '', '']
# partial specification of names
fd = FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object),
cat_feature_names=['shop', 'search'],
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32),
)
assert fd.get_feature_names() == ['', '', '', 'shop', 'search']
def compare_pools_from_features_data_and_generic_matrix(
features_data,
generic_matrix,
cat_features_indices,
feature_names=None
):
pool1 = Pool(data=features_data)
pool2 = Pool(data=generic_matrix, cat_features=cat_features_indices, feature_names=feature_names)
assert _check_data(pool1.get_features(), pool2.get_features())
assert pool1.get_cat_feature_indices() == pool2.get_cat_feature_indices()
assert pool1.get_feature_names() == pool2.get_feature_names()
@pytest.mark.parametrize('order', ['C', 'F'], ids=['order=C', 'order=F'])
def test_features_data_good(order):
# 0 objects
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(cat_feature_data=np.empty((0, 4), dtype=object, order=order)),
np.empty((0, 4), dtype=object),
cat_features_indices=[0, 1, 2, 3]
)
# 0 objects
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.empty((0, 2), dtype=object, order=order),
cat_feature_names=['cat0', 'cat1'],
num_feature_data=np.empty((0, 3), dtype=np.float32, order=order),
),
np.empty((0, 5), dtype=object),
cat_features_indices=[3, 4],
feature_names=['', '', '', 'cat0', 'cat1']
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order)
),
[[b'amazon', b'bing'], [b'ebay', b'google']],
cat_features_indices=[0, 1]
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order)
),
[[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]],
cat_features_indices=[]
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order),
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order)
),
[[1.0, 2.0, 3.0, b'amazon', b'bing'], [22.0, 7.1, 10.2, b'ebay', b'google']],
cat_features_indices=[3, 4]
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order),
cat_feature_names=['shop', 'search']
),
[[b'amazon', b'bing'], [b'ebay', b'google']],
cat_features_indices=[0, 1],
feature_names=['shop', 'search']
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order),
num_feature_names=['weight', 'price', 'volume']
),
[[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]],
cat_features_indices=[],
feature_names=['weight', 'price', 'volume']
)
compare_pools_from_features_data_and_generic_matrix(
FeaturesData(
cat_feature_data=np.array([[b'amazon', b'bing'], [b'ebay', b'google']], dtype=object, order=order),
cat_feature_names=['shop', 'search'],
num_feature_data=np.array([[1.0, 2.0, 3.0], [22.0, 7.1, 10.2]], dtype=np.float32, order=order),
num_feature_names=['weight', 'price', 'volume']
),
[[1.0, 2.0, 3.0, b'amazon', b'bing'], [22.0, 7.1, 10.2, b'ebay', b'google']],
cat_features_indices=[3, 4],
feature_names=['weight', 'price', 'volume', 'shop', 'search']
)
def test_features_data_bad():
# empty
with pytest.raises(CatBoostError):
FeaturesData()
# names w/o data
with pytest.raises(CatBoostError):
FeaturesData(cat_feature_data=[[b'amazon', b'bing']], num_feature_names=['price'])
# bad matrix type
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=[[b'amazon', b'bing']],
num_feature_data=np.array([1.0, 2.0, 3.0], dtype=np.float32)
)
# bad matrix shape
with pytest.raises(CatBoostError):
FeaturesData(num_feature_data=np.array([[[1.0], [2.0], [3.0]]], dtype=np.float32))
# bad element type
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([b'amazon', b'bing'], dtype=object),
num_feature_data=np.array([1.0, 2.0, 3.0], dtype=np.float64)
)
# bad element type
with pytest.raises(CatBoostError):
FeaturesData(cat_feature_data=np.array(['amazon', 'bing']))
# bad names type
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[b'google'], [b'reddit']], dtype=object),
cat_feature_names=[None, 'news_aggregator']
)
# bad names length
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[b'google'], [b'bing']], dtype=object),
cat_feature_names=['search_engine', 'news_aggregator']
)
# no features
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[], [], []], dtype=object),
num_feature_data=np.array([[], [], []], dtype=np.float32)
)
# number of objects is different
with pytest.raises(CatBoostError):
FeaturesData(
cat_feature_data=np.array([[b'google'], [b'bing']], dtype=object),
num_feature_data=np.array([1.0, 2.0, 3.0], dtype=np.float32)
)
def test_predict_regress(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
assert(model.is_fitted())
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_predict_sklearn_regress(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostRegressor(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
assert(model.is_fitted())
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_predict_sklearn_class(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, loss_function='Logloss:border=0.5', task_type=task_type, devices='0')
model.fit(train_pool)
assert(model.is_fitted())
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_predict_class_raw(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, task_type=task_type, devices='0')
model.fit(train_pool)
pred = model.predict(test_pool)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
def test_raw_predict_equals_to_model_predict(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, task_type=task_type, devices='0')
model.fit(train_pool, eval_set=test_pool)
assert(model.is_fitted())
pred = model.predict(test_pool, prediction_type='RawFormulaVal')
assert np.all(np.isclose(model.get_test_eval(), pred, rtol=1.e-6))
@pytest.mark.parametrize('problem', ['Classifier', 'Regressor'])
def test_predict_and_predict_proba_on_single_object(problem):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
if problem == 'Classifier':
model = CatBoostClassifier(iterations=2)
else:
model = CatBoostRegressor(iterations=2)
model.fit(train_pool)
test_data = read_table(TEST_FILE, header=None)
test_data.drop([TARGET_IDX], axis=1, inplace=True)
pred = model.predict(test_data)
if problem == 'Classifier':
pred_probabilities = model.predict_proba(test_data)
random.seed(0)
for i in xrange(3): # just some indices
test_object_idx = random.randrange(test_data.shape[0])
assert pred[test_object_idx] == model.predict(test_data.values[test_object_idx])
if problem == 'Classifier':
assert np.array_equal(pred_probabilities[test_object_idx], model.predict_proba(test_data.values[test_object_idx]))
def test_model_pickling(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, task_type=task_type, devices='0')
model.fit(train_pool, eval_set=test_pool)
pred = model.predict(test_pool, prediction_type='RawFormulaVal')
model_unpickled = pickle.loads(pickle.dumps(model))
pred_new = model_unpickled.predict(test_pool, prediction_type='RawFormulaVal')
assert all(pred_new == pred)
def test_fit_from_file(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
predictions1 = model.predict(train_pool)
model.fit(TRAIN_FILE, column_description=CD_FILE)
predictions2 = model.predict(train_pool)
assert all(predictions1 == predictions2)
assert 'train_finish_time' in model.get_metadata()
@fails_on_gpu(how='assert 0.019921323750168085 < EPS, where 0.019921323750168085 = abs((0.03378972364589572 - 0.053711047396063805))')
@pytest.mark.parametrize('order', ['C', 'F'], ids=['order=C', 'order=F'])
def test_fit_from_features_data(order, task_type):
pool_from_files = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(pool_from_files)
assert(model.is_fitted())
predictions_from_files = model.predict(pool_from_files)
features_data = get_features_data_from_file(
data_file=TRAIN_FILE,
drop_columns=[TARGET_IDX],
cat_feature_indices=pool_from_files.get_cat_feature_indices(),
order=order
)
model.fit(X=features_data, y=pool_from_files.get_label())
predictions_from_features_data = model.predict(Pool(features_data))
for prediction1, prediction2 in zip(predictions_from_files, predictions_from_features_data):
assert abs(prediction1 - prediction2) < EPS
def test_fit_from_empty_features_data(task_type):
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
with pytest.raises(CatBoostError):
model.fit(
X=FeaturesData(num_feature_data=np.empty((0, 2), dtype=np.float32)),
y=np.empty((0), dtype=np.int32)
)
def test_coreml_import_export(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
test_pool = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost(params={'loss_function': 'RMSE', 'iterations': 20, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_coreml_model_path = test_output_path(OUTPUT_COREML_MODEL_PATH)
model.save_model(output_coreml_model_path, format="coreml")
canon_pred = model.predict(test_pool)
coreml_loaded_model = CatBoostRegressor()
coreml_loaded_model.load_model(output_coreml_model_path, format="coreml")
assert all(canon_pred == coreml_loaded_model.predict(test_pool))
return compare_canonical_models(output_coreml_model_path)
def test_coreml_import_export_one_hot_features(task_type):
train_pool = Pool(SMALL_CATEGORIAL_FILE, column_description=SMALL_CATEGORIAL_CD_FILE)
model = CatBoost(params={'loss_function': 'RMSE', 'iterations': 2, 'task_type': task_type, 'devices': '0', 'one_hot_max_size': 4})
model.fit(train_pool)
output_coreml_model_path = test_output_path(OUTPUT_COREML_MODEL_PATH)
model.save_model(output_coreml_model_path, format="coreml", pool=train_pool)
pred = model.predict(train_pool)
coreml_loaded_model = CatBoostRegressor()
coreml_loaded_model.load_model(output_coreml_model_path, format="coreml")
assert all(pred == coreml_loaded_model.predict(train_pool))
return compare_canonical_models(output_coreml_model_path)
@pytest.mark.parametrize('pool', ['adult', 'higgs'])
def test_convert_model_to_json(task_type, pool):
train_pool = Pool(data_file(pool, 'train_small'), column_description=data_file(pool, 'train.cd'))
test_pool = Pool(data_file(pool, 'test_small'), column_description=data_file(pool, 'train.cd'))
converted_model_path = test_output_path("converted_model.bin")
model = CatBoost({'iterations': 20, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
output_json_model_path = test_output_path(OUTPUT_JSON_MODEL_PATH)
model.save_model(output_model_path)
model.save_model(output_json_model_path, format="json")
model2 = CatBoost()
model2.load_model(output_json_model_path, format="json")
model2.save_model(converted_model_path)
pred1 = model.predict(test_pool)
pred2 = model2.predict(test_pool)
assert _check_data(pred1, pred2)
subprocess.check_call((model_diff_tool, output_model_path, converted_model_path, '--diff-limit', '0.000001'))
return compare_canonical_models(converted_model_path)
def test_coreml_cbm_import_export(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
test_pool = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost(params={'loss_function': 'RMSE', 'iterations': 20, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
canon_pred = model.predict(test_pool)
output_coreml_model_path = test_output_path(OUTPUT_COREML_MODEL_PATH)
model.save_model(output_coreml_model_path, format="coreml")
coreml_loaded_model = CatBoost()
coreml_loaded_model.load_model(output_coreml_model_path, format="coreml")
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
coreml_loaded_model.save_model(output_model_path)
cbm_loaded_model = CatBoost()
cbm_loaded_model.load_model(output_model_path)
assert all(canon_pred == cbm_loaded_model.predict(test_pool))
return compare_canonical_models(output_coreml_model_path)
def test_cpp_export_no_cat_features(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_cpp_model_path = test_output_path(OUTPUT_CPP_MODEL_PATH)
model.save_model(output_cpp_model_path, format="cpp")
return local_canonical_file(output_cpp_model_path)
def test_cpp_export_with_cat_features(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 20, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_cpp_model_path = test_output_path(OUTPUT_CPP_MODEL_PATH)
model.save_model(output_cpp_model_path, format="cpp")
return local_canonical_file(output_cpp_model_path)
@pytest.mark.parametrize('iterations', [2, 40])
def test_export_to_python_no_cat_features(task_type, iterations):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost({'iterations': iterations, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_python_model_path = test_output_path(OUTPUT_PYTHON_MODEL_PATH)
model.save_model(output_python_model_path, format="python")
return local_canonical_file(output_python_model_path)
@pytest.mark.parametrize('iterations', [2, 40])
def test_export_to_python_with_cat_features(task_type, iterations):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': iterations, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_python_model_path = test_output_path(OUTPUT_PYTHON_MODEL_PATH)
model.save_model(output_python_model_path, format="python", pool=train_pool)
return local_canonical_file(output_python_model_path)
def test_export_to_python_with_cat_features_from_pandas(task_type):
model = CatBoost({'iterations': 5, 'task_type': task_type, 'devices': '0'})
X = DataFrame([[1, 2], [3, 4]], columns=['Num', 'Categ'])
y = [1, 0]
cat_features = [1]
model.fit(X, y, cat_features)
output_python_model_path = test_output_path(OUTPUT_PYTHON_MODEL_PATH)
model.save_model(output_python_model_path, format="python", pool=X)
return local_canonical_file(output_python_model_path)
@pytest.mark.parametrize('problem_type', ['binclass', 'multiclass', 'regression'])
def test_onnx_export(problem_type):
if problem_type == 'binclass':
loss_function = 'Logloss'
train_path = TRAIN_FILE
cd_path = CD_FILE
elif problem_type == 'multiclass':
loss_function = 'MultiClass'
train_path = CLOUDNESS_TRAIN_FILE
cd_path = CLOUDNESS_CD_FILE
elif problem_type == 'regression':
loss_function = 'RMSE'
train_path = TRAIN_FILE
cd_path = CD_FILE
else:
raise Exception('Unsupported problem_type: %s' % problem_type)
train_pool = Pool(train_path, column_description=cd_path)
model = CatBoost(
{
'task_type': 'CPU', # TODO(akhropov): GPU results are unstable, difficult to compare models
'loss_function': loss_function,
'iterations': 5,
'depth': 4,
# onnx format export does not yet support categorical features so ignore them
'ignored_features': train_pool.get_cat_feature_indices()
}
)
model.fit(train_pool)
output_onnx_model_path = test_output_path(OUTPUT_ONNX_MODEL_PATH)
model.save_model(
output_onnx_model_path,
format="onnx",
export_parameters={
'onnx_domain': 'ai.catboost',
'onnx_model_version': 1,
'onnx_doc_string': 'test model for problem_type %s' % problem_type,
'onnx_graph_name': 'CatBoostModel_for_%s' % problem_type
}
)
return compare_canonical_models(output_onnx_model_path)
def test_predict_class(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
pred = model.predict(test_pool, prediction_type="Class")
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
def test_zero_learning_rate(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0, task_type=task_type, devices='0')
with pytest.raises(CatBoostError):
model.fit(train_pool)
def test_predict_class_proba(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
pred = model.predict_proba(test_pool)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
@fails_on_gpu(how='assert 0.031045619651137835 < EPS, where 0.031045619651137835 = <function amax at ...')
@pytest.mark.parametrize('function_name', ['predict', 'predict_proba'])
def test_predict_funcs_from_features_data(function_name, task_type):
function = getattr(CatBoostClassifier, function_name)
train_pool_from_files = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool_from_files)
test_pool_from_files = Pool(TEST_FILE, column_description=CD_FILE)
predictions_from_files = function(model, test_pool_from_files)
train_features_data, test_features_data = [
get_features_data_from_file(
data_file=data_file,
drop_columns=[TARGET_IDX],
cat_feature_indices=train_pool_from_files.get_cat_feature_indices()
)
for data_file in [TRAIN_FILE, TEST_FILE]
]
model.fit(X=train_features_data, y=train_pool_from_files.get_label())
predictions_from_features_data = function(model, test_features_data)
for prediction1, prediction2 in zip(predictions_from_files, predictions_from_features_data):
assert np.max(np.abs(prediction1 - prediction2)) < EPS
    # empty input: predictions on zero objects must also be empty
empty_test_features_data = FeaturesData(
num_feature_data=np.empty((0, test_features_data.get_num_feature_count()), dtype=np.float32),
cat_feature_data=np.empty((0, test_features_data.get_cat_feature_count()), dtype=object)
)
empty_predictions = function(model, empty_test_features_data)
assert len(empty_predictions) == 0
def test_no_cat_in_predict(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
pred1 = model.predict(map_cat_features(test_pool.get_features(), train_pool.get_cat_feature_indices()))
pred2 = model.predict(Pool(map_cat_features(test_pool.get_features(), train_pool.get_cat_feature_indices()), cat_features=train_pool.get_cat_feature_indices()))
assert _check_data(pred1, pred2)
def test_save_model(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoost({'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
model2 = CatBoost()
model2.load_model(output_model_path)
pred1 = model.predict(test_pool)
pred2 = model2.predict(test_pool)
assert _check_data(pred1, pred2)
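# Multiclass tests: models are saved and reloaded, and predictions must respect
# classes_count, custom class labels and class_names.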
def test_multiclass(task_type):
pool = Pool(CLOUDNESS_TRAIN_FILE, column_description=CLOUDNESS_CD_FILE)
classifier = CatBoostClassifier(iterations=2, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(pool)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
def test_multiclass_classes_count_missed_classes(task_type):
prng = np.random.RandomState(seed=0)
pool = Pool(prng.random_sample(size=(100, 10)), label=prng.choice([1, 3], size=100))
classifier = CatBoostClassifier(classes_count=4, iterations=2, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(pool)
classes = new_classifier.predict(pool)
assert pred.shape == (100, 4)
    assert np.all(np.isin(np.array(classes), [1, 3]))
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
@pytest.mark.parametrize('label_type', ['string', 'int'])
def test_multiclass_custom_class_labels(label_type, task_type):
if label_type == 'int':
train_labels = [1, 2]
elif label_type == 'string':
train_labels = ['Class1', 'Class2']
prng = np.random.RandomState(seed=0)
train_pool = Pool(prng.random_sample(size=(100, 10)), label=prng.choice(train_labels, size=100))
test_pool = Pool(prng.random_sample(size=(50, 10)))
classifier = CatBoostClassifier(iterations=2, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(test_pool)
classes = new_classifier.predict(test_pool)
assert pred.shape == (50, 2)
assert all(((class1 in train_labels) for class1 in classes))
preds_path = test_output_path(PREDS_TXT_PATH)
np.savetxt(preds_path, np.array(pred), fmt='%.8f')
return local_canonical_file(preds_path)
def test_multiclass_custom_class_labels_from_files(task_type):
labels = ['a', 'b', 'c', 'd']
cd_path = test_output_path('cd.txt')
np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')
prng = np.random.RandomState(seed=0)
train_path = test_output_path('train.txt')
np.savetxt(train_path, generate_random_labeled_set(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
test_path = test_output_path('test.txt')
np.savetxt(test_path, generate_random_labeled_set(25, 10, labels, prng=prng), fmt='%s', delimiter='\t')
train_pool = Pool(train_path, column_description=cd_path)
test_pool = Pool(test_path, column_description=cd_path)
classifier = CatBoostClassifier(iterations=2, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(test_pool)
classes = new_classifier.predict(test_pool)
assert pred.shape == (25, 4)
assert all(((class1 in labels) for class1 in classes))
preds_path = test_output_path(PREDS_TXT_PATH)
np.savetxt(preds_path, np.array(pred), fmt='%.8f')
return local_canonical_file(preds_path)
def test_class_names(task_type):
class_names = ['Small', 'Medium', 'Large']
prng = np.random.RandomState(seed=0)
train_pool = Pool(prng.random_sample(size=(100, 10)), label=prng.choice(class_names, size=100))
test_pool = Pool(prng.random_sample(size=(25, 10)))
classifier = CatBoostClassifier(
iterations=2,
loss_function='MultiClass',
class_names=class_names,
thread_count=8,
task_type=task_type,
devices='0'
)
classifier.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
classifier.save_model(output_model_path)
new_classifier = CatBoostClassifier()
new_classifier.load_model(output_model_path)
pred = new_classifier.predict_proba(test_pool)
classes = new_classifier.predict(test_pool)
assert pred.shape == (25, 3)
assert all(((class1 in class_names) for class1 in classes))
assert sorted(classifier.classes_) == sorted(class_names)
preds_path = test_output_path(PREDS_TXT_PATH)
np.savetxt(preds_path, np.array(pred), fmt='%.8f')
return local_canonical_file(preds_path)
def test_inconsistent_labels_and_class_names():
class_names = ['Small', 'Medium', 'Large']
prng = np.random.RandomState(seed=0)
train_pool = Pool(prng.random_sample(size=(100, 10)), label=prng.choice([0, 1, 2], size=100))
classifier = CatBoostClassifier(
iterations=2,
loss_function='MultiClass',
class_names=class_names,
)
with pytest.raises(CatBoostError):
classifier.fit(train_pool)
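# Querywise / grouped-data tests: training from file-based Pools and from in-memory
# data with group_id (plus group_weight / subgroup_id) must give identical predictions.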
@pytest.mark.parametrize(
'features_dtype',
['str', 'np.float32'],
ids=['features_dtype=str', 'features_dtype=np.float32']
)
def test_querywise(features_dtype, task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
test_pool = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE)
model = CatBoost(params={'loss_function': 'QueryRMSE', 'iterations': 2, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
pred1 = model.predict(test_pool)
df = read_table(QUERYWISE_TRAIN_FILE, delimiter='\t', header=None)
train_query_id = df.loc[:, 1]
train_target = df.loc[:, 2]
train_data = df.drop([0, 1, 2, 3, 4], axis=1).astype(eval(features_dtype))
df = read_table(QUERYWISE_TEST_FILE, delimiter='\t', header=None)
test_data = df.drop([0, 1, 2, 3, 4], axis=1).astype(eval(features_dtype))
model.fit(train_data, train_target, group_id=train_query_id)
pred2 = model.predict(test_data)
assert _check_data(pred1, pred2)
def test_group_weight(task_type):
train_pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE_WITH_GROUP_WEIGHT)
test_pool = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE_WITH_GROUP_WEIGHT)
model = CatBoost(params={'loss_function': 'YetiRank', 'iterations': 10, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
pred1 = model.predict(test_pool)
df = read_table(QUERYWISE_TRAIN_FILE, delimiter='\t', header=None)
train_query_weight = df.loc[:, 0]
train_query_id = df.loc[:, 1]
train_target = df.loc[:, 2]
train_data = df.drop([0, 1, 2, 3, 4], axis=1).astype(str)
df = read_table(QUERYWISE_TEST_FILE, delimiter='\t', header=None)
test_query_weight = df.loc[:, 0]
test_query_id = df.loc[:, 1]
test_data = Pool(df.drop([0, 1, 2, 3, 4], axis=1).astype(np.float32), group_id=test_query_id, group_weight=test_query_weight)
model.fit(train_data, train_target, group_id=train_query_id, group_weight=train_query_weight)
pred2 = model.predict(test_data)
assert _check_data(pred1, pred2)
def test_zero_baseline(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
baseline = np.zeros(pool.num_row())
pool.set_baseline(baseline)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_ones_weight(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
weight = np.ones(pool.num_row())
pool.set_weight(weight)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_non_ones_weight(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
weight = np.arange(1, pool.num_row() + 1)
pool.set_weight(weight)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_ones_weight_equal_to_nonspecified_weight(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
predictions = []
for set_weights in [False, True]:
if set_weights:
weight = np.ones(train_pool.num_row())
train_pool.set_weight(weight)
model.fit(train_pool)
predictions.append(model.predict(test_pool))
assert _check_data(predictions[0], predictions[1])
def test_py_data_group_id(task_type):
train_pool_from_files = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE_WITH_GROUP_ID)
test_pool_from_files = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE_WITH_GROUP_ID)
model = CatBoost(
params={'loss_function': 'QueryRMSE', 'iterations': 2, 'thread_count': 4, 'task_type': task_type, 'devices': '0'}
)
model.fit(train_pool_from_files)
predictions_from_files = model.predict(test_pool_from_files)
train_df = read_table(QUERYWISE_TRAIN_FILE, delimiter='\t', header=None)
train_target = train_df.loc[:, 2]
raw_train_group_id = train_df.loc[:, 1]
train_data = train_df.drop([0, 1, 2, 3, 4], axis=1).astype(np.float32)
test_df = read_table(QUERYWISE_TEST_FILE, delimiter='\t', header=None)
test_data = Pool(test_df.drop([0, 1, 2, 3, 4], axis=1).astype(np.float32))
for group_id_func in (int, str, lambda id: 'myid_' + str(id)):
train_group_id = [group_id_func(group_id) for group_id in raw_train_group_id]
model.fit(train_data, train_target, group_id=train_group_id)
predictions_from_py_data = model.predict(test_data)
assert _check_data(predictions_from_files, predictions_from_py_data)
def test_py_data_subgroup_id(task_type):
train_pool_from_files = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE_WITH_SUBGROUP_ID)
test_pool_from_files = Pool(QUERYWISE_TEST_FILE, column_description=QUERYWISE_CD_FILE_WITH_SUBGROUP_ID)
model = CatBoost(
params={'loss_function': 'QueryRMSE', 'iterations': 2, 'thread_count': 4, 'task_type': task_type, 'devices': '0'}
)
model.fit(train_pool_from_files)
predictions_from_files = model.predict(test_pool_from_files)
train_df = read_table(QUERYWISE_TRAIN_FILE, delimiter='\t', header=None)
train_group_id = train_df.loc[:, 1]
raw_train_subgroup_id = train_df.loc[:, 4]
train_target = train_df.loc[:, 2]
train_data = train_df.drop([0, 1, 2, 3, 4], axis=1).astype(np.float32)
test_df = read_table(QUERYWISE_TEST_FILE, delimiter='\t', header=None)
test_data = Pool(test_df.drop([0, 1, 2, 3, 4], axis=1).astype(np.float32))
for subgroup_id_func in (int, str, lambda id: 'myid_' + str(id)):
train_subgroup_id = [subgroup_id_func(subgroup_id) for subgroup_id in raw_train_subgroup_id]
model.fit(train_data, train_target, group_id=train_group_id, subgroup_id=train_subgroup_id)
predictions_from_py_data = model.predict(test_data)
assert _check_data(predictions_from_files, predictions_from_py_data)
def test_fit_data(task_type):
pool = Pool(CLOUDNESS_TRAIN_FILE, column_description=CLOUDNESS_CD_FILE)
eval_pool = Pool(CLOUDNESS_TEST_FILE, column_description=CLOUDNESS_CD_FILE)
base_model = CatBoostClassifier(iterations=2, learning_rate=0.03, loss_function="MultiClass", task_type=task_type, devices='0')
base_model.fit(pool)
baseline = np.array(base_model.predict(pool, prediction_type='RawFormulaVal'))
eval_baseline = np.array(base_model.predict(eval_pool, prediction_type='RawFormulaVal'))
eval_pool.set_baseline(eval_baseline)
model = CatBoostClassifier(iterations=2, learning_rate=0.03, loss_function="MultiClass")
data = map_cat_features(pool.get_features(), pool.get_cat_feature_indices())
model.fit(data, pool.get_label(), pool.get_cat_feature_indices(), sample_weight=np.arange(1, pool.num_row() + 1), baseline=baseline, use_best_model=True, eval_set=eval_pool)
pred = model.predict_proba(eval_pool)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
def test_ntree_limit(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=100, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
pred = model.predict_proba(test_pool, ntree_end=10)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(pred))
return local_canonical_file(preds_path)
def test_staged_predict(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool)
preds = []
for pred in model.staged_predict(test_pool):
preds.append(pred)
preds_path = test_output_path(PREDS_PATH)
np.save(preds_path, np.array(preds))
return local_canonical_file(preds_path)
@pytest.mark.parametrize('problem', ['Classifier', 'Regressor'])
def test_staged_predict_and_predict_proba_on_single_object(problem):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
if problem == 'Classifier':
model = CatBoostClassifier(iterations=10)
else:
model = CatBoostRegressor(iterations=10)
model.fit(train_pool)
test_data = read_table(TEST_FILE, header=None)
test_data.drop([TARGET_IDX], axis=1, inplace=True)
preds = []
for pred in model.staged_predict(test_data):
preds.append(pred)
if problem == 'Classifier':
pred_probabilities = []
for pred_probabilities_for_iteration in model.staged_predict_proba(test_data):
pred_probabilities.append(pred_probabilities_for_iteration)
random.seed(0)
for i in xrange(3): # just some indices
test_object_idx = random.randrange(test_data.shape[0])
single_object_preds = []
for pred in model.staged_predict(test_data.values[test_object_idx]):
single_object_preds.append(pred)
assert len(preds) == len(single_object_preds)
for iteration in xrange(len(preds)):
assert preds[iteration][test_object_idx] == single_object_preds[iteration]
if problem == 'Classifier':
single_object_pred_probabilities = []
for pred_probabilities_for_iteration in model.staged_predict_proba(test_data.values[test_object_idx]):
single_object_pred_probabilities.append(pred_probabilities_for_iteration)
assert len(pred_probabilities) == len(single_object_pred_probabilities)
for iteration in xrange(len(pred_probabilities)):
assert np.array_equal(pred_probabilities[iteration][test_object_idx], single_object_pred_probabilities[iteration])
@fails_on_gpu(how='assert 1.0 < EPS')
@pytest.mark.parametrize('staged_function_name', ['staged_predict', 'staged_predict_proba'])
def test_staged_predict_funcs_from_features_data(staged_function_name, task_type):
staged_function = getattr(CatBoostClassifier, staged_function_name)
fit_iterations = 10
train_pool_from_files = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=fit_iterations, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool_from_files)
test_pool_from_files = Pool(TEST_FILE, column_description=CD_FILE)
predictions_from_files = []
for prediction in staged_function(model, test_pool_from_files):
predictions_from_files.append(prediction)
train_features_data, test_features_data = [
get_features_data_from_file(
data_file=data_file,
drop_columns=[TARGET_IDX],
cat_feature_indices=train_pool_from_files.get_cat_feature_indices()
)
for data_file in [TRAIN_FILE, TEST_FILE]
]
model.fit(X=train_features_data, y=train_pool_from_files.get_label())
predictions_from_features_data = []
for prediction in staged_function(model, test_features_data):
predictions_from_features_data.append(prediction)
for prediction1, prediction2 in zip(predictions_from_files, predictions_from_features_data):
assert np.max(np.abs(prediction1 - prediction2)) < EPS
    # empty input: staged predictions on zero objects must have zero rows at every iteration
empty_test_features_data = FeaturesData(
num_feature_data=np.empty((0, test_features_data.get_num_feature_count()), dtype=np.float32),
cat_feature_data=np.empty((0, test_features_data.get_cat_feature_count()), dtype=object)
)
empty_predictions = []
for prediction in staged_function(model, empty_test_features_data):
assert np.shape(prediction) == ((0, 2) if staged_function_name == 'staged_predict_proba' else (0, ))
empty_predictions.append(prediction)
assert len(empty_predictions) == fit_iterations
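# Parameter and input validation: unknown loss functions, missing labels, wrong feature
# counts and duplicate or unknown parameters must raise CatBoostError (or TypeError for
# unexpected keyword arguments of the sklearn-style wrappers).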
def test_invalid_loss_base(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({"loss_function": "abcdef", 'task_type': task_type, 'devices': '0'})
with pytest.raises(CatBoostError):
model.fit(pool)
def test_invalid_loss_classifier(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(loss_function="abcdef", task_type=task_type, devices='0')
with pytest.raises(CatBoostError):
model.fit(pool)
def test_invalid_loss_regressor(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostRegressor(loss_function="fee", task_type=task_type, devices='0')
with pytest.raises(CatBoostError):
model.fit(pool)
def test_fit_no_label(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(task_type=task_type, devices='0')
with pytest.raises(CatBoostError):
model.fit(pool.get_features())
def test_predict_without_fit(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(task_type=task_type, devices='0')
with pytest.raises(CatBoostError):
model.predict(pool)
def test_real_numbers_cat_features():
prng = np.random.RandomState(seed=20181219)
data = prng.rand(100, 10)
label = _generate_nontrivial_binary_target(100, prng=prng)
with pytest.raises(CatBoostError):
Pool(data, label, [1, 2])
def test_wrong_ctr_for_classification(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(ctr_description=['Borders:TargetBorderCount=5:TargetBorderType=Uniform'], task_type=task_type, devices='0')
with pytest.raises(CatBoostError):
model.fit(pool)
def test_wrong_feature_count(task_type):
prng = np.random.RandomState(seed=20181219)
data = prng.rand(100, 10)
label = _generate_nontrivial_binary_target(100, prng=prng)
model = CatBoostClassifier(task_type=task_type, devices='0')
model.fit(data, label)
with pytest.raises(CatBoostError):
model.predict(data[:, :-1])
def test_wrong_params_classifier():
with pytest.raises(TypeError):
CatBoostClassifier(wrong_param=1)
def test_wrong_params_base():
prng = np.random.RandomState(seed=20181219)
data = prng.rand(100, 10)
label = _generate_nontrivial_binary_target(100, prng=prng)
model = CatBoost({'wrong_param': 1})
with pytest.raises(CatBoostError):
model.fit(data, label)
def test_wrong_params_regressor():
with pytest.raises(TypeError):
CatBoostRegressor(wrong_param=1)
def test_wrong_kwargs_base():
prng = np.random.RandomState(seed=20181219)
data = prng.rand(100, 10)
label = _generate_nontrivial_binary_target(100, prng=prng)
model = CatBoost({'kwargs': {'wrong_param': 1}})
with pytest.raises(CatBoostError):
model.fit(data, label)
def test_duplicate_params_base():
prng = np.random.RandomState(seed=20181219)
data = prng.rand(100, 10)
label = _generate_nontrivial_binary_target(100, prng=prng)
model = CatBoost({'iterations': 100, 'n_estimators': 50})
with pytest.raises(CatBoostError):
model.fit(data, label)
def test_duplicate_params_classifier():
prng = np.random.RandomState(seed=20181219)
data = prng.rand(100, 10)
label = _generate_nontrivial_binary_target(100, prng=prng)
model = CatBoostClassifier(depth=3, max_depth=4, random_seed=42, random_state=12)
with pytest.raises(CatBoostError):
model.fit(data, label)
def test_duplicate_params_regressor():
prng = np.random.RandomState(seed=20181219)
data = prng.rand(100, 10)
label = _generate_nontrivial_binary_target(100, prng=prng)
model = CatBoostRegressor(learning_rate=0.1, eta=0.03, border_count=10, max_bin=12)
with pytest.raises(CatBoostError):
model.fit(data, label)
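# Custom metrics and objectives are plain Python objects: a metric implements
# get_final_error / is_max_optimal / evaluate, an objective implements calc_ders_range.
# Their results must match the built-in Logloss implementation within EPS.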
def test_custom_eval():
class LoglossMetric(object):
def get_final_error(self, error, weight):
return error / (weight + 1e-38)
def is_max_optimal(self):
return True
def evaluate(self, approxes, target, weight):
assert len(approxes) == 1
assert len(target) == len(approxes[0])
approx = approxes[0]
error_sum = 0.0
weight_sum = 0.0
for i in xrange(len(approx)):
w = 1.0 if weight is None else weight[i]
weight_sum += w
error_sum += w * (target[i] * approx[i] - math.log(1 + math.exp(approx[i])))
return error_sum, weight_sum
train_pool = Pool(data=TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(data=TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, use_best_model=True, eval_metric=LoglossMetric())
model.fit(train_pool, eval_set=test_pool)
pred1 = model.predict(test_pool)
model2 = CatBoostClassifier(iterations=5, use_best_model=True, eval_metric="Logloss")
model2.fit(train_pool, eval_set=test_pool)
pred2 = model2.predict(test_pool)
for p1, p2 in zip(pred1, pred2):
assert abs(p1 - p2) < EPS
@fails_on_gpu(how='cuda/train_lib/train.cpp:283: Error: loss function is not supported for GPU learning Custom')
def test_custom_objective(task_type):
class LoglossObjective(object):
def calc_ders_range(self, approxes, targets, weights):
assert len(approxes) == len(targets)
if weights is not None:
assert len(weights) == len(approxes)
exponents = []
for index in xrange(len(approxes)):
exponents.append(math.exp(approxes[index]))
result = []
for index in xrange(len(targets)):
p = exponents[index] / (1 + exponents[index])
der1 = (1 - p) if targets[index] > 0.0 else -p
der2 = -p * (1 - p)
if weights is not None:
der1 *= weights[index]
der2 *= weights[index]
result.append((der1, der2))
return result
train_pool = Pool(data=TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(data=TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, learning_rate=0.03, use_best_model=True,
loss_function=LoglossObjective(), eval_metric="Logloss",
# Leaf estimation method and gradient iteration are set to match
# defaults for Logloss.
leaf_estimation_method="Newton", leaf_estimation_iterations=1, task_type=task_type, devices='0')
model.fit(train_pool, eval_set=test_pool)
pred1 = model.predict(test_pool, prediction_type='RawFormulaVal')
model2 = CatBoostClassifier(iterations=5, learning_rate=0.03, use_best_model=True, loss_function="Logloss", leaf_estimation_method="Newton", leaf_estimation_iterations=1)
model2.fit(train_pool, eval_set=test_pool)
pred2 = model2.predict(test_pool, prediction_type='RawFormulaVal')
for p1, p2 in zip(pred1, pred2):
assert abs(p1 - p2) < EPS
def test_pool_after_fit(task_type):
pool1 = Pool(TRAIN_FILE, column_description=CD_FILE)
pool2 = Pool(TRAIN_FILE, column_description=CD_FILE)
assert _check_data(pool1.get_features(), pool2.get_features())
model = CatBoostClassifier(iterations=5, task_type=task_type, devices='0')
model.fit(pool2)
assert _check_data(pool1.get_features(), pool2.get_features())
def test_priors(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(
iterations=5,
learning_rate=0.03,
has_time=True,
ctr_description=["Borders:Prior=0:Prior=0.6:Prior=1:Prior=5",
("FeatureFreq" if task_type == 'GPU' else "Counter") + ":Prior=0:Prior=0.6:Prior=1:Prior=5"],
task_type=task_type, devices='0',
)
model.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_ignored_features(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model1 = CatBoostClassifier(iterations=5, learning_rate=0.03, task_type=task_type, devices='0', max_ctr_complexity=1, ignored_features=[1, 2, 3])
model2 = CatBoostClassifier(iterations=5, learning_rate=0.03, task_type=task_type, devices='0', max_ctr_complexity=1)
model1.fit(train_pool)
model2.fit(train_pool)
predictions1 = model1.predict_proba(test_pool)
predictions2 = model2.predict_proba(test_pool)
assert not _check_data(predictions1, predictions2)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model1.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_class_weights(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, learning_rate=0.03, class_weights=[1, 2], task_type=task_type, devices='0')
model.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_classification_ctr(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, learning_rate=0.03,
ctr_description=['Borders', 'FeatureFreq' if task_type == 'GPU' else 'Counter'],
task_type=task_type, devices='0')
model.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
@fails_on_gpu(how="libs/options/catboost_options.cpp:280: Error: GPU doesn't not support target binarization per CTR description currently. Please use ctr_target_border_count option instead")
def test_regression_ctr(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostRegressor(iterations=5, learning_rate=0.03, ctr_description=['Borders:TargetBorderCount=5:TargetBorderType=Uniform', 'Counter'], task_type=task_type, devices='0')
model.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_ctr_target_border_count(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostRegressor(iterations=5, learning_rate=0.03, ctr_target_border_count=5, task_type=task_type, devices='0')
model.fit(pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_copy_model():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model1 = CatBoostRegressor(iterations=5)
model1.fit(pool)
model2 = model1.copy()
predictions1 = model1.predict(pool)
predictions2 = model2.predict(pool)
assert _check_data(predictions1, predictions2)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model2.save_model(output_model_path)
return compare_canonical_models(output_model_path)
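# Cross-validation tests: cv() returns per-iteration aggregates such as
# 'train-Logloss-mean', which should decrease monotonically for these settings.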
def test_cv(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
results = cv(
pool,
{
"iterations": 20,
"learning_rate": 0.03,
"loss_function": "Logloss",
"eval_metric": "AUC",
"task_type": task_type,
},
dev_max_iterations_batch_size=6
)
assert "train-Logloss-mean" in results
prev_value = results["train-Logloss-mean"][0]
for value in results["train-Logloss-mean"][1:]:
assert value < prev_value
prev_value = value
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
def test_cv_query(task_type):
pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
results = cv(
pool,
{"iterations": 20, "learning_rate": 0.03, "loss_function": "QueryRMSE", "task_type": task_type},
dev_max_iterations_batch_size=6
)
assert "train-QueryRMSE-mean" in results
prev_value = results["train-QueryRMSE-mean"][0]
for value in results["train-QueryRMSE-mean"][1:]:
assert value < prev_value
prev_value = value
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
def test_cv_pairs(task_type):
pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE, pairs=QUERYWISE_TRAIN_PAIRS_FILE)
results = cv(
pool,
{
"iterations": 20,
"learning_rate": 0.03,
"random_seed": 8,
"loss_function": "PairLogit",
"task_type": task_type
},
dev_max_iterations_batch_size=6
)
assert "train-PairLogit-mean" in results
prev_value = results["train-PairLogit-mean"][0]
for value in results["train-PairLogit-mean"][1:]:
assert value < prev_value
prev_value = value
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
def test_cv_pairs_generated(task_type):
pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
results = cv(
pool,
{
"iterations": 10,
"learning_rate": 0.03,
"random_seed": 8,
"loss_function": "PairLogit",
"task_type": task_type
},
dev_max_iterations_batch_size=6
)
assert "train-PairLogit-mean" in results
prev_value = results["train-PairLogit-mean"][0]
for value in results["train-PairLogit-mean"][1:]:
assert value < prev_value
prev_value = value
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
def test_cv_custom_loss(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
results = cv(
pool,
{
"iterations": 5,
"learning_rate": 0.03,
"loss_function": "Logloss",
"custom_loss": "AUC",
"task_type": task_type,
}
)
assert "test-AUC-mean" in results
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
def test_cv_skip_train(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
results = cv(
pool,
{
"iterations": 20,
"learning_rate": 0.03,
"loss_function": "Logloss:hints=skip_train~true",
"eval_metric": "AUC",
"task_type": task_type,
},
dev_max_iterations_batch_size=6
)
assert "train-Logloss-mean" not in results
assert "train-Logloss-std" not in results
assert "train-AUC-mean" not in results
assert "train-AUC-std" not in results
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
def test_cv_skip_train_default(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
results = cv(
pool,
{
"iterations": 20,
"learning_rate": 0.03,
"loss_function": "Logloss",
"custom_loss": "AUC",
"task_type": task_type,
},
dev_max_iterations_batch_size=6
)
assert "train-AUC-mean" not in results
assert "train-AUC-std" not in results
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
def test_cv_metric_period(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
results = cv(
pool,
{
"iterations": 20,
"learning_rate": 0.03,
"loss_function": "Logloss",
"eval_metric": "AUC",
"task_type": task_type,
},
metric_period=5,
dev_max_iterations_batch_size=6
)
assert "train-Logloss-mean" in results
prev_value = results["train-Logloss-mean"][0]
for value in results["train-Logloss-mean"][1:]:
assert value < prev_value
prev_value = value
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
@pytest.mark.parametrize(
'with_metric_period',
[False, True],
ids=['with_metric_period=' + val for val in ['False', 'True']]
)
def test_cv_overfitting_detector(with_metric_period, task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
results = cv(
pool,
{
"iterations": 20,
"learning_rate": 0.03,
"loss_function": "Logloss",
"eval_metric": "AUC",
"task_type": task_type,
},
metric_period=5 if with_metric_period else None,
early_stopping_rounds=7,
dev_max_iterations_batch_size=6
)
assert "train-Logloss-mean" in results
prev_value = results["train-Logloss-mean"][0]
for value in results["train-Logloss-mean"][1:]:
assert value < prev_value
prev_value = value
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
@pytest.mark.parametrize('param_type', ['indices', 'strings'])
def test_cv_with_cat_features_param(param_type):
if param_type == 'indices':
cat_features_param = [1, 2]
feature_names_param = None
else:
cat_features_param = ['feat1', 'feat2']
feature_names_param = ['feat' + str(i) for i in xrange(20)]
prng = np.random.RandomState(seed=20181219)
data = prng.randint(10, size=(20, 20))
label = _generate_nontrivial_binary_target(20, prng=prng)
pool = Pool(data, label, cat_features=cat_features_param, feature_names=feature_names_param)
params = {
'loss_function': 'Logloss',
'iterations': 10
}
results1 = cv(pool, params, as_pandas=False)
params_with_cat_features = params.copy()
params_with_cat_features['cat_features'] = cat_features_param
results2 = cv(pool, params_with_cat_features, as_pandas=False)
assert results1 == results2
params_with_wrong_cat_features = params.copy()
params_with_wrong_cat_features['cat_features'] = [0, 2] if param_type == 'indices' else ['feat0', 'feat2']
with pytest.raises(CatBoostError):
cv(pool, params_with_wrong_cat_features)
def test_cv_with_save_snapshot(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
with pytest.raises(CatBoostError):
cv(
pool,
{
"iterations": 20,
"learning_rate": 0.03,
"loss_function": "Logloss",
"eval_metric": "AUC",
"task_type": task_type,
"save_snapshot": True
},
dev_max_iterations_batch_size=6
)
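# Feature importance tests: the default importance equals PredictionValuesChange,
# LossFunctionChange requires a dataset, and Interaction / ShapValues results are
# compared against canonical files.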
def test_feature_importance(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
pool_querywise = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
fimp_npy_path = test_output_path(FIMP_NPY_PATH)
model = CatBoost({"iterations": 5, "learning_rate": 0.03, "task_type": task_type, "devices": "0", "loss_function": "QueryRMSE"})
model.fit(pool_querywise)
assert len(model.feature_importances_.shape) == 0
model.get_feature_importance(type=EFstrType.LossFunctionChange, data=pool_querywise)
model = CatBoostClassifier(iterations=5, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(pool)
assert (model.get_feature_importance() == model.get_feature_importance(type=EFstrType.PredictionValuesChange)).all()
failed = False
try:
model.get_feature_importance(type=EFstrType.LossFunctionChange)
except CatBoostError:
failed = True
assert failed
np.save(fimp_npy_path, np.array(model.feature_importances_))
assert len(model.feature_importances_.shape)
return local_canonical_file(fimp_npy_path)
def test_feature_importance_explicit(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(pool)
fimp_npy_path = test_output_path(FIMP_NPY_PATH)
np.save(fimp_npy_path, np.array(model.get_feature_importance(type=EFstrType.PredictionValuesChange)))
return local_canonical_file(fimp_npy_path)
def test_feature_importance_prettified(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(pool)
feature_importances = model.get_feature_importance(type=EFstrType.PredictionValuesChange, prettified=True)
fimp_txt_path = test_output_path(FIMP_TXT_PATH)
with open(fimp_txt_path, 'w') as ofile:
for f_id, f_imp in feature_importances.values:
ofile.write('{}\t{}\n'.format(f_id, f_imp))
return local_canonical_file(fimp_txt_path)
def test_interaction_feature_importance(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(pool)
fimp_npy_path = test_output_path(FIMP_NPY_PATH)
np.save(fimp_npy_path, np.array(model.get_feature_importance(type=EFstrType.Interaction)))
return local_canonical_file(fimp_npy_path)
def test_shap_feature_importance(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, learning_rate=0.03, max_ctr_complexity=1, task_type=task_type, devices='0')
model.fit(pool)
fimp_npy_path = test_output_path(FIMP_NPY_PATH)
np.save(fimp_npy_path, np.array(model.get_feature_importance(type=EFstrType.ShapValues, data=pool)))
return local_canonical_file(fimp_npy_path)
def test_shap_feature_importance_modes(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, task_type=task_type)
model.fit(pool)
modes = ["Auto", "UsePreCalc", "NoPreCalc"]
shaps_for_modes = []
for mode in modes:
shaps_for_modes.append(model.get_feature_importance(type=EFstrType.ShapValues, data=pool, shap_mode=mode))
    for i in range(len(modes) - 1):
        # every shap_mode must produce (numerically) identical SHAP values
        assert np.all(np.abs(shaps_for_modes[i] - shaps_for_modes[i + 1]) < 1e-9)
def test_od(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=1000, learning_rate=0.03, od_type='Iter', od_wait=20, random_seed=42, task_type=task_type, devices='0')
model.fit(train_pool, eval_set=test_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_clone(task_type):
estimator = CatBoostClassifier(
custom_metric="Accuracy",
loss_function="MultiClass",
iterations=400,
learning_rate=0.03,
task_type=task_type, devices='0')
# This is important for sklearn.base.clone since
    # it uses get_params for cloning the estimator.
params = estimator.get_params()
new_estimator = CatBoostClassifier(**params)
new_params = new_estimator.get_params()
for param in params:
assert param in new_params
assert new_params[param] == params[param]
def test_different_cat_features_order(task_type):
dataset = np.array([[2, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
labels = [1.2, 3.4, 9.5, 24.5]
pool1 = Pool(dataset, labels, cat_features=[0, 1])
pool2 = Pool(dataset, labels, cat_features=[1, 0])
model = CatBoost({'learning_rate': 1, 'loss_function': 'RMSE', 'iterations': 2, 'random_seed': 42, 'task_type': task_type, 'devices': '0'})
model.fit(pool1)
assert (model.predict(pool1) == model.predict(pool2)).all()
@fails_on_gpu(how='libs/options/json_helper.h:198: Error: change of option approx_on_full_history is unimplemented for task type GPU and was not default in previous run')
def test_full_history(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=1000, learning_rate=0.03, od_type='Iter', od_wait=20, random_seed=42, approx_on_full_history=True, task_type=task_type, devices='0')
model.fit(train_pool, eval_set=test_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
return compare_canonical_models(output_model_path)
def test_cv_logging(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cv(
pool,
{
"iterations": 14,
"learning_rate": 0.03,
"loss_function": "Logloss",
"task_type": task_type
},
dev_max_iterations_batch_size=6
)
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
def test_cv_with_not_binarized_target(task_type):
train_file = data_file('adult_not_binarized', 'train_small')
cd = data_file('adult_not_binarized', 'train.cd')
pool = Pool(train_file, column_description=cd)
cv(
pool,
{"iterations": 10, "learning_rate": 0.03, "loss_function": "Logloss", "task_type": task_type},
dev_max_iterations_batch_size=6
)
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
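# eval_metrics and the batch metric calcer must reproduce the per-iteration test errors
# written to catboost_info/test_error.tsv during training (looser tolerance on GPU).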
@pytest.mark.parametrize('loss_function', ['Logloss', 'RMSE', 'QueryRMSE'])
def test_eval_metrics(loss_function, task_type):
train, test, cd, metric = TRAIN_FILE, TEST_FILE, CD_FILE, loss_function
if loss_function == 'QueryRMSE':
train, test, cd, metric = QUERYWISE_TRAIN_FILE, QUERYWISE_TEST_FILE, QUERYWISE_CD_FILE, 'PFound'
if loss_function == 'Logloss':
metric = 'AUC'
train_pool = Pool(train, column_description=cd)
test_pool = Pool(test, column_description=cd)
model = CatBoost(params={'loss_function': loss_function, 'iterations': 20, 'thread_count': 8, 'eval_metric': metric,
'task_type': task_type, 'devices': '0', 'counter_calc_method': 'SkipTest'})
model.fit(train_pool, eval_set=test_pool, use_best_model=False)
first_metrics = np.loadtxt('catboost_info/test_error.tsv', skiprows=1)[:, 1]
second_metrics = model.eval_metrics(test_pool, [metric])[metric]
elemwise_reldiff = np.abs(first_metrics - second_metrics) / np.max((np.abs(first_metrics), np.abs(second_metrics)), 0)
elemwise_absdiff = np.abs(first_metrics - second_metrics)
elemwise_mindiff = np.min((elemwise_reldiff, elemwise_absdiff), 0)
if task_type == 'GPU':
assert np.all(abs(elemwise_mindiff) < 1e-7)
else:
assert np.all(abs(elemwise_mindiff) < 1e-9)
@pytest.mark.parametrize('loss_function', ['Logloss', 'RMSE', 'QueryRMSE'])
def test_eval_metrics_batch_calcer(loss_function, task_type):
metric = loss_function
if loss_function == 'QueryRMSE':
train, test, cd = QUERYWISE_TRAIN_FILE, QUERYWISE_TEST_FILE, QUERYWISE_CD_FILE
metric = 'PFound'
else:
train, test, cd = TRAIN_FILE, TEST_FILE, CD_FILE
train_pool = Pool(train, column_description=cd)
test_pool = Pool(test, column_description=cd)
model = CatBoost(params={'loss_function': loss_function, 'iterations': 100, 'thread_count': 8,
'eval_metric': metric, 'task_type': task_type, 'devices': '0', 'counter_calc_method': 'SkipTest'})
model.fit(train_pool, eval_set=test_pool, use_best_model=False)
first_metrics = np.loadtxt('catboost_info/test_error.tsv', skiprows=1)[:, 1]
calcer = model.create_metric_calcer([metric])
calcer.add(test_pool)
second_metrics = calcer.eval_metrics().get_result(metric)
elemwise_reldiff = np.abs(first_metrics - second_metrics) / np.max((np.abs(first_metrics), np.abs(second_metrics)), 0)
elemwise_absdiff = np.abs(first_metrics - second_metrics)
elemwise_mindiff = np.min((elemwise_reldiff, elemwise_absdiff), 0)
if task_type == 'GPU':
assert np.all(abs(elemwise_mindiff) < 1e-6)
else:
assert np.all(abs(elemwise_mindiff) < 1e-9)
@fails_on_gpu(how='assert 0.001453466387789204 < EPS, where 0.001453466387789204 = abs((0.8572555206815472 - 0.8587089870693364))')
@pytest.mark.parametrize('catboost_class', [CatBoostClassifier, CatBoostRegressor])
def test_score_from_features_data(catboost_class, task_type):
train_pool_from_files = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool_from_files = Pool(TEST_FILE, column_description=CD_FILE)
model = catboost_class(iterations=2, learning_rate=0.03, task_type=task_type, devices='0')
model.fit(train_pool_from_files)
score_from_files = model.score(test_pool_from_files)
train_features_data, test_features_data = [
get_features_data_from_file(
data_file=data_file,
drop_columns=[TARGET_IDX],
cat_feature_indices=train_pool_from_files.get_cat_feature_indices()
)
for data_file in [TRAIN_FILE, TEST_FILE]
]
model.fit(X=train_features_data, y=train_pool_from_files.get_label())
score_from_features_data = model.score(test_features_data, test_pool_from_files.get_label())
assert abs(score_from_files - score_from_features_data) < EPS
    # empty input: score on an empty dataset is NaN
empty_test_features_data = FeaturesData(
num_feature_data=np.empty((0, test_features_data.get_num_feature_count()), dtype=np.float32),
cat_feature_data=np.empty((0, test_features_data.get_cat_feature_count()), dtype=object)
)
score_from_features_data = model.score(empty_test_features_data, [])
assert np.isnan(score_from_features_data)
@pytest.mark.parametrize('catboost_class', [CatBoostClassifier, CatBoostRegressor])
def test_call_score_with_pool_and_y(catboost_class):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = catboost_class(iterations=2)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
train_features, test_features = [
get_features_data_from_file(
data_file=data_file,
drop_columns=[TARGET_IDX],
cat_feature_indices=train_pool.get_cat_feature_indices()
)
for data_file in [TRAIN_FILE, TEST_FILE]
]
train_target = train_pool.get_label()
test_target = test_pool.get_label()
test_pool_without_label = Pool(test_features)
model.fit(train_pool)
model.score(test_pool)
with pytest.raises(CatBoostError, message="Label in X has not initialized."):
model.score(test_pool_without_label, test_target)
with pytest.raises(CatBoostError, message="Wrong initializing y: X is catboost.Pool object, y must be initialized inside catboost.Pool."):
model.score(test_pool, test_target)
with pytest.raises(CatBoostError, message="Wrong initializing y: X is catboost.Pool object, y must be initialized inside catboost.Pool."):
model.score(test_pool_without_label, test_target)
model.fit(train_features, train_target)
model.score(test_features, test_target)
with pytest.raises(CatBoostError, message="y should be specified."):
model.score(test_features)
@pytest.mark.parametrize('verbose', [5, False, True])
def test_verbose_int(verbose, task_type):
expected_line_count = {5: 3, False: 0, True: 10}
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
tmpfile = 'test_data_dumps'
with LogStdout(open(tmpfile, 'w')):
cv(
pool,
{"iterations": 10, "learning_rate": 0.03, "loss_function": "Logloss", "task_type": task_type},
verbose=verbose,
dev_max_iterations_batch_size=6
)
assert(_count_lines(tmpfile) == expected_line_count[verbose])
with LogStdout(open(tmpfile, 'w')):
train(pool, {"iterations": 10, "learning_rate": 0.03, "loss_function": "Logloss", "task_type": task_type, "devices": '0'}, verbose=verbose)
assert(_count_lines(tmpfile) == expected_line_count[verbose])
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
def test_eval_set(task_type):
dataset = [(1, 2, 3, 4), (2, 2, 3, 4), (3, 2, 3, 4), (4, 2, 3, 4)]
labels = [1, 2, 3, 4]
train_pool = Pool(dataset, labels, cat_features=[0, 3, 2])
model = CatBoost({'learning_rate': 1, 'loss_function': 'RMSE', 'iterations': 2, 'task_type': task_type, 'devices': '0'})
eval_dataset = [(5, 6, 6, 6), (6, 6, 6, 6)]
eval_labels = [5, 6]
eval_pool = (eval_dataset, eval_labels)
model.fit(train_pool, eval_set=eval_pool)
eval_pools = [eval_pool]
model.fit(train_pool, eval_set=eval_pools)
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
def test_object_importances(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoost({'loss_function': 'RMSE', 'iterations': 10, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
indices, scores = model.get_object_importance(pool, train_pool, top_size=10)
oimp_path = test_output_path(OIMP_PATH)
np.savetxt(oimp_path, scores)
return local_canonical_file(oimp_path)
def test_shap(task_type):
train_pool = Pool([[0, 0], [0, 1], [1, 0], [1, 1]], [0, 1, 5, 8], cat_features=[])
test_pool = Pool([[0, 0], [0, 1], [1, 0], [1, 1]])
model = CatBoostRegressor(iterations=1, max_ctr_complexity=1, depth=2, task_type=task_type, devices='0')
model.fit(train_pool)
shap_values = model.get_feature_importance(type=EFstrType.ShapValues, data=test_pool)
dataset = [(0.5, 1.2), (1.6, 0.5), (1.8, 1.0), (0.4, 0.6), (0.3, 1.6), (1.5, 0.2)]
labels = [1.1, 1.85, 2.3, 0.7, 1.1, 1.6]
train_pool = Pool(dataset, labels, cat_features=[])
model = CatBoost({'iterations': 10, 'max_ctr_complexity': 1, 'task_type': task_type, 'devices': '0'})
model.fit(train_pool)
testset = [(0.6, 1.2), (1.4, 0.3), (1.5, 0.8), (1.4, 0.6)]
predictions = model.predict(testset)
shap_values = model.get_feature_importance(type=EFstrType.ShapValues, data=Pool(testset))
assert(len(predictions) == len(shap_values))
for pred_idx in range(len(predictions)):
assert(abs(sum(shap_values[pred_idx]) - predictions[pred_idx]) < 1e-9)
fimp_txt_path = test_output_path(FIMP_TXT_PATH)
np.savetxt(fimp_txt_path, shap_values)
return local_canonical_file(fimp_txt_path)
def test_shap_complex_ctr(task_type):
pool = Pool([[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 2]], [0, 0, 5, 8], cat_features=[0, 1, 2])
model = train(pool, {'random_seed': 12302113, 'iterations': 100, 'task_type': task_type, 'devices': '0'})
shap_values = model.get_feature_importance(type=EFstrType.ShapValues, data=pool)
predictions = model.predict(pool)
assert(len(predictions) == len(shap_values))
for pred_idx in range(len(predictions)):
assert(abs(sum(shap_values[pred_idx]) - predictions[pred_idx]) < 1e-9)
fimp_txt_path = test_output_path(FIMP_TXT_PATH)
np.savetxt(fimp_txt_path, shap_values)
return local_canonical_file(fimp_txt_path)
def random_xy(num_rows, num_cols_x, seed=20181219, prng=None):
if prng is None:
        prng = np.random.RandomState(seed=seed)
x = prng.randint(100, 104, size=(num_rows, num_cols_x)) # three cat values
y = _generate_nontrivial_binary_target(num_rows, prng=prng)
return x, y
def save_and_give_path(y, x, filename):
file = test_output_path(filename)
np.savetxt(file, np.hstack((np.transpose([y]), x)), delimiter='\t', fmt='%i')
return file
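# Multiple eval sets: empty eval sets are rejected, and the representation of an eval
# set (file path, Pool or (X, y) tuple) and their order must not change the trained model.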
def test_multiple_eval_sets_no_empty():
cat_features = [0, 3, 2]
cd_file = test_output_path('cd.txt')
with open(cd_file, 'wt') as cd:
cd.write('0\tTarget\n')
for feature_no in sorted(cat_features):
cd.write('{}\tCateg\n'.format(1 + feature_no))
prng = np.random.RandomState(seed=20181219)
x, y = random_xy(6, 4, prng=prng)
train_pool = Pool(x, y, cat_features=cat_features)
x0, y0 = random_xy(0, 4, prng=prng) # empty tuple eval set
x1, y1 = random_xy(3, 4, prng=prng)
test0_file = save_and_give_path(y0, x0, 'test0.txt') # empty file eval set
with pytest.raises(CatBoostError, message="Do not create Pool for empty data"):
Pool(x0, y0, cat_features=cat_features)
model = CatBoost({'learning_rate': 1, 'loss_function': 'RMSE', 'iterations': 2,
'allow_const_label': True})
with pytest.raises(CatBoostError, message="Do not fit with empty tuple in multiple eval sets"):
model.fit(train_pool, eval_set=[(x1, y1), (x0, y0)], column_description=cd_file)
with pytest.raises(CatBoostError, message="Do not fit with empty file in multiple eval sets"):
model.fit(train_pool, eval_set=[(x1, y1), test0_file], column_description=cd_file)
with pytest.raises(CatBoostError, message="Do not fit with None in multiple eval sets"):
model.fit(train_pool, eval_set=[(x1, y1), None], column_description=cd_file)
model.fit(train_pool, eval_set=[None], column_description=cd_file)
def test_multiple_eval_sets():
    # Fix the seed so it can be reported if the assertion below fails
seed = 20181219
prng = np.random.RandomState(seed=seed)
def model_fit_with(train_set, test_sets, cd_file):
model = CatBoost({'use_best_model': False, 'loss_function': 'RMSE', 'iterations': 12})
model.fit(train_set, eval_set=list(reversed(test_sets)), column_description=cd_file)
return model
num_features = 11
cat_features = list(range(num_features))
cd_file = test_output_path('cd.txt')
with open(cd_file, 'wt') as cd:
cd.write('0\tTarget\n')
for feature_no in sorted(cat_features):
cd.write('{}\tCateg\n'.format(1 + feature_no))
x, y = random_xy(12, num_features, prng=prng)
train_pool = Pool(x, y, cat_features=cat_features)
x1, y1 = random_xy(13, num_features, prng=prng)
x2, y2 = random_xy(14, num_features, prng=prng)
y2 = np.zeros_like(y2)
test1_file = save_and_give_path(y1, x1, 'test1.txt')
test2_pool = Pool(x2, y2, cat_features=cat_features)
model0 = model_fit_with(train_pool, [test1_file, test2_pool], cd_file)
model1 = model_fit_with(train_pool, [test2_pool, (x1, y1)], cd_file)
model2 = model_fit_with(train_pool, [(x2, y2), test1_file], cd_file)
# The three models above shall predict identically on a test set
# (make sure they are trained with 'use_best_model': False)
xt, yt = random_xy(7, num_features, prng=prng)
test_pool = Pool(xt, yt, cat_features=cat_features)
pred0 = model0.predict(test_pool)
pred1 = model1.predict(test_pool)
pred2 = model2.predict(test_pool)
hash0 = hashlib.md5(pred0).hexdigest()
hash1 = hashlib.md5(pred1).hexdigest()
hash2 = hashlib.md5(pred2).hexdigest()
assert hash0 == hash1 and hash1 == hash2, 'seed: ' + str(seed)
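# Model metadata behaves like a persistent str -> str mapping stored with the model;
# non-string keys or values are rejected.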
def test_get_metadata_notrain():
model = CatBoost()
with pytest.raises(CatBoostError, message='Only string keys should be allowed'):
model.get_metadata()[1] = '1'
with pytest.raises(CatBoostError, message='Only string values should be allowed'):
model.get_metadata()['1'] = 1
model.get_metadata()['1'] = '1'
assert model.get_metadata().get('1', 'EMPTY') == '1'
assert model.get_metadata().get('2', 'EMPTY') == 'EMPTY'
for i in xrange(100):
model.get_metadata()[str(i)] = str(i)
del model.get_metadata()['98']
with pytest.raises(KeyError):
i = model.get_metadata()['98']
for i in xrange(0, 98, 2):
assert str(i) in model.get_metadata()
del model.get_metadata()[str(i)]
for i in xrange(0, 98, 2):
assert str(i) not in model.get_metadata()
assert str(i + 1) in model.get_metadata()
def test_metadata():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(
iterations=2,
learning_rate=0.03,
loss_function='Logloss:border=0.5',
metadata={"type": "AAA", "postprocess": "BBB"}
)
model.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path)
model2 = CatBoost()
model2.load_model(output_model_path)
assert 'type' in model2.get_metadata()
assert model2.get_metadata()['type'] == 'AAA'
assert 'postprocess' in model2.get_metadata()
assert model2.get_metadata()['postprocess'] == 'BBB'
return compare_canonical_models(output_model_path)
@pytest.mark.parametrize('metric', ['Logloss', 'RMSE'])
def test_util_eval_metric(metric):
metric_results = eval_metric([1, 0], [0.88, 0.22], metric)
preds_path = test_output_path(PREDS_PATH)
np.savetxt(preds_path, np.array(metric_results))
return local_canonical_file(preds_path)
@pytest.mark.parametrize('metric', ['MultiClass', 'AUC'])
def test_util_eval_metric_multiclass(metric):
metric_results = eval_metric([1, 0, 2], [[0.88, 0.22, 0.3], [0.21, 0.45, 0.1], [0.12, 0.32, 0.9]], metric)
preds_path = test_output_path(PREDS_PATH)
np.savetxt(preds_path, np.array(metric_results))
return local_canonical_file(preds_path)
def test_option_used_ram_limit():
for limit in [1000, 1234.56, 0, 0.0, 0.5,
'100', '34.56', '0', '0.0', '0.5',
'1.2mB', '1000b', '', None, 'none', 'inf']:
CatBoost({'used_ram_limit': limit})
for limit in [-1000, 'any', '-0.5', 'nolimit', 'oo']:
try:
CatBoost({'used_ram_limit': limit})
assert False, "Shall not allow used_ram_limit={!r}".format(limit)
except:
assert True
def get_values_that_json_dumps_breaks_on():
name_dtype = {name: np.__dict__[name] for name in dir(np) if (
isinstance(np.__dict__[name], type)
and re.match('(int|uint|float|bool).*', name)
)}
name_value = {}
for name, dtype in name_dtype.items():
try:
value = dtype(1)
if str(value).startswith('<'):
continue
name_value[name] = value
name_value['array of ' + name] = np.array([[1, 0], [0, 1]], dtype=dtype)
except:
pass
return name_value
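# A minimal illustration (not part of the original suite) of why these values need
# special handling: the stdlib json module refuses to serialize numpy scalar types,
# so any parameter dict holding e.g. np.int64 must be preprocessed before dumping:
#
#     import json
#     json.dumps({'iterations': 2})            # fine
#     json.dumps({'iterations': np.int64(2)})  # TypeError: ... is not JSON serializable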
def test_serialization_of_numpy_objects_internal():
from catboost._catboost import _PreprocessParams
_PreprocessParams(get_values_that_json_dumps_breaks_on())
def test_serialization_of_numpy_objects_save_model():
prng = np.random.RandomState(seed=20181219)
train_pool = Pool(*random_xy(10, 5, prng=prng))
model = CatBoostClassifier(
iterations=np.int64(2),
random_seed=np.int32(0),
loss_function='Logloss'
)
model.fit(train_pool)
output_model_path = test_output_path(OUTPUT_MODEL_PATH)
model.save_model(output_model_path, format='coreml',
export_parameters=get_values_that_json_dumps_breaks_on())
def test_serialization_of_numpy_objects_execution_case():
from catboost.eval.execution_case import ExecutionCase
ExecutionCase(get_values_that_json_dumps_breaks_on())
@fails_on_gpu(how='assert 0 == 4')
def test_metric_period_redefinition(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
tmpfile1 = test_output_path('tmpfile1')
tmpfile2 = test_output_path('tmpfile2')
model = CatBoost(dict(iterations=10, metric_period=3, task_type=task_type, devices='0'))
with LogStdout(open(tmpfile1, 'w')):
model.fit(pool)
with LogStdout(open(tmpfile2, 'w')):
model.fit(pool, metric_period=2)
assert(_count_lines(tmpfile1) == 4)
assert(_count_lines(tmpfile2) == 6)
def test_verbose_redefinition(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
tmpfile1 = test_output_path('tmpfile1')
tmpfile2 = test_output_path('tmpfile2')
model = CatBoost(dict(iterations=10, verbose=False, task_type=task_type, devices='0'))
with LogStdout(open(tmpfile1, 'w')):
model.fit(pool)
with LogStdout(open(tmpfile2, 'w')):
model.fit(pool, verbose=True)
assert(_count_lines(tmpfile1) == 0)
assert(_count_lines(tmpfile2) == 10)
class TestInvalidCustomLossAndMetric(object):
class GoodCustomLoss(object):
def calc_ders_range(self, approxes, targets, weights):
assert len(approxes) == len(targets)
der1 = 2.0 * (np.array(approxes) - np.array(targets))
der2 = np.full(len(approxes), -2.0)
if weights is not None:
assert len(weights) == len(targets)
der1 *= np.array(weights)
der2 *= np.array(weights)
return list(zip(der1, der2))
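# Note (added for clarity): calc_ders_range is the hook CatBoost calls for a custom
# objective; for every object it must return a (first derivative, second derivative)
# pair of the loss with respect to the current approx, scaled by the object weight
# when weights are provided, which is exactly what the (der1, der2) tuples above encode.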
class BadCustomLoss(object):
def calc_ders_range(self, approxes, targets, weights):
raise Exception('BadCustomLoss calc_ders_range')
def calc_ders_multi(self, approxes, targets, weights):
raise Exception('BadCustomLoss calc_ders_multi')
class IncompleteCustomLoss(object):
pass
class GoodCustomMetric(object):
def get_final_error(self, error, weight):
return 0.0
def is_max_optimal(self):
return True
def evaluate(self, approxes, target, weight):
return (0.0, 0.0)
class IncompleteCustomMetric(object):
pass
def test_loss_good_metric_none(self):
with pytest.raises(CatBoostError, match='metric is not defined|No metrics specified'):
model = CatBoost({"loss_function": self.GoodCustomLoss(), "iterations": 2})
prng = np.random.RandomState(seed=20181219)
pool = Pool(*random_xy(10, 5, prng=prng))
model.fit(pool)
def test_loss_bad_metric_logloss(self):
if PY3:
return pytest.xfail(reason='Need fixing')
with pytest.raises(Exception, match='BadCustomLoss calc_ders_range'):
model = CatBoost({"loss_function": self.BadCustomLoss(), "eval_metric": "Logloss", "iterations": 2})
prng = np.random.RandomState(seed=20181219)
pool = Pool(*random_xy(10, 5, prng=prng))
model.fit(pool)
def test_loss_bad_metric_multiclass(self):
if PY3:
return pytest.xfail(reason='Need fixing')
with pytest.raises(Exception, match='BadCustomLoss calc_ders_multi'):
model = CatBoost({"loss_function": self.BadCustomLoss(), "eval_metric": "MultiClass", "iterations": 2})
prng = np.random.RandomState(seed=20181219)
pool = Pool(*random_xy(10, 5, prng=prng))
model.fit(pool)
def test_loss_incomplete_metric_logloss(self):
if PY3:
return pytest.xfail(reason='Need fixing')
with pytest.raises(Exception, match='has no.*calc_ders_range'):
model = CatBoost({"loss_function": self.IncompleteCustomLoss(), "eval_metric": "Logloss", "iterations": 2})
prng = np.random.RandomState(seed=20181219)
pool = Pool(*random_xy(10, 5, prng=prng))
model.fit(pool)
def test_loss_incomplete_metric_multiclass(self):
if PY3:
return pytest.xfail(reason='Need fixing')
with pytest.raises(Exception, match='has no.*calc_ders_multi'):
model = CatBoost({"loss_function": self.IncompleteCustomLoss(), "eval_metric": "MultiClass", "iterations": 2})
prng = np.random.RandomState(seed=20181219)
pool = Pool(*random_xy(10, 5, prng=prng))
model.fit(pool)
def test_custom_metric_object(self):
with pytest.raises(CatBoostError, match='custom_metric.*must be string'):
model = CatBoost({"custom_metric": self.GoodCustomMetric(), "iterations": 2})
prng = np.random.RandomState(seed=20181219)
pool = Pool(*random_xy(10, 5, prng=prng))
model.fit(pool)
def test_loss_none_metric_good(self):
model = CatBoost({"eval_metric": self.GoodCustomMetric(), "iterations": 2})
prng = np.random.RandomState(seed=20181219)
pool = Pool(*random_xy(10, 5, prng=prng))
model.fit(pool)
def test_loss_none_metric_incomplete(self):
with pytest.raises(CatBoostError, match='has no.*evaluate'):
model = CatBoost({"eval_metric": self.IncompleteCustomMetric(), "iterations": 2})
prng = np.random.RandomState(seed=20181219)
pool = Pool(*random_xy(10, 5, prng=prng))
model.fit(pool)
def test_custom_loss_and_metric(self):
model = CatBoost(
{"loss_function": self.GoodCustomLoss(), "eval_metric": self.GoodCustomMetric(), "iterations": 2}
)
prng = np.random.RandomState(seed=20181219)
pool = Pool(*random_xy(10, 5, prng=prng))
model.fit(pool)
def test_silent():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
tmpfile1 = test_output_path('tmpfile1')
tmpfile2 = test_output_path('tmpfile2')
tmpfile3 = test_output_path('tmpfile3')
tmpfile4 = test_output_path('tmpfile4')
tmpfile5 = test_output_path('tmpfile5')
with LogStdout(open(tmpfile1, 'w')):
model = CatBoost(dict(iterations=10, silent=True))
model.fit(pool)
with LogStdout(open(tmpfile2, 'w')):
model = CatBoost(dict(iterations=10, silent=True))
model.fit(pool, silent=False)
with LogStdout(open(tmpfile3, 'w')):
train(pool, {'silent': True})
with LogStdout(open(tmpfile4, 'w')):
model = CatBoost(dict(iterations=10, silent=False))
model.fit(pool, silent=True)
with LogStdout(open(tmpfile5, 'w')):
model = CatBoost(dict(iterations=10, verbose=5))
model.fit(pool, silent=True)
assert(_count_lines(tmpfile1) == 0)
assert(_count_lines(tmpfile2) == 10)
assert(_count_lines(tmpfile3) == 0)
assert(_count_lines(tmpfile4) == 0)
assert(_count_lines(tmpfile5) == 0)
def test_set_params_with_synonyms(task_type):
params = {'num_trees': 20,
'max_depth': 5,
'learning_rate': 0.001,
'logging_level': 'Silent',
'loss_function': 'RMSE',
'eval_metric': 'RMSE',
'od_wait': 150,
'random_seed': 8888,
'task_type': task_type,
'devices': '0'
}
model1 = CatBoostRegressor(**params)
params_after_setting = model1.get_params()
assert(params == params_after_setting)
prng = np.random.RandomState(seed=20181219)
data = prng.randint(10, size=(20, 20))
label = _generate_nontrivial_binary_target(20, prng=prng)
train_pool = Pool(data, label, cat_features=[1, 2])
model1.fit(train_pool)
model_path = test_output_path('model.cb')
model1.save_model(model_path)
model2 = CatBoost()
model2.load_model(model_path)
params_after_save_model = model2.get_params()
assert(params.keys() != params_after_save_model.keys())
model2 = CatBoost()
model2.set_params(**model1.get_params())
assert(model1.get_params() == model2.get_params())
state = model1.__getstate__()
model2 = CatBoost()
model2.__setstate__(state)
assert(model1.get_params() == model2.get_params())
def test_feature_names_from_model():
input_file = test_output_path('pool')
with open(input_file, 'w') as inp:
inp.write('0\t1\t2\t0\n1\t2\t3\t1\n')
column_description1 = test_output_path('description1.cd')
create_cd(
label=3,
cat_features=[0, 1],
feature_names={0: 'a', 1: 'b', 2: 'ab'},
output_path=column_description1
)
column_description2 = test_output_path('description2.cd')
create_cd(
label=3,
cat_features=[0, 1],
output_path=column_description2
)
column_description3 = test_output_path('description3.cd')
create_cd(
label=3,
cat_features=[0, 1],
feature_names={0: 'a', 2: 'ab'},
output_path=column_description3
)
pools = [
Pool(input_file, column_description=column_description1),
Pool(input_file, column_description=column_description2),
Pool(input_file, column_description=column_description3)
]
output_file = test_output_path('feature_names')
with open(output_file, 'w') as output:
for i in range(len(pools)):
pool = pools[i]
model = CatBoost(dict(iterations=10))
assert model.feature_names_ is None
model.fit(pool)
output.write(str(model.feature_names_) + '\n')
return local_canonical_file(output_file)
Value_AcceptableAsEmpty = [
('', True),
('nan', True),
('NaN', True),
('NAN', True),
('NA', True),
('Na', True),
('na', True),
("#N/A", True),
("#N/A N/A", True),
("#NA", True),
("-1.#IND", True),
("-1.#QNAN", True),
("-NaN", True),
("-nan", True),
("1.#IND", True),
("1.#QNAN", True),
("N/A", True),
("NULL", True),
("n/a", True),
("null", True),
("Null", True),
("none", True),
("None", True),
('-', True),
('junk', False)
]
class TestMissingValues(object):
def assert_expected(self, pool):
assert str(pool.get_features()) == str(np.array([[1.0], [float('nan')]]))
@pytest.mark.parametrize('value,value_acceptable_as_empty', [(None, True)] + Value_AcceptableAsEmpty)
@pytest.mark.parametrize('object', [list, np.array, DataFrame, Series])
def test_create_pool_from_object(self, value, value_acceptable_as_empty, object):
if value_acceptable_as_empty:
self.assert_expected(Pool(object([[1], [value]])))
self.assert_expected(Pool(object([1, value])))
else:
with pytest.raises(CatBoostError):
Pool(object([1, value]))
@pytest.mark.parametrize('value,value_acceptable_as_empty', Value_AcceptableAsEmpty)
def test_create_pool_from_file(self, value, value_acceptable_as_empty):
pool_path = test_output_path('pool')
open(pool_path, 'wt').write('1\t1\n0\t{}\n'.format(value))
if value_acceptable_as_empty:
self.assert_expected(Pool(pool_path))
else:
with pytest.raises(CatBoostError):
Pool(pool_path)
def test_model_and_pool_compatibility():
features = [
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]
]
targets = [(f[0] ^ f[1]) & f[2] for f in features]
pool1 = Pool(features, targets, cat_features=[0, 1])
pool2 = Pool(features, targets, cat_features=[1, 2])
model = CatBoostRegressor(iterations=4)
model.fit(pool1)
with pytest.raises(CatBoostError):
model.predict(pool2)
with pytest.raises(CatBoostError):
model.get_feature_importance(type=EFstrType.ShapValues, data=pool2)
def test_shap_verbose():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost(dict(iterations=250))
model.fit(pool)
tmpfile = test_output_path('test_data_dumps')
with LogStdout(open(tmpfile, 'w')):
model.get_feature_importance(type=EFstrType.ShapValues, data=pool, verbose=12)
assert(_count_lines(tmpfile) == 5)
def test_eval_set_with_nans(task_type):
prng = np.random.RandomState(seed=20181219)
features = prng.random_sample((10, 200))
labels = prng.random_sample((10,))
features_with_nans = features.copy()
np.putmask(features_with_nans, features_with_nans < 0.5, np.nan)
model = CatBoost({'iterations': 2, 'loss_function': 'RMSE', 'task_type': task_type, 'devices': '0'})
train_pool = Pool(features, label=labels)
test_pool = Pool(features_with_nans, label=labels)
model.fit(train_pool, eval_set=test_pool)
def test_learning_rate_auto_set(task_type):
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model1 = CatBoostClassifier(iterations=10, task_type=task_type, devices='0')
model1.fit(train_pool)
predictions1 = model1.predict_proba(test_pool)
model2 = CatBoostClassifier(iterations=10, learning_rate=model1.learning_rate_, task_type=task_type, devices='0')
model2.fit(train_pool)
predictions2 = model2.predict_proba(test_pool)
assert _check_data(predictions1, predictions2)
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
def test_learning_rate_auto_set_in_cv(task_type):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
results = cv(
pool,
{"iterations": 14, "loss_function": "Logloss", "task_type": task_type},
dev_max_iterations_batch_size=6
)
assert "train-Logloss-mean" in results
prev_value = results["train-Logloss-mean"][0]
for value in results["train-Logloss-mean"][1:]:
assert value < prev_value
prev_value = value
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
def test_shap_multiclass(task_type):
pool = Pool(CLOUDNESS_TRAIN_FILE, column_description=CLOUDNESS_CD_FILE)
classifier = CatBoostClassifier(iterations=10, loss_function='MultiClass', thread_count=8, task_type=task_type, devices='0')
classifier.fit(pool)
pred = classifier.predict(pool, prediction_type='RawFormulaVal')
shap_values = classifier.get_feature_importance(
type=EFstrType.ShapValues,
data=pool,
thread_count=8
)
features_count = pool.num_col()
assert len(pred) == len(shap_values)
result = []
for i in range(len(pred)):
result_for_doc = []
for j in range(len(pred[i])):
result_for_doc = result_for_doc + list(shap_values[i][j])
assert len(shap_values[i][j]) == features_count + 1
s = sum(shap_values[i][j])
assert abs(s - pred[i][j]) < EPS
result.append(result_for_doc)
result = np.array([np.array([value for value in doc]) for doc in result])
fimp_txt_path = test_output_path(FIMP_TXT_PATH)
np.savetxt(fimp_txt_path, result)
return local_canonical_file(fimp_txt_path)
def test_loading_pool_with_numpy_int():
assert _check_shape(Pool(np.array([[2, 2], [1, 2]]), [1.2, 3.4], cat_features=[0]), object_count=2, features_count=2)
def test_loading_pool_with_numpy_str():
assert _check_shape(Pool(np.array([['abc', '2'], ['1', '2']]), np.array([1, 3]), cat_features=[0]), object_count=2, features_count=2)
def test_loading_pool_with_lists():
assert _check_shape(Pool([['abc', 2], ['1', 2]], [1, 3], cat_features=[0]), object_count=2, features_count=2)
def test_pairs_generation(task_type):
model = CatBoost({"loss_function": "PairLogit", "iterations": 2, "task_type": task_type})
pool = Pool(QUERYWISE_TRAIN_FILE, column_description=QUERYWISE_CD_FILE)
model.fit(pool)
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
@fails_on_gpu(how="cuda/methods/dynamic_boosting.h:169: Error: pool has just 3 groups or docs, can't use #1 GPUs to learn on such small pool")
def test_pairs_generation_generated(task_type):
model = CatBoost(params={'loss_function': 'PairLogit', 'iterations': 10, 'thread_count': 8, 'task_type': task_type, 'devices': '0'})
df =
|
read_table(QUERYWISE_TRAIN_FILE, delimiter='\t', header=None)
|
pandas.read_table
|
from __future__ import (absolute_import, print_function,
unicode_literals, division)
import pytest
import pandas as pd
import numpy as np
from .context import gragrapy as gg
from . import assert_data_equal
def test_stat_identity():
stat = gg.stat.identity()
iris = gg.data.iris
assert_data_equal(iris, stat.transform(iris))
@pytest.mark.parametrize('window', [5, 25])
def test_stat_smooth_mavg(window):
x = sorted(np.random.randn(50)*4)
y = sorted(np.random.randn(50))
df =
|
pd.DataFrame({'x': x, 'y': y})
|
pandas.DataFrame
|
import unittest
from random import random
from craft_ai.pandas import CRAFTAI_PANDAS_ENABLED
if CRAFTAI_PANDAS_ENABLED:
import copy
import pandas as pd
from numpy.random import randn
import craft_ai.pandas
from .data import pandas_valid_data, valid_data
from .utils import generate_entity_id
from . import settings
AGENT_ID_1_BASE = "test_pandas_1"
AGENT_ID_2_BASE = "test_pandas_2"
GENERATOR_ID_BASE = "test_pandas_generator"
SIMPLE_AGENT_CONFIGURATION = pandas_valid_data.SIMPLE_AGENT_CONFIGURATION
SIMPLE_AGENT_BOOSTING_CONFIGURATION = (
pandas_valid_data.SIMPLE_AGENT_BOOSTING_CONFIGURATION
)
SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE = (
pandas_valid_data.SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE
)
AGENT_BOOSTING_CONFIGURATION_WITHOUT_TIMEZONE = (
pandas_valid_data.AGENT_BOOSTING_CONFIGURATION_WITHOUT_TIMEZONE
)
SIMPLE_AGENT_DATA = pandas_valid_data.SIMPLE_AGENT_DATA
SIMPLE_AGENT_BOOSTING_DATA = pandas_valid_data.SIMPLE_AGENT_BOOSTING_DATA
SIMPLE_AGENT_BOOSTING_MANY_DATA = pandas_valid_data.SIMPLE_AGENT_BOOSTING_MANY_DATA
AGENT_BOOSTING_WITHOUT_TIMEZONE_DATA = (
pandas_valid_data.AGENT_BOOSTING_WITHOUT_TIMEZONE_DATA
)
SIMPLE_AGENT_MANY_DATA = pandas_valid_data.SIMPLE_AGENT_MANY_DATA
COMPLEX_AGENT_CONFIGURATION = pandas_valid_data.COMPLEX_AGENT_CONFIGURATION
COMPLEX_AGENT_CONFIGURATION_2 = pandas_valid_data.COMPLEX_AGENT_CONFIGURATION_2
COMPLEX_AGENT_DATA = pandas_valid_data.COMPLEX_AGENT_DATA
COMPLEX_AGENT_DATA_2 = pandas_valid_data.COMPLEX_AGENT_DATA_2
DATETIME_AGENT_CONFIGURATION = pandas_valid_data.DATETIME_AGENT_CONFIGURATION
DATETIME_AGENT_DATA = pandas_valid_data.DATETIME_AGENT_DATA
MISSING_AGENT_CONFIGURATION = pandas_valid_data.MISSING_AGENT_CONFIGURATION
MISSING_AGENT_DATA = pandas_valid_data.MISSING_AGENT_DATA
MISSING_AGENT_DATA_DECISION = pandas_valid_data.MISSING_AGENT_DATA_DECISION
INVALID_PYTHON_IDENTIFIER_CONFIGURATION = (
pandas_valid_data.INVALID_PYTHON_IDENTIFIER_CONFIGURATION
)
INVALID_PYTHON_IDENTIFIER_DATA = pandas_valid_data.INVALID_PYTHON_IDENTIFIER_DATA
INVALID_PYTHON_IDENTIFIER_DECISION = (
pandas_valid_data.INVALID_PYTHON_IDENTIFIER_DECISION
)
EMPTY_TREE = pandas_valid_data.EMPTY_TREE
CLIENT = craft_ai.pandas.Client(settings.CRAFT_CFG)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasSimpleAgent(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgent")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(SIMPLE_AGENT_CONFIGURATION, self.agent_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_add_agent_operations_df_bad_index(self):
df = pd.DataFrame(randn(10, 5), columns=["a", "b", "c", "d", "e"])
self.assertRaises(
craft_ai.pandas.errors.CraftAiBadRequestError,
CLIENT.add_agent_operations,
self.agent_id,
df,
)
def test_add_agent_operations_df(self):
CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
SIMPLE_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
SIMPLE_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
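# Note (added for clarity): `first_valid_index().value` is the pandas Timestamp in
# nanoseconds since the epoch, so `// 10 ** 9` converts it to the Unix-second
# timestamps that the agent's firstTimestamp/lastTimestamp fields are compared against.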
def test_add_agent_operations_df_websocket(self):
CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
SIMPLE_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
SIMPLE_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_unexpected_property(self):
df = pd.DataFrame(
randn(300, 6),
columns=["a", "b", "c", "d", "e", "f"],
index=pd.date_range("20200101", periods=300, freq="T").tz_localize(
"Europe/Paris"
),
)
self.assertRaises(
craft_ai.pandas.errors.CraftAiBadRequestError,
CLIENT.add_agent_operations,
self.agent_id,
df,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasComplexAgent(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgent")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION, self.agent_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_add_agent_operations_df_complex_agent(self):
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_complex_agent_websocket(self):
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_without_tz(self):
test_df = COMPLEX_AGENT_DATA.drop(columns="tz")
CLIENT.add_agent_operations(self.agent_id, test_df)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_without_tz_websocket(self):
test_df = COMPLEX_AGENT_DATA.drop(columns="tz")
CLIENT.add_agent_operations(self.agent_id, test_df, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasMissingAgent(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "MissingAgent")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(MISSING_AGENT_CONFIGURATION, self.agent_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_add_agent_operations_df_missing_agent(self):
CLIENT.add_agent_operations(self.agent_id, MISSING_AGENT_DATA)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
MISSING_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
MISSING_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_missing_agent_websocket(self):
CLIENT.add_agent_operations(self.agent_id, MISSING_AGENT_DATA, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
MISSING_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
MISSING_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasSimpleAgentWithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgentWData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(SIMPLE_AGENT_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_get_agent_operations_df(self):
df = CLIENT.get_agent_operations(self.agent_id)
self.assertEqual(len(df), 300)
self.assertEqual(len(df.dtypes), 5)
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-01 04:59:00", tz="Europe/Paris"),
)
def test_get_agent_states_df(self):
df = CLIENT.get_agent_states(self.agent_id)
self.assertEqual(len(df), 180)
self.assertEqual(len(df.dtypes), 5)
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-01 04:58:20", tz="Europe/Paris"),
)
def test_tree_visualization(self):
tree1 = CLIENT.get_agent_decision_tree(
self.agent_id, DATETIME_AGENT_DATA.last_valid_index().value // 10 ** 9
)
craft_ai.pandas.utils.create_tree_html(tree1, "", "constant", None, 500)
def test_display_tree_raised_error(self):
tree1 = CLIENT.get_agent_decision_tree(
self.agent_id, DATETIME_AGENT_DATA.last_valid_index().value // 10 ** 9
)
self.assertRaises(
craft_ai.pandas.errors.CraftAiError,
craft_ai.pandas.utils.display_tree,
tree1,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasSimpleAgentWithOperations(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgentWOp")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(valid_data.VALID_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, valid_data.VALID_OPERATIONS_SET)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_get_decision_tree_with_pdtimestamp(self):
# test if we get the same decision tree
decision_tree = CLIENT.get_agent_decision_tree(
self.agent_id, pd.Timestamp(valid_data.VALID_TIMESTAMP, unit="s", tz="UTC")
)
ground_truth_decision_tree = CLIENT.get_agent_decision_tree(
self.agent_id, valid_data.VALID_TIMESTAMP
)
self.assertIsInstance(decision_tree, dict)
self.assertNotEqual(decision_tree.get("_version"), None)
self.assertNotEqual(decision_tree.get("configuration"), None)
self.assertNotEqual(decision_tree.get("trees"), None)
self.assertEqual(decision_tree, ground_truth_decision_tree)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasComplexAgentWithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgentWData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_get_agent_operations_df_complex_agent(self):
df = CLIENT.get_agent_operations(self.agent_id)
self.assertEqual(len(df), 10)
self.assertEqual(len(df.dtypes), 3)
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-10 00:00:00", tz="Europe/Paris"),
)
def test_decide_from_contexts_df(self):
tree = CLIENT.get_agent_decision_tree(
self.agent_id, COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9
)
test_df = COMPLEX_AGENT_DATA
test_df_copy = test_df.copy(deep=True)
df = CLIENT.decide_from_contexts_df(tree, test_df)
self.assertEqual(len(df), 10)
self.assertEqual(len(df.dtypes), 6)
self.assertTrue(test_df.equals(test_df_copy))
self.assertEqual(
df.first_valid_index(),
|
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris")
|
pandas.Timestamp
|
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mysql_url() -> str:
conn = os.environ["MYSQL_URL"]
return conn
def test_mysql_without_partition(mysql_url: str) -> None:
query = "select * from test_table limit 3"
df = read_sql(mysql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([1, 2, 3], dtype="Int64"),
"test_float": pd.Series([1.1, 2.2, 3.3], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mysql_with_partition(mysql_url: str) -> None:
query = "select * from test_table"
df = read_sql(
mysql_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 6], dtype="Int64"),
"test_float": pd.Series([1.1, 2.2, 3.3, 4.4, 5.5, 6.6], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
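# Note (added for clarity): with partition_on/partition_num given, read_sql is expected
# to split the value range of `test_int` into 3 chunks, run the per-chunk queries in
# parallel and concatenate the results, so the expected frame matches a plain read.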
def test_mysql_types(mysql_url: str) -> None:
query = "select * from test_types"
df = read_sql(mysql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_date": pd.Series(["1999-07-25", "2020-12-31", "2021-01-28"], dtype="datetime64[ns]"),
"test_time": pd.Series(["00:00:00", "23:59:59", "12:30:30"], dtype="object"),
"test_datetime": pd.Series(["1999-07-25 00:00:00", "2020-12-31 23:59:59", None], dtype="datetime64[ns]"),
"test_new_decimal": pd.Series([1.1, None, 3.3], dtype="float"),
"test_decimal": pd.Series([1, 2, 3], dtype="float"),
"test_varchar": pd.Series([None, "varchar2", "varchar3"], dtype="object"),
"test_char": pd.Series(["char1", "char2", "char3"], dtype="object")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mysql_types_text(mysql_url: str) -> None:
query = "select * from test_types"
df = read_sql(mysql_url, query, protocol="text")
expected = pd.DataFrame(
index=range(3),
data={
"test_date": pd.Series(["1999-07-25", "2020-12-31", "2021-01-28"], dtype="datetime64[ns]"),
"test_time": pd.Series(["00:00:00", "23:59:59", "12:30:30"], dtype="object"),
"test_datetime": pd.Series(["1999-07-25 00:00:00", "2020-12-31 23:59:59", None], dtype="datetime64[ns]"),
"test_new_decimal": pd.Series([1.1, None, 3.3], dtype="float"),
"test_decimal": pd.Series([1, 2, 3], dtype="float"),
"test_varchar": pd.Series([None, "varchar2", "varchar3"], dtype="object"),
"test_char": pd.Series(["char1", "char2", "char3"], dtype="object")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mysql_more_types(mysql_url: str) -> None:
query = "select * from test_more_types"
df = read_sql(mysql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_tiny":
|
pd.Series([0, 1, 0], dtype="Int64")
|
pandas.Series
|
"""Extracting real pandemic and geospatial data"""
import codecs
import pickle
import datetime
import numpy as np
import pandas as pd
from scipy.signal import savgol_filter
from disease_spread_model.data_processing.text_processing import *
from disease_spread_model.config import Directories, RealDataOptions, \
ModelOptions, StartingDayBy
from disease_spread_model.model.my_math_utils import *
class RealData:
"""
Purpose of this class is to provide following real data:
* voivodeships() - list of voivodeships
* get_real_general_data() - df with geographic data (unrelated to pandemic) for voivodeships
* death_tolls() - df with death toll dynamic for voivodeships
* get_real_infected_toll() - df with infected toll dynamic for voivodeships
* get_day_of_first_n_death() - dict {voivodeship: day}
* get_starting_days_for_voivodeships_based_on_district_deaths() - dict {voivodeship: day}, day is the earliest
day on which a death case has been reported in P percent of all counties in the voivodeship.
* get_starting_deaths_by_hand() - dict {voivodeship: starting_death_toll}, starting_death_toll is the minimum
number of deaths from which the death toll curve starts to look reasonable to me.
* get_starting_death_toll_for_voivodeships_by_days() - {voivodeship: starting_death_toll}, death toll in
voivodeships in days specified by external dict like {voivodeship: day}
* get_shifted_real_death_toll_to_common_start_by_num_of_deaths() - df with the death toll shifted to a common
start: at the shifted day 0 every voivodeship already has at least n deaths
"""
__voivodeships = ['dolnośląskie',
'kujawsko-pomorskie',
'lubelskie',
'lubuskie',
'łódzkie',
'małopolskie',
'mazowieckie',
'opolskie',
'podkarpackie',
'podlaskie',
'pomorskie',
'śląskie',
'świętokrzyskie',
'warmińsko-mazurskie',
'wielkopolskie',
'zachodniopomorskie'
]
# http://eregion.wzp.pl/obszary/stan-i-struktura-ludnosci
__fname_GUS_general_pop_data = (
f"{Directories.ABM_DIR}/"
f"disease_spread_model/"
f"data/"
f"raw/"
f"geospatial/"
f"GUS_general_pop_data.csv")
__fname_counties_in_voivodeship_final = (
f"{Directories.ABM_DIR}/"
f"disease_spread_model/"
f"data/"
f"processed/"
f"geospatial/"
f"counties_in_voivodeship.pck")
# http://eregion.wzp.pl/liczba-mieszkancow-przypadajacych-na-1-sklep
__fname_peoples_per_shop = (
f"{Directories.ABM_DIR}/"
f"disease_spread_model/"
f"data/"
f"raw/"
f"geospatial/"
f"mieszkancow_na_sklep.csv")
# POPULATION DENSITY DATA SOURCE (entered manually)
# https://stat.gov.pl/obszary-tematyczne/ludnosc/
# ludnosc/powierzchnia-i-ludnosc-w-przekroju-terytorialnym-w-2021-roku,7,18.html
# https://bit.ly/covid19_powiaty
__fname_pandemic_day_by_day_early = (
f"{Directories.ABM_DIR}/"
f"disease_spread_model/"
f"data/"
f"raw/"
f"pandemic/"
f"early_data/"
f"COVID-19_04.03-23.11.xlsm")
__dir_pandemic_day_by_day_late_raw = (
f"{Directories.ABM_DIR}/"
f"disease_spread_model/"
f"data/"
f"raw/"
f"pandemic/"
f"late_data/")
__fname_pandemic_day_by_day_late_final = (
f"{Directories.ABM_DIR}/"
f"disease_spread_model/"
f"data/"
f"processed/"
f"pandemic/"
f"pandemic_day_by_day.pck")
__fname_entire_death_toll_final = (
f"{Directories.ABM_DIR}/"
f"disease_spread_model/"
f"data/"
f"processed/"
f"pandemic/"
f"entire_death_toll.pck")
__fname_entire_interpolated_death_toll_final = (
f"{Directories.ABM_DIR}/"
f"disease_spread_model/"
f"data/"
f"processed/"
f"pandemic/"
f"entire_interpolated_death_toll.pck")
__fname_entire_infected_toll_final = (
f"{Directories.ABM_DIR}/"
f"disease_spread_model/"
f"data/"
f"processed/"
f"pandemic/"
f"entire_infected_toll.pck")
__fname_df_excel_deaths_pandemic_early_final = (
f"{Directories.ABM_DIR}/"
f"disease_spread_model/"
f"data/"
f"processed/"
f"pandemic/"
f"df_excel_deaths_pandemic_early_final.pck")
__fname_df_excel_infections_pandemic_early_final = (
f"{Directories.ABM_DIR}/"
f"disease_spread_model/"
f"data/"
f"processed/"
f"pandemic/"
f"df_excel_infections_pandemic_early_final.pck")
# Get voivodeships ************************************************************************************************
@classmethod
def voivodeships(cls):
""" Returns list containing lowercase voivodeship names. """
return cls.__voivodeships
@staticmethod
def day_to_date(day_number: Union[int, str]) -> str:
"""
Return `2020-03-04 + day number` as str in format YYYY-MM-DD.
If a date (str) is passed then it is returned without any changes.
Note: `datetime.timedelta` is picky: `datetime.timedelta(int)` works fine,
but `datetime.timedelta(np.int64)` raises TypeError, so the day number
has to be converted to a plain Python int first.
"""
if isinstance(day_number, str):
return day_number
else:
day_number = int(day_number)
date0 = datetime.datetime(2020, 3, 4)
date0 += datetime.timedelta(days=day_number)
return date0.strftime('%Y-%m-%d')
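# Illustration (added for clarity, relying only on the conversion above):
#
#     RealData.day_to_date(0)            # -> '2020-03-04'
#     RealData.day_to_date(np.int64(7))  # works only thanks to the int() cast
#     RealData.day_to_date('2020-05-01') # a date string is returned unchanged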
@staticmethod
def date_to_day(date: Union[int, str]) -> int:
"""
Return `date - 2020-03-04` as int.
If a day (int) is passed then it is returned without any changes.
Note: `datetime.timedelta` is picky: `datetime.timedelta(int)` works fine,
but `datetime.timedelta(np.int64)` raises TypeError, so values may need
converting to a plain Python int.
"""
if not isinstance(date, str):
return date
date0 = datetime.datetime(2020, 3, 4)
return (datetime.datetime.strptime(date, "%Y-%m-%d") - date0).days
# Get counties in voivodeship ************************************************************************************
@classmethod
def __get_counties_in_voivodeship(cls):
"""
Returns dict in which key = voivodeship, value = list of counties in it.
"""
io = cls.__fname_pandemic_day_by_day_early
sheet_name = 'Suma przypadków'
df_excel = pd.read_excel(io=io, sheet_name=sheet_name)
df_excel.drop(columns=['Kod', "Unnamed: 1"], inplace=True)
df_excel.drop([0, 1], inplace=True)
voivodeships = cls.voivodeships()
counties_in_voivodeship = {}
counties = []
voivodeship = None
for name in df_excel['Nazwa']:
if pd.notna(name):
if name.lower() in voivodeships:
voivodeship = name
counties = []
else:
counties.append(name)
else:
counties_in_voivodeship[voivodeship] = counties
# lowercase voivodeships to be consistent in general
counties_in_voivodeship = {k.lower(): v for k, v in
counties_in_voivodeship.items()}
return counties_in_voivodeship
@classmethod
def __save_counties_in_voivodeship_as_pickle(cls):
counties_in_voivodeship = cls.__get_counties_in_voivodeship()
save_dir = os.path.split(cls.__fname_counties_in_voivodeship_final)[0]
Path(save_dir).mkdir(parents=True, exist_ok=True)
with open(cls.__fname_counties_in_voivodeship_final, 'wb') as handle:
pickle.dump(counties_in_voivodeship, handle,
protocol=pickle.HIGHEST_PROTOCOL)
@classmethod
def __load_counties_in_voivodeship_from_pickle(cls):
with open(cls.__fname_counties_in_voivodeship_final, 'rb') as handle:
counties_in_voivodeship = pickle.load(handle)
return counties_in_voivodeship
@classmethod
def get_counties_in_voivodeship(cls):
"""
Returns dict in which: key = voivodeship name, value = list of counties in it.
"""
try:
counties_in_voivodeship = cls.__load_counties_in_voivodeship_from_pickle()
except FileNotFoundError:
cls.__save_counties_in_voivodeship_as_pickle()
counties_in_voivodeship = cls.__load_counties_in_voivodeship_from_pickle()
return counties_in_voivodeship
# Get data about population, population density, urbanization and
# shops among voivodeships [2019 and 2021] *******
@classmethod
def get_real_general_data(
cls,
customers_in_household=ModelOptions.CUSTOMERS_IN_HOUSEHOLD):
"""
Get a DataFrame with real population data independent of the pandemic, like
urbanization, population density and number of shops.
DataFrame will also include columns with some recommended model parameters for each voivodeship like:
grid side or N - number of households in one grid cell.
Note 1: recommended model parameters are in columns named ,,xxx MODEL''.
Note 2: ,,customers_in_household'' only affects the recommended number of households in one grid cell, i.e.
the column ,,N MODEL''.
Columns = ['population', 'urbanization', 'population density', 'peoples per shop', 'shops', 'shops MODEL',
'grid side MODEL', 'N MODEL']
"""
df = pd.read_csv(cls.__fname_GUS_general_pop_data)
df = df.drop([0, 1]) # drop redundant rows
# manually set column names, because they are broken
df.columns = ['voivodeship',
'pop cities 2010', 'pop cities 2019',
'pop village 2010', 'pop village 2019',
'urbanization 2010', 'urbanization 2019']
# drop not interesting columns
df.drop(columns=['pop cities 2010',
'pop village 2010',
'urbanization 2010'],
inplace=True)
# make column values compatible with the accepted convention
df['voivodeship'] = df['voivodeship'].str.lower()
# Convert strings to numbers
df['pop cities 2019'] = [int(item.replace(' ', '')) for item in
df['pop cities 2019']]
df['pop village 2019'] = [int(item.replace(' ', '')) for item in
df['pop village 2019']]
df['urbanization 2019'] = [float(item.replace(',', '.')) for item in
df['urbanization 2019']]
# Make new column having population of voivodeships
df['population'] = df['pop cities 2019'] + df['pop village 2019']
# drop not interesting columns
df.drop(columns=['pop cities 2019', 'pop village 2019'], inplace=True)
# set new names to columns as now there are
# only data from 2019 (not 2010)
df.columns = ['voivodeship', 'urbanization', 'population']
# set voivodeship column as an index column
df.set_index('voivodeship', inplace=True)
# keep only those columns
df = df[['population', 'urbanization']]
# ------------------------------------------------------------
# Get data about population density from GUS webpage [2021] --------
# https://stat.gov.pl/download/gfx/portalinformacyjny/pl/defaultaktualnosci/5468/7/18/1/
# powierzchnia_i_ludnosc_w_przekroju_terytorialnym_w_2021_roku_tablice.xlsx
population_density_mixed = {'dolnośląskie': 145,
'kujawsko-pomorskie': 115,
'lubelskie': 83,
'lubuskie': 72,
'łódzkie': 134,
'małopolskie': 225,
'mazowieckie': 153,
'opolskie': 104,
'podkarpackie': 119,
'podlaskie': 58,
'pomorskie': 128,
'śląskie': 364,
'świętokrzyskie': 105,
'warmińsko-mazurskie': 59,
'wielkopolskie': 117,
'zachodniopomorskie': 74}
# add new column with population density
df['population density'] = list(population_density_mixed.values())
# -----------------------------------------------------------------
# Put data about num of peoples per shop [2021] to new tmp DataFrame
tmp_df = pd.read_csv(cls.__fname_peoples_per_shop)
tmp_df.drop(0, inplace=True)
tmp_df.rename(columns={'Województwa/Lata': 'voivodeship'},
inplace=True)
tmp_df['voivodeship'] = tmp_df['voivodeship'].str.lower()
tmp_df.set_index('voivodeship', inplace=True)
# Get Series containing data about
# number of peoples per shop for all voivodeships
shop_series = pd.Series(tmp_df['2019'], name='peoples per shop')
# Merge previous DataFrame with peoples per shop Series
df = pd.concat([df, shop_series], axis=1)
# Determine N and grid size
# based on population and peoples per shop
shops = df['population'] / df['peoples per shop']
shops_model = shops / 20
# add data about number of shops to DataFrame
df['shops'] = shops.astype(int)
df['shops MODEL'] = shops_model.astype(int)
# add column with grid side length such that:
# grid_side_length**2 = rescaled num of shops in voivodeship
grid_side = np.sqrt(df['shops MODEL'])
df['grid side MODEL'] = grid_side.astype(int)
N_model = df['population'] / customers_in_household
N_model /= df['grid side MODEL'] ** 2
df['N MODEL'] = N_model.astype(int)
# ----------------------------------------------------------------------------------------------------------
return df
# Get data about dead toll and recovered toll among voivodeships during pandemic ******************************
@classmethod
def __convert_files_to_UTF8(cls):
"""
For all files in directory given by self.dir_pandemic_day_by_day_late_raw:
* converts the text file format from ANSI to UTF-8.
* if the original file format is UTF-8 then leaves it as it is.
Directory self.dir_pandemic_day_by_day_late_raw should contain only
such text files.
Python supports UTF-8 encoding by default, but this class operates on
external data files and some of them are in ANSI and others in UTF-8.
"""
fnames = all_fnames_from_dir(
directory=cls.__dir_pandemic_day_by_day_late_raw)
for fname in fnames:
try:
with codecs.open(fname, 'r', encoding='UTF-8') as file:
file.read()
except UnicodeDecodeError:
# read input file
with codecs.open(fname, 'r', encoding='mbcs') as file:
lines = file.read()
# write output file
with codecs.open(fname, 'w', encoding='utf8') as file:
file.write(lines)
print(f"File {fname} converted to UTF-8 from ANSI")
@classmethod
def __get_significant_pandemic_late_data(cls, fname):
"""
Extracts the data that I care about from one data file shared by GUS.
This function returns a DataFrame whose row index holds voivodeship names (in lowercase)
and whose columns are: ['day', 'liczba_przypadkow', 'liczba_ozdrowiencow', 'zgony']
One file has data about all voivodeships for one day of the pandemic.
"""
# read data
df = pd.read_csv(fname, sep=';')
# get only filename (no entire directory) to construct date from it
fname_only = fname
while '/' in fname_only:
pos = fname_only.find('/')
fname_only = fname_only[pos + 1:]
# read day from filename
day = f'{fname_only[:4]}-{fname_only[4:6]}-{fname_only[6:8]}'
# make dataframe about voivodeships not counties
df = df.groupby(['wojewodztwo']).sum()
# insert day into DataFrame column
df['day'] = day
# make sure that DataFrame contains ,,recovered people'' column
if 'liczba_ozdrowiencow' not in df.columns:
df['liczba_ozdrowiencow'] = np.NaN
# keep only those columns which have data important for the project
to_care = ['day', 'liczba_przypadkow', 'liczba_ozdrowiencow', 'zgony']
df = df[to_care]
# sort dataframe by voivodeship in ascending order
df.sort_values(by=['wojewodztwo'], inplace=True)
return df
@classmethod
def __prepare_real_pandemic_late_data(cls):
"""
Extracts the data that I care about from all data files shared by GUS.
This function returns a dict in which keys are voivodeship names (in lowercase)
and values are DataFrames created by self.__get_significant_pandemic_late_data,
so columns are ['day', 'liczba_przypadkow', 'liczba_ozdrowiencow', 'zgony']
The directory that should contain all GUS pandemic data files is stored in the
,,self.dir_pandemic_day_by_day_late_raw'' variable.
The directory can't contain any other files.
"""
# make sure that files are readable by default settings
cls.__convert_files_to_UTF8()
# get list of files from given directory
fnames = all_fnames_from_dir(
directory=cls.__dir_pandemic_day_by_day_late_raw)
# create empty DataFrames having the desired format (same as the DataFrame ,,df'' read above)
df = cls.__get_significant_pandemic_late_data(fname=fnames[0])
voivodeships = df.index.to_list()
cols = df.columns.to_list()
result_dict = {
voivodeship: pd.DataFrame(columns=cols) for voivodeship in
voivodeships
}
# one day of the pandemic is one file, so iterate over the files, grab the data and insert it as rows into the DataFrames
for fname in fnames:
df = cls.__get_significant_pandemic_late_data(fname=fname)
# modify result voivodeships adding read data, next file = new row
for voivodeship in voivodeships:
voivodeship_df = result_dict[voivodeship]
voivodeship_df.loc[-1] = df.loc[voivodeship,
:].values # adding a row
voivodeship_df.index = voivodeship_df.index + 1 # shifting index
voivodeship_df.sort_index() # sorting by index
# Sort pandemic DataFrames row by days (chronological)
for val in result_dict.values():
val.sort_values(by=['day'], inplace=True)
val.reset_index(drop=True, inplace=True)
return result_dict
@classmethod
def __save_real_late_data_as_pickle(cls):
"""
Saves data obtained by function ,,__prepare_real_pandemic_late_data''
to a binary file.
This function is for time saving, because obtaining data from the data files
given by GUS is time consuming and I may need to get it over and over again.
Function saves obtained data to file given by
,,self.fname_pandemic_day_by_day_late_final'' variable
"""
real_late_data = cls.__prepare_real_pandemic_late_data()
save_dir = os.path.split(cls.__fname_pandemic_day_by_day_late_final)[0]
Path(save_dir).mkdir(parents=True, exist_ok=True)
with open(cls.__fname_pandemic_day_by_day_late_final, 'wb') as handle:
pickle.dump(real_late_data, handle,
protocol=pickle.HIGHEST_PROTOCOL)
@classmethod
def __load_real_data_from_pickle(cls):
"""
Loads data previously saved by function ,,self.__save_real_late_data_as_pickle''.
This function is for time saving, because obtaining data from the data files
given by GUS is time consuming and I may need to get it over and over again.
Function loads obtained data from file given by
,,self.fname_pandemic_day_by_day_late_final'' variable
"""
with open(cls.__fname_pandemic_day_by_day_late_final, 'rb') as handle:
real_data = pickle.load(handle)
return real_data
@classmethod
def __get_real_late_pandemic_data(cls):
"""
Returns data obtained by function ,,self.__prepare_real_pandemic_late_data'',
but instead of calling it every time, calls it once, saves the data to a binary file and
on later calls reads the data from that file.
If the file given by ,,self.fname_pandemic_day_by_day_late_final'' exists, it is
assumed that ,,self.__prepare_real_pandemic_late_data'' was already called and
its result is stored in the file given by the ,,self.fname_pandemic_day_by_day_late_final'' variable.
"""
try:
real_pandemic_data = cls.__load_real_data_from_pickle()
except FileNotFoundError:
cls.__save_real_late_data_as_pickle()
real_pandemic_data = cls.__load_real_data_from_pickle()
return real_pandemic_data
@classmethod
def __get_death_toll_at_end_of_early_pandemic(cls):
"""
Returns the death toll on the first day when GUS started to publish its data.
This function helps merge data from GUS and from the private dataset.
The first (private) source has the death toll, while the second (GUS) has deaths per day.
Ultimately I want the death toll from the beginning of the pandemic up to the current day.
The path to the file that contains data from the private source is stored in the
,,self.fname_pandemic_day_by_day_early'' variable
"""
io = cls.__fname_pandemic_day_by_day_early
sheet_name = '<NAME>'
df_excel = pd.read_excel(io=io, sheet_name=sheet_name)
valid_rows = [voivodeship.upper() for voivodeship in
cls.voivodeships()]
df_excel = df_excel.loc[df_excel['Nazwa'].isin(valid_rows)]
df_excel.drop(columns=[158, 'Unnamed: 1'], inplace=True)
df_excel['Nazwa'] = [name.lower() for name in df_excel['Nazwa']]
df_excel.rename(columns={'Nazwa': 'voivodeship'}, inplace=True)
df_excel.set_index('voivodeship', inplace=True)
dates = pd.date_range(start='2020-03-04', end='2020-11-24').tolist()
dates = [f'{i.year:04d}-{i.month:02d}-{i.day:02d}' for i in dates]
df_excel.columns = dates
df_excel.drop(columns=['2020-11-24'], inplace=True)
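# The early-data sheet stores a running (cumulative) death toll per voivodeship,
# so the row-wise maximum over the remaining date columns is effectively the toll
# on the last kept day, which the per-day GUS data is later stitched onto.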
return df_excel.max(axis=1)
@classmethod
def __get_death_toll_for_early_pandemic(cls):
io = cls.__fname_pandemic_day_by_day_early
sheet_name = '<NAME>'
df_excel = pd.read_excel(io=io, sheet_name=sheet_name)
valid_rows = [voivodeship.upper() for voivodeship in
cls.voivodeships()]
df_excel = df_excel.loc[df_excel['Nazwa'].isin(valid_rows)]
df_excel.drop(columns=[158, 'Unnamed: 1'], inplace=True)
df_excel['Nazwa'] = [name.lower() for name in df_excel['Nazwa']]
df_excel.rename(columns={'Nazwa': 'voivodeship'}, inplace=True)
df_excel.set_index('voivodeship', inplace=True)
dates = pd.date_range(start='2020-03-04', end='2020-11-24').tolist()
dates = [f'{i.year:04d}-{i.month:02d}-{i.day:02d}' for i in dates]
df_excel.columns = dates
df_excel.drop(columns=['2020-11-24'], inplace=True)
return df_excel
@classmethod
def __merge_properly_early_and_late_pandemic_death_toll(cls):
late_pandemic_data = cls.__get_real_late_pandemic_data()
late_days = late_pandemic_data['Cały kraj']['day'].to_list()
late_pandemic_death_toll = pd.DataFrame(
columns=['voivodeship'] + late_days)
late_pandemic_death_toll.set_index('voivodeship', inplace=True)
death_toll_at_end_of_early_stage = cls.__get_death_toll_at_end_of_early_pandemic()
for voivodeship, df in late_pandemic_data.items():
if voivodeship != '<NAME>':
late_pandemic_death_toll.loc[voivodeship] = \
np.cumsum(df['zgony'].to_list()) + \
death_toll_at_end_of_early_stage[voivodeship]
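# 'zgony' in the GUS late data are new deaths per day, so the cumulative sum,
# offset by the toll reached at the end of the early period, yields a continuous
# running death toll for each voivodeship.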
late_pandemic_death_toll = late_pandemic_death_toll.astype(int)
early_pandemic_death_toll = cls.__get_death_toll_for_early_pandemic()
return early_pandemic_death_toll.merge(
late_pandemic_death_toll, on='voivodeship', how='inner'
)
@classmethod
def __save_entire_death_toll_as_pickle(cls):
entire_death_toll = cls.__merge_properly_early_and_late_pandemic_death_toll()
save_dir = os.path.split(cls.__fname_entire_death_toll_final)[0]
Path(save_dir).mkdir(parents=True, exist_ok=True)
with open(cls.__fname_entire_death_toll_final, 'wb') as handle:
pickle.dump(entire_death_toll, handle,
protocol=pickle.HIGHEST_PROTOCOL)
@classmethod
def __load_entire_death_toll_from_pickle(cls):
with open(cls.__fname_entire_death_toll_final, 'rb') as handle:
entire_death_toll = pickle.load(handle)
return entire_death_toll
@classmethod
def death_tolls(cls):
try:
entire_death_toll = cls.__load_entire_death_toll_from_pickle()
except FileNotFoundError:
cls.__save_entire_death_toll_as_pickle()
entire_death_toll = cls.__load_entire_death_toll_from_pickle()
return entire_death_toll
# Get data about infected toll and recovered toll among voivodeships during pandemic **************************
@classmethod
def __get_infected_toll_for_early_pandemic(cls):
io = cls.__fname_pandemic_day_by_day_early
sheet_name = 'Suma przypadków'
df_excel =
|
pd.read_excel(io=io, sheet_name=sheet_name)
|
pandas.read_excel
|
"""
Cell Deconvolutional Network (scaden) class
"""
import os
import logging
import sys
import gc
import tensorflow as tf
import numpy as np
import pandas as pd
from anndata import read_h5ad
import collections
from .functions import sample_scaling
from rich.progress import Progress, BarColumn
logger = logging.getLogger(__name__)
class Scaden(object):
"""
scaden class
"""
def __init__(
self,
model_dir,
model_name,
batch_size=128,
learning_rate=0.0001,
num_steps=1000,
seed=0,
hidden_units=[256, 128, 64, 32],
do_rates=[0, 0, 0, 0],
):
self.model_dir = model_dir
self.batch_size = batch_size
self.model_name = model_name
self.beta1 = 0.9
self.beta2 = 0.999
self.learning_rate = learning_rate
self.data = None
self.n_classes = None
self.labels = None
self.x = None
self.y = None
self.num_steps = num_steps
self.scaling = "log_min_max"
self.sig_genes = None
self.sample_names = None
self.hidden_units = hidden_units
self.do_rates = do_rates
# Set seeds for reproducibility
tf.random.set_seed(seed)
os.environ["TF_DETERMINISTIC_OPS"] = "1"
np.random.seed(seed)
def scaden_model(self, n_classes):
"""Create the Scaden model"""
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(self.hidden_units[0], activation=tf.nn.relu))
model.add(tf.keras.layers.Dropout(self.do_rates[0]))
model.add(tf.keras.layers.Dense(self.hidden_units[1], activation=tf.nn.relu))
model.add(tf.keras.layers.Dropout(self.do_rates[1]))
model.add(tf.keras.layers.Dense(self.hidden_units[2], activation=tf.nn.relu))
model.add(tf.keras.layers.Dropout(self.do_rates[2]))
model.add(tf.keras.layers.Dense(self.hidden_units[3], activation=tf.nn.relu))
model.add(tf.keras.layers.Dropout(self.do_rates[3]))
model.add(tf.keras.layers.Dense(n_classes, activation=tf.nn.softmax))
return model
def compute_loss(self, logits, targets):
"""
Compute the loss (mean squared error between logits and targets)
:param logits:
:param targets:
:return: mean squared error loss
"""
loss = tf.reduce_mean(input_tensor=tf.math.square(logits - targets))
return loss
def compute_accuracy(self, logits, targets, pct_cut=0.05):
"""
Compute prediction accuracy: the fraction of predictions within `pct_cut` of the target
:param logits:
:param targets:
:param pct_cut:
:return:
"""
equality = tf.less_equal(
tf.math.abs(tf.math.subtract(logits, targets)), pct_cut
)
accuracy = tf.reduce_mean(input_tensor=tf.cast(equality, tf.float32))
return accuracy
def correlation_coefficient(self, logits, targets):
"""
Calculate the pearson correlation coefficient
:param logits:
:param targets:
:return:
"""
mx = tf.reduce_mean(input_tensor=logits)
my = tf.reduce_mean(input_tensor=targets)
xm, ym = logits - mx, targets - my
r_num = tf.reduce_sum(input_tensor=tf.multiply(xm, ym))
r_den = tf.sqrt(
tf.multiply(
tf.reduce_sum(input_tensor=tf.square(xm)),
tf.reduce_sum(input_tensor=tf.square(ym)),
)
)
r = tf.divide(r_num, r_den)
r = tf.maximum(tf.minimum(r, 1.0), -1.0)
return r
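# For reference (not part of the class): for 1-D inputs the value computed above is
# the standard Pearson correlation, i.e. roughly what numpy would give with
#
#     np.corrcoef(logits_1d, targets_1d)[0, 1]
#
# (logits_1d / targets_1d being hypothetical 1-D arrays), with the extra clipping
# to [-1, 1] guarding against floating-point round-off.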
def visualization(self, logits, targets, classes):
"""
Create evaluation metrics
:param targets:
:param classes:
:return:
"""
# add evaluation metrics
rmse = tf.compat.v1.metrics.root_mean_squared_error(logits, targets)[1]
pcor = self.correlation_coefficient(logits, targets)
eval_metrics = {"rmse": rmse, "pcor": pcor}
for i in range(logits.shape[1]):
eval_metrics[
"mre_" + str(classes[i])
] = tf.compat.v1.metrics.mean_relative_error(
targets[:, i], logits[:, i], targets[:, i]
)[
0
]
eval_metrics[
"mae_" + str(classes[i])
] = tf.compat.v1.metrics.mean_absolute_error(
targets[:, i], logits[:, i], targets[:, i]
)[
0
]
eval_metrics["pcor_" + str(classes[i])] = self.correlation_coefficient(
targets[:, i], logits[:, i]
)
eval_metrics["mre_total"] = tf.compat.v1.metrics.mean_relative_error(
targets, logits, targets
)[1]
eval_metrics["mae_total"] = tf.compat.v1.metrics.mean_relative_error(
targets, logits, targets
)[1]
eval_metrics["accuracy01"] = self.compute_accuracy(
logits, targets, pct_cut=0.01
)
eval_metrics["accuracy05"] = self.compute_accuracy(
logits, targets, pct_cut=0.05
)
eval_metrics["accuracy1"] = self.compute_accuracy(logits, targets, pct_cut=0.1)
# Create summary scalars
for key, value in eval_metrics.items():
tf.compat.v1.summary.scalar(key, value)
tf.compat.v1.summary.scalar("loss", self.loss)
merged_summary_op = tf.compat.v1.summary.merge_all()
return merged_summary_op
def load_h5ad_file(self, input_path, batch_size, datasets=[]):
"""
Load input data from a h5ad file and divide into training and test set
:param input_path: path to h5ad file
:param batch_size: batch size to use for training
:param datasets: a list of datasets to extract from the file
:return: Dataset object
"""
try:
raw_input = read_h5ad(input_path)
except:
logger.error(
"Could not load training data file! Is it a .h5ad file generated with `scaden process`?"
)
sys.exit()
# Subset dataset if --train_datasets is given
if len(datasets) > 0:
all_ds = collections.Counter(raw_input.obs["ds"])
# Check that given datasets are all actually available
for ds in datasets:
if not ds in all_ds:
logger.warn(
f"The dataset '[cyan]{ds}[/cyan]' could not be found in the training data! Is the name correct?"
)
for ds in all_ds:
if ds not in datasets:
raw_input = raw_input[raw_input.obs["ds"] != ds].copy()
# Create training dataset
ratios = [raw_input.obs[ctype] for ctype in raw_input.uns["cell_types"]]
self.x_data = raw_input.X.astype(np.float32)
self.y_data = np.array(ratios, dtype=np.float32).transpose()
self.data = tf.data.Dataset.from_tensor_slices((self.x_data, self.y_data))
self.data = self.data.shuffle(1000).repeat().batch(batch_size=batch_size)
self.data_iter = iter(self.data)
# Extract celltype and feature info
self.labels = raw_input.uns["cell_types"]
self.sig_genes = list(raw_input.var_names)
def load_prediction_file(self, input_path, sig_genes, labels, scaling=None):
"""
Load a file to perform prediction on it
:param input_path: path to input file
:param sig_genes: the signature genes to use
:param scaling: which scaling to perform
:return: Dataset object
"""
# Load data
data = pd.read_table(input_path, sep="\t", index_col=0)
sample_names = list(data.columns)
# check for duplicates
data_index = list(data.index)
if not (len(data_index) == len(set(data_index))):
logger.warn(
"Scaden Warning: Your mixture file conatins duplicate genes! The first occuring gene will be used for every duplicate."
)
data = data.loc[~data.index.duplicated(keep="first")]
data = data.loc[sig_genes]
data = data.T
# Scaling
if scaling:
data = sample_scaling(data, scaling_option=scaling)
self.data = data
return sample_names
def build_model(self, input_path, train_datasets, mode="train"):
"""
Build the model graph
:param reuse:
:return:
"""
self.global_step = tf.Variable(0, name="global_step", trainable=False)
# Load training data
if mode == "train":
self.load_h5ad_file(
input_path=input_path,
batch_size=self.batch_size,
datasets=train_datasets,
)
# Load prediction data
if mode == "predict":
self.sample_names = self.load_prediction_file(
input_path=input_path,
sig_genes=self.sig_genes,
labels=self.labels,
scaling=self.scaling,
)
# Build the model or load if available
self.n_classes = len(self.labels)
try:
self.model = tf.keras.models.load_model(self.model_dir, compile=False)
logger.info(f"Loaded pre-trained model: [cyan]{self.model_name}")
except:
self.model = self.scaden_model(n_classes=self.n_classes)
def train(self, input_path, train_datasets):
"""
Train the model
:param num_steps:
:return:
"""
# Define the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
# Build model graph
self.build_model(
input_path=input_path, train_datasets=train_datasets, mode="train"
)
# Training loop
progress_bar = Progress(
"[bold blue]{task.description}",
"[bold cyan]Step: {task.fields[step]}, Loss: {task.fields[loss]}",
BarColumn(bar_width=None),
)
training_progress = progress_bar.add_task(
self.model_name, total=self.num_steps, step=0, loss=1
)
with progress_bar:
for step in range(self.num_steps):
x, y = self.data_iter.get_next()
with tf.GradientTape() as tape:
self.logits = self.model(x, training=True)
loss = self.compute_loss(self.logits, y)
grads = tape.gradient(loss, self.model.trainable_weights)
optimizer.apply_gradients(zip(grads, self.model.trainable_weights))
progress_bar.update(
training_progress, advance=1, step=step, loss=f"{loss:.4f}"
)
# Collect garbage after 100 steps - otherwise runs out of memory
if step % 100 == 0:
gc.collect()
# Save the trained model
self.model.save(self.model_dir)
pd.DataFrame(self.labels).to_csv(
os.path.join(self.model_dir, "celltypes.txt"), sep="\t"
)
pd.DataFrame(self.sig_genes).to_csv(
os.path.join(self.model_dir, "genes.txt"), sep="\t"
)
def predict(self, input_path):
"""
Perform prediction with a pre-trained model
:param input_path: prediction data path
:return:
"""
# Load signature genes and celltype labels
sig_genes =
|
pd.read_table(self.model_dir + "/genes.txt", index_col=0)
|
pandas.read_table
|
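A minimal illustrative sketch (not part of the scaden source; the file name and toy gene names are made up) of the pandas.read_table completion above: train() saves the signature genes to genes.txt as a tab-separated single column with a default integer index, and predict() reads them back with index_col=0.
import pandas as pd
# write a toy genes.txt the same way train() does above (tab-separated, default index)
pd.DataFrame(["GENE_A", "GENE_B", "GENE_C"]).to_csv("genes.txt", sep="\t")
# read it back as predict() would; index_col=0 drops the saved index column again
sig_genes = pd.read_table("genes.txt", index_col=0).iloc[:, 0].tolist()
print(sig_genes)  # ['GENE_A', 'GENE_B', 'GENE_C']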
# -*- coding: utf-8 -*-
# %%
import numpy as np
import pandas as pd
import tqdm
import vdj.io
import vdj.bayes
# %%
# Load the dwell time data sets.
dwell = pd.read_csv('../../data/compiled_dwell_times.csv')
# Load the stan model for the pooled case
model = vdj.bayes.StanModel('../stan/pooled_cutting_rate.stan')
# %% Perform the inference
# Set up the storage list for the summaries
stats = []
samps = []
# Iterate through each mutant.
for g, d in tqdm.tqdm(dwell.groupby('mutant')):
# Define the data dictionary
cuts = d[d['cut']==1]
unloops = d[d['cut']==0]
data_dict = {'N':len(cuts), 'M':len(unloops),
'cut':cuts['dwell_time_s'], 'unloop':unloops['dwell_time_s']}
# Sample and compute the parameter summary
_, samples = model.sample(data_dict)
summary = model.summary(parnames=['tau', 'r'])
summary['mutant'] = g
samples['mutant'] = g
# Get the sequences
seq = vdj.io.mutation_parser(g)
summary['N'] = len(d)
summary['seq'] = seq['seq']
summary['n_muts'] = seq['n_muts']
samples['N'] = len(d)
samples['seq'] = seq['seq']
samples['n_muts'] = seq['n_muts']
stats.append(summary)
samps.append(samples)
stats =
|
pd.concat(stats)
|
pandas.concat
|
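For context, a small self-contained example (toy parameter values, not results from the inference above) of the pandas.concat completion: each pass through the mutant loop appends one summary DataFrame to stats, and concat stacks them into a single table.
import pandas as pd
# two toy per-mutant summaries standing in for model.summary() output
summary_wt = pd.DataFrame({"parameter": ["tau", "r"], "mean": [1.2, 0.05], "mutant": "WT"})
summary_mut = pd.DataFrame({"parameter": ["tau", "r"], "mean": [2.4, 0.01], "mutant": "point_mutant"})
stats = pd.concat([summary_wt, summary_mut], ignore_index=True)
print(stats)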
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities functions to manipulate the data in the colab."""
import datetime
import itertools
import operator
from typing import List, Optional
import dataclasses
import numpy as np
import pandas as pd
import pandas.io.formats.style as style
from scipy import stats
from trimmed_match.design import common_classes
TimeWindow = common_classes.TimeWindow
FormatOptions = common_classes.FormatOptions
_operator_functions = {'>': operator.gt,
'<': operator.lt,
'<=': operator.le,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne}
_inverse_op = {'<': '>', '<=': '>=', '>': '<', '>=': '<=', '=': '!='}
@dataclasses.dataclass
class CalculateMinDetectableIroas:
"""Class for the calculation of the minimum detectable iROAS.
Hypothesis testing for H0: iROAS=0 vs H1: iROAS>=min_detectable_iroas based
on one sample X which follows a normal distribution with mean iROAS (unknown)
and standard deviation rmse (known).
Typical usage example:
calc_min_detectable_iroas = CalculateMinDetectableIroas(0.1, 0.9)
min_detectable_iroas = calc_min_detectable_iroas.at(2.0)
"""
# chance of rejecting H0 incorrectly when H0 holds.
significance_level: float = 0.1
# chance of rejecting H0 correctly when H1 holds.
power_level: float = 0.9
# minimum detectable iroas at rmse=1.
rmse_multiplier: float = dataclasses.field(init=False)
def __post_init__(self):
"""Calculates rmse_multiplier.
Raises:
ValueError: if significance_level or power_level is not in (0, 1).
"""
if self.significance_level <= 0 or self.significance_level >= 1.0:
raise ValueError('significance_level must be in (0, 1), but got '
f'{self.significance_level}.')
if self.power_level <= 0 or self.power_level >= 1.0:
raise ValueError('power_level must be in (0, 1), but got '
f'{self.power_level}.')
self.rmse_multiplier = (
stats.norm.ppf(self.power_level) +
stats.norm.ppf(1 - self.significance_level))
def at(self, rmse: float) -> float:
"""Calculates min_detectable_iroas at the specified rmse."""
return rmse * self.rmse_multiplier
def find_days_to_exclude(
dates_to_exclude: List[str]) -> List[TimeWindow]:
"""Returns a list of time windows to exclude from a list of days and weeks.
Args:
dates_to_exclude: a List of strings with format indicating a single day as
'2020/01/01' (YYYY/MM/DD) or an entire time period as
'2020/01/01 - 2020/02/01' (indicating start and end date of the time period)
Returns:
days_exclude: a List of TimeWindows obtained from the list in input.
"""
days_exclude = []
for x in dates_to_exclude:
tmp = x.split('-')
if len(tmp) == 1:
try:
days_exclude.append(
TimeWindow(pd.Timestamp(tmp[0]), pd.Timestamp(tmp[0])))
except ValueError:
raise ValueError(f'Cannot convert the string {tmp[0]} to a valid date.')
elif len(tmp) == 2:
try:
days_exclude.append(
TimeWindow(pd.Timestamp(tmp[0]), pd.Timestamp(tmp[1])))
except ValueError:
raise ValueError(
f'Cannot convert the strings in {tmp} to a valid date.')
else:
raise ValueError(f'The input {tmp} cannot be interpreted as a single' +
' day or a time window')
return days_exclude
def expand_time_windows(periods: List[TimeWindow]) -> List[pd.Timestamp]:
"""Return a list of days to exclude from a list of TimeWindows.
Args:
periods: List of time windows (first day, last day).
Returns:
days_exclude: a List of days obtained by expanding the list in input.
"""
days_exclude = []
for window in periods:
days_exclude += pd.date_range(window.first_day, window.last_day, freq='D')
return list(set(days_exclude))
def overlap_percent(dates_left: List['datetime.datetime'],
dates_right: List['datetime.datetime']) -> float:
"""Find the size of the intersections of two arrays, relative to the first array.
Args:
dates_left: List of datetime.datetime
dates_right: List of datetime.datetime
Returns:
percentage: the percentage of elements of dates_right that also appear in
dates_left
"""
intersection = np.intersect1d(dates_left, dates_right)
percentage = 100 * len(intersection) / len(dates_right)
return percentage
def check_time_periods(geox_data: pd.DataFrame,
start_date_eval: pd.Timestamp,
start_date_aa_test: pd.Timestamp,
experiment_duration_weeks: int,
frequency: str) -> bool:
"""Checks that the geox_data contains the data for the two periods.
Check that the geox_data contains all observations during the evaluation and
AA test periods to guarantee that the experiment lasts exactly a certain
number of days/weeks, depending on the frequency of the data (daily/weekly).
Args:
geox_data: pd.DataFrame with at least the columns (date, geo).
start_date_eval: start date of the evaluation period.
start_date_aa_test: start date of the aa test period.
experiment_duration_weeks: int, length of the experiment in weeks.
frequency: str indicating the frequency of the time series. It should be one
of 'infer', 'D', 'W'.
Returns:
bool: a bool, True if the time periods specified pass all the checks
Raises:
ValueError: if part of the evaluation or AA test period are shorter than
experiment_duration (either weeks or days).
"""
if frequency not in ['infer', 'D', 'W']:
raise ValueError(
f'frequency should be one of ["infer", "D", "W"], got {frequency}')
if frequency == 'infer':
tmp = geox_data.copy().set_index(['date', 'geo'])
frequency = infer_frequency(tmp, 'date', 'geo')
if frequency == 'W':
frequency = '7D'
number_of_observations = experiment_duration_weeks
else:
number_of_observations = 7 * experiment_duration_weeks
freq_str = 'weeks' if frequency == '7D' else 'days'
missing_eval = find_missing_dates(geox_data, start_date_eval,
experiment_duration_weeks,
number_of_observations, frequency)
if missing_eval:
raise ValueError(
(f'The evaluation period contains the following {freq_str} ' +
f'{missing_eval} for which we do not have data.'))
missing_aa_test = find_missing_dates(geox_data, start_date_aa_test,
experiment_duration_weeks,
number_of_observations, frequency)
if missing_aa_test:
raise ValueError((f'The AA test period contains the following {freq_str} ' +
f'{missing_aa_test} for which we do not have data.'))
return True
def find_missing_dates(geox_data: pd.DataFrame, start_date: pd.Timestamp,
period_duration_weeks: int,
number_of_observations: int,
frequency: str) -> List[str]:
"""Find missing observations in a time period.
Args:
geox_data: pd.DataFrame with at least the columns (date, geo).
start_date: start date of the evaluation period.
period_duration_weeks: int, length of the period in weeks.
number_of_observations: expected number of time points.
frequency: str or pd.DateOffset indicating the frequency of the time series.
Returns:
missing: a list of strings, containing the dates for which data are missing
in geox_data.
"""
days = datetime.timedelta(days=7 * period_duration_weeks - 1)
period_dates = ((geox_data['date'] >= start_date) &
(geox_data['date'] <= start_date + days))
days_in_period = geox_data.loc[
period_dates, 'date'].drop_duplicates().dt.strftime('%Y-%m-%d').to_list()
missing = np.array([])
if len(days_in_period) != number_of_observations:
expected_observations = list(
pd.date_range(start_date, start_date + days,
freq=frequency).strftime('%Y-%m-%d'))
missing = set(expected_observations) - set(days_in_period)
return sorted(missing)
def infer_frequency(data: pd.DataFrame, date_index: str,
series_index: str) -> str:
"""Infers frequency of data from pd.DataFrame with multiple indices.
Infers frequency of data from pd.DataFrame with two indices, one for the slice
name and one for the date-time.
Example:
df = pd.DataFrame({'date': ['2020-10-10', '2020-10-11'], 'geo': [1, 1],
'response': [10, 20]})
df.set_index(['geo', 'date'], inplace=True)
infer_frequency(df, 'date', 'geo')
Args:
data: a pd.DataFrame for which frequency needs to be inferred.
date_index: string containing the name of the time index.
series_index: string containing the name of the series index.
Returns:
A str, either 'D' or 'W' indicating the most likely frequency inferred
from the data.
Raises:
ValueError: if it is not possible to infer frequency of sampling from the
provided pd.DataFrame.
"""
data = data.sort_values(by=[date_index, series_index])
# Infer most likely frequency for each series_index
series_names = data.index.get_level_values(series_index).unique().tolist()
series_frequencies = []
for series in series_names:
observed_times = data.iloc[data.index.get_level_values(series_index) ==
series].index.get_level_values(date_index)
n_steps = len(observed_times)
if n_steps > 1:
time_diffs = (
observed_times[1:n_steps] -
observed_times[0:(n_steps - 1)]).astype('timedelta64[D]').values
modal_frequency, _ = np.unique(time_diffs, return_counts=True)
series_frequencies.append(modal_frequency[0])
if not series_frequencies:
raise ValueError(
'At least one series with more than one observation must be provided.')
if series_frequencies.count(series_frequencies[0]) != len(series_frequencies):
raise ValueError(
'The provided time series seem to have irregular frequencies.')
try:
frequency = {
1: 'D',
7: 'W'
}[series_frequencies[0]]
except KeyError:
raise ValueError('Frequency could not be identified. Got %d days.' %
series_frequencies[0])
return frequency
def human_readable_number(number: float) -> str:
"""Print a large number in a readable format.
Return a readable format for a number, e.g. 123 millions becomes 123M.
Args:
number: a float to be printed in human readable format.
Returns:
readable_number: a string containing the formatted number.
"""
number = float('{:.3g}'.format(number))
magnitude = 0
while abs(number) >= 1000 and magnitude < 4:
magnitude += 1
number /= 1000.0
readable_number = '{}{}'.format('{:f}'.format(number).rstrip('0').rstrip('.'),
['', 'K', 'M', 'B', 'tn'][magnitude])
return readable_number
def change_background_row(df: pd.DataFrame, value: float, operation: str,
column: str):
"""Colors a row of a table based on the expression in input.
Color a row in:
- orange if the value of the column satisfies the expression in input
- beige if the value of the column satisfies the inverse expression in input
- green otherwise
For example, if the column has values [1, 2, 3] and we pass 'value' equal to
2, and operation '>', then
- 1 is marked in beige (1 < 2, which is the inverse expression)
- 2 is marked in green (it's not > and it's not <)
- 3 is marked in orange(3 > 2, which is the expression)
Args:
df: the table of which we want to change the background color.
value: term of comparison to be used in the expression.
operation: a string to define which operator to use, e.g. '>' or '='. For a
full list check _operator_functions.
column: name of the column to be used for the comparison
Returns:
pd.Series
"""
if _operator_functions[operation](float(df[column]), value):
return pd.Series('background-color: orange', df.index)
elif _operator_functions[_inverse_op[operation]](float(df[column]), value):
return pd.Series('background-color: beige', df.index)
else:
return
|
pd.Series('background-color: lightgreen', df.index)
|
pandas.Series
|
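An illustrative sketch (toy DataFrame and threshold; not from the trimmed_match source) of how the per-row pd.Series of CSS strings returned by change_background_row is consumed: pandas Styler.apply with axis=1 expects one CSS string per cell in the row.
import pandas as pd
df = pd.DataFrame({"metric": [1.0, 2.0, 3.0]})
def color_row(row, value=2.0):
    # return one CSS string per cell in the row, mirroring change_background_row
    if row["metric"] > value:
        return pd.Series("background-color: orange", index=row.index)
    elif row["metric"] < value:
        return pd.Series("background-color: beige", index=row.index)
    return pd.Series("background-color: lightgreen", index=row.index)
styled = df.style.apply(color_row, axis=1)  # a Styler object, e.g. for notebook display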
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 19 22:28:36 2021
@author: <NAME> 2021
"""
import os
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import mi
class TRH():
"""
Python class TRH for holding data and calculating indicators
The input data is given as pandas DataFrame or Series with datetime index
"""
def __init__(self,
T_i, RH_i,
T_e, RH_e,
T_x, RH_x,
output_folder,
measurement_point_name,
measurement_point_MG_classes):
# Initialisations
self.T_i = T_i
self.RH_i = RH_i
self.T_e = T_e
self.RH_e = RH_e
self.T_x = T_x
self.RH_x = RH_x
self.output_folder = output_folder
self.measurement_point_name = measurement_point_name
self.measurement_point_MG_classes = measurement_point_MG_classes
self.fname_logfile = os.path.join(self.output_folder,
'log_' + self.measurement_point_name + '.txt')
with open(self.fname_logfile, mode='w', encoding='utf-8') as f:
f.write('TRH-1 log file\n')
time_str = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
f.write('Starting at: ' + time_str + '\n\n')
# Calculations and plotting
self.make_basic_plots()
# id 1: Comparison to maximum value
self.calc_RH_x_ecdf()
# id 2 and 3: Comparison to critical values
self.calc_RH_x_crit()
# id 4: M_max < 1
self.calc_M_max()
# id 4 and 5: VI and TI
self.calc_VI_TI()
with open(self.fname_logfile, mode='a') as f:
time_str = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
f.write('Ending at: ' + time_str + '\n\n')
def make_basic_plots(self):
self.xaxis_fmt = mdates.DateFormatter('%Y-%m-%d')
# Plot, indoor air T and RH
fig = plt.figure()
ax = fig.gca()
ax.xaxis.set_major_formatter(self.xaxis_fmt)
fig.autofmt_xdate()
ax.plot(self.T_i.index, self.T_i.values)
ax.set_ylabel('T, C')
ax.grid()
fname = os.path.join(self.output_folder,
'T_i.png')
fig.savefig(fname, dpi=100, bbox_inches='tight')
plt.close(fig)
fig = plt.figure()
ax = fig.gca()
ax.xaxis.set_major_formatter(self.xaxis_fmt)
fig.autofmt_xdate()
ax.plot(self.RH_i.index, self.RH_i.values)
ax.set_ylabel('RH, 0-100 %')
ax.grid()
fname = os.path.join(self.output_folder,
'RH_i.png')
fig.savefig(fname, dpi=100, bbox_inches='tight')
plt.close(fig)
# Plot, outdoor air conditions
fig = plt.figure()
ax = fig.gca()
ax.xaxis.set_major_formatter(self.xaxis_fmt)
fig.autofmt_xdate()
ax.plot(self.T_e.index, self.T_e.values)
ax.set_ylabel('T, C')
ax.grid()
fname = os.path.join(self.output_folder,
'T_e.png')
fig.savefig(fname, dpi=100, bbox_inches='tight')
plt.close(fig)
fig = plt.figure()
ax = fig.gca()
ax.xaxis.set_major_formatter(self.xaxis_fmt)
fig.autofmt_xdate()
ax.plot(self.RH_e.index, self.RH_e.values)
ax.set_ylabel('RH, 0-100 %')
ax.grid()
fname = os.path.join(self.output_folder,
'RH_e.png')
fig.savefig(fname, dpi=100, bbox_inches='tight')
plt.close(fig)
# Plot, measurement point "x"
fig = plt.figure()
ax = fig.gca()
ax.xaxis.set_major_formatter(self.xaxis_fmt)
fig.autofmt_xdate()
ax.plot(self.T_x.index, self.T_x.values)
ax.set_ylabel('T, C')
ax.grid()
fname = os.path.join(self.output_folder,
'T_x_' + self.measurement_point_name + '.png')
fig.savefig(fname, dpi=100, bbox_inches='tight')
plt.close(fig)
fig = plt.figure()
ax = fig.gca()
ax.xaxis.set_major_formatter(self.xaxis_fmt)
fig.autofmt_xdate()
ax.plot(self.RH_x.index, self.RH_x.values)
ax.set_ylabel('RH, 0-100 %')
ax.grid()
fname = os.path.join(self.output_folder,
'RH_x_' + self.measurement_point_name + '.png')
fig.savefig(fname, dpi=100, bbox_inches='tight')
plt.close(fig)
def calc_RH_x_ecdf(self):
# Use RH only by comparing the RH data to a limit value
# ecdf = Empirical cumulative distribution function
# 0, 50 and 100 (%) are the minimum, median and maximum
# To file
p = [100, 99.9, 99.5, 99, 95, 90, 75, 50,
25, 10, 5, 1, 0.5, 0.1, 0]
self.RH_x_ecdf = np.zeros( (len(p), 2) )
for idx, p_val in enumerate(p):
self.RH_x_ecdf[idx, 0] = p_val
self.RH_x_ecdf[idx, 1] = np.percentile(self.RH_x, p_val)
print('np.percentile(RH_x, 99): {:0.1f}'.format(np.percentile(self.RH_x, 99)))
with open(self.fname_logfile, 'a') as f:
f.write('RH_x empirical cumulative distribution function:\n')
f.write('<Percentile 0-100> <RH 0-100>\n')
np.savetxt(f, self.RH_x_ecdf, fmt='%.02f')
f.write('\n')
# Plot, cdf
x = np.sort(self.RH_x)
y = np.linspace(start=0.0, stop=1.0, num=len(x))
fig = plt.figure()
ax = fig.gca()
ax.plot(x, y)
ax.set_xlabel('RHx, %')
ax.set_ylabel('ecdf, 0-1')
ax.set_xlim((-2, 102))
ax.grid()
fname = os.path.join(self.output_folder,
'RH_x_ecdf_' + self.measurement_point_name + '.png')
fig.savefig(fname, dpi=100, bbox_inches='tight')
plt.close(fig)
# Plot, icdf
fig = plt.figure()
ax = fig.gca()
ax.plot(y, x)
ax.set_ylabel('RHx, %')
ax.set_xlabel('ecdf, 0-1')
ax.set_ylim((-2, 102))
ax.grid()
fname = os.path.join(self.output_folder,
'RH_x_icdf_' + self.measurement_point_name + '.png')
fig.savefig(fname, dpi=100, bbox_inches='tight')
plt.close(fig)
@staticmethod
def calc_RH_crit(T):
n = len(T)
RH_crit = np.zeros(n)
for idx in range(n):
if T[idx] <= 1.0:
RH_crit[idx] = 0.83*T[idx] + 97.0
else:
RH_crit[idx] = \
np.maximum(-0.00267*T[idx]**3 + 0.16*T[idx]**2 - 3.13*T[idx] + 100.0,
80.0)
return(RH_crit)
def calc_RH_x_crit(self):
# Use both RH and T to create a two-dimensional limit curve
self.RH_x_crit = self.calc_RH_crit(self.T_x)
# plot
T_min = self.T_x.min()
T_max = self.T_x.max()
T_vals = np.linspace(start=T_min, stop=T_max)
RH_vals = self.calc_RH_crit(T_vals)
fig = plt.figure()
ax = fig.gca()
ax.plot(self.T_x, self.RH_x, '.', markersize=0.6)
ax.plot(T_vals, RH_vals)
ax.set_xlabel('T, C')
ax.set_ylabel('RH, %')
ax.set_ylim((-2, 102))
ax.grid()
fname = os.path.join(self.output_folder,
'RH_x_crit_scatter_' \
+ self.measurement_point_name + '.png')
fig.savefig(fname, dpi=100, bbox_inches='tight')
plt.close(fig)
# Proportion of points over the curve
n_pos = np.sum(self.RH_x > self.RH_x_crit)
n_tot = len(self.RH_x)
s = 'Number of data points above the limit curve: ' \
'{} / {} = {:0.1f} %' \
.format(n_pos, n_tot, 100*n_pos/n_tot)
print(s)
with open(self.fname_logfile, 'a') as f:
f.write(s + '\n')
f.write('\n')
# Total time over the curve
idxs = self.RH_x > self.RH_x_crit
dt_total = self.RH_x.index.to_series().diff().sum()
dt_over_curve = self.RH_x.index.to_series().diff().loc[idxs].sum()
n_total_days = dt_total / pd.Timedelta(days=1)
n_over_curve_days = dt_over_curve /
|
pd.Timedelta(days=1)
|
pandas.Timedelta
|
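A brief, self-contained illustration (toy timestamps, not measurement data) of the pd.Timedelta division used above: dividing a summed time difference by Timedelta(days=1) converts it into a float number of days.
import pandas as pd
idx = pd.date_range("2021-01-01", periods=5, freq="6H")
total = idx.to_series().diff().sum()   # Timedelta('1 days 00:00:00')
print(total / pd.Timedelta(days=1))    # 1.0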
import itertools
import json
import urllib
import urllib.request
import pandas as pd
class ManifestoDataLoader(object):
def __init__(self, api_key):
self.base_url = "https://manifesto-project.wzb.eu/tools"
self.country = "Germany"
self.version = "MPDS2017b"
self.api_key = api_key
self.label2rightleft = {
'right': [104, 201, 203, 305, 401, 402, 407, 414, 505, 601, 603, 605, 606],
'left': [103, 105, 106, 107, 403, 404, 406, 412, 413, 504, 506, 701, 202]
}
def cmp_code_2_left_right_neutral(self, cmp_code):
if cmp_code in self.label2rightleft['left']:
return 'left'
elif cmp_code in self.label2rightleft['right']:
return 'right'
else:
return 'neutral'
@staticmethod
def get_url(url):
return urllib.request.urlopen(url).read().decode()
def get_latest_version(self):
"""
Get the latest version id of the Corpus
"""
versions_url = self.base_url + "/api_list_metadata_versions.json?&api_key=" + self.api_key
versions = json.loads(self.get_url(versions_url))
return versions['versions'][-1]
def get_manifesto_id(self, text_id, version):
"""
Get manifesto id of a text given the text id and a version id
"""
text_key_url = self.base_url + "/api_metadata?keys[]=" + text_id + "&version=" + version + "&api_key=" + self.api_key
text_meta_data = json.loads(self.get_url(text_key_url))
return text_meta_data['items'][0]['manifesto_id']
def get_core(self):
"""
Downloads core data set, including information about all parties
https://manifestoproject.wzb.eu/information/documents/api
"""
url = self.base_url + "/api_get_core?key=" + self.version + "&api_key=" + self.api_key
return json.loads(self.get_url(url))
def get_text_keys(self):
d = self.get_core()
return [p[5:7] for p in d if p[1] == self.country]
def get_text(self, text_id):
"""
Retrieves the latest version of the manifesto text with corresponding labels
"""
# get the latest version of this text
version = self.get_latest_version()
# get the text metadata and manifesto ID
manifesto_id = self.get_manifesto_id(text_id, version)
text_url = self.base_url + "/api_texts_and_annotations.json?keys[]=" + manifesto_id + "&version=" + version + "&api_key=" + self.api_key
text_data = json.loads(self.get_url(text_url))
try:
text = [(t['cmp_code'], t['text']) for t in text_data['items'][0]['items']]
print('Downloaded %d texts for %s' % (len(text_data['items'][0]['items']), text_id))
return text
except:
print('Could not get text %s' % text_id)
def get_texts_per_party(self):
# get all tuples of party/date corresponding to a manifesto text in this country
text_keys = self.get_text_keys()
# get the texts
texts = {t[1] + "_" + t[0]: self.get_text(t[1] + "_" + t[0]) for t in text_keys}
texts = {k: v for k, v in texts.items() if v}
print("Downloaded %d/%d annotated texts" % (len(texts), len(text_keys)))
return texts
def get_texts(self):
texts = self.get_texts_per_party()
return [x for x in list(itertools.chain(*texts.values())) if x[0] != 'NA' and x[0] != '0']
def get_manifesto_texts(self, min_len=10):
print("Downloading texts from manifestoproject.")
manifesto_texts = self.get_texts()
df =
|
pd.DataFrame(manifesto_texts, columns=['cmp_code', 'content'])
|
pandas.DataFrame
|
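For reference, a minimal sketch (invented sentences and toy codes, not real Manifesto Project data) of the pd.DataFrame completion above: the list of (cmp_code, text) tuples returned by get_texts() maps directly onto the two named columns.
import pandas as pd
manifesto_texts = [(104, "Strengthen the armed forces."),
(503, "Expand social housing programmes.")]
df = pd.DataFrame(manifesto_texts, columns=["cmp_code", "content"])
print(df)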
# -*- coding: utf-8 -*-
import pandas as pd
import requests as rq
import lxml.etree as ET
import json
import copy
import subprocess
import pathlib
from collections import defaultdict
from bdd_tester import BDDTester
def remove_comments(etree):
comments = etree.xpath("//comment()")
for c in comments:
p = c.getparent()
p.remove(c)
return etree
def get_datasets(registry_id="", exceptions="", include_urls=""):
datasets = []
if registry_id:
registries = registry_id.split()
for registry in registries:
exceptions = exceptions or []
dataset_df = pd.read_csv("https://iatiregistry.org/csv/download/" + registry)
dataset_df = dataset_df[dataset_df["file-type"] != "organisation"]
dataset_df = dataset_df[~dataset_df["registry-file-id"].isin(exceptions)]
datasets.extend(dataset_df['source-url'].tolist())
datasets.extend(include_urls.split())
return datasets
def all_activities(datasets):
print("Removed unwanted activities and setup comment-removal method")
print("\nCombining {} IATI files \n".format(len(datasets)))
# Start with the first file, with comments removed
big_iati = remove_comments(ET.fromstring(rq.get(datasets[0]).content))
# Start a dictionary to keep track of the additions
merge_log = {datasets[0]: len(big_iati.getchildren())}
# Iterate through the 2nd through last file,
# insert their activities into the first,
# and update the dictionary
for url in datasets[1:]:
data = remove_comments(ET.fromstring(rq.get(url).content))
merge_log[url] = len(data.getchildren())
big_iati.extend(data.getchildren())
# Print a small report on the merging
print("Files Merged: ")
for file, activity_count in merge_log.items():
print("|-> {} activities from {}".format(activity_count, file))
print("|--> {} in total".format(len(big_iati.getchildren())))
return big_iati
def current_activities(all_activities):
import datetime as dt
from dateutil.relativedelta import relativedelta
# Filter out non-current activities, if appropriate
# See https://github.com/pwyf/latest-index-indicator-definitions/issues/1
log_columns = [
"iati-id",
"status_check",
"planned_end_date_check",
"actual_end_date_check",
"transaction_date_check",
"pwyf_current",
]
count = 1
current_check_log = pd.DataFrame(columns=log_columns)
for activity in all_activities:
status_check = False
planned_end_date_check = False
actual_end_date_check = False
transaction_date_check = False
# print("Activity {} of {}".format(count, len(big_iati)))
if activity.xpath("activity-status[@code=2]"):
status_check = True
if activity.xpath("activity-date[@type=3]/@iso-date"):
date_time_obj = dt.datetime.strptime(activity.xpath("activity-date[@type=3]/@iso-date")[0], "%Y-%m-%d")
if date_time_obj > (dt.datetime.now() - relativedelta(years=1)):
planned_end_date_check = True
if activity.xpath("activity-date[@type=4]/@iso-date"):
date_time_obj = dt.datetime.strptime(activity.xpath("activity-date[@type=4]/@iso-date")[0], "%Y-%m-%d")
if date_time_obj > (dt.datetime.now() - relativedelta(years=1)):
actual_end_date_check = True
if activity.xpath("transaction/transaction-type[@code=2 or @code=3 or @code=4]"):
dates = activity.xpath(
"transaction[transaction-type[@code=2 or @code=3 or @code=4]]/transaction-date/@iso-date"
)
date_truths = [
dt.datetime.strptime(date, "%Y-%m-%d") > (dt.datetime.now() - relativedelta(years=1)) for date in dates
]
if True in date_truths:
transaction_date_check = True
pwyf_current = status_check or planned_end_date_check or actual_end_date_check or transaction_date_check
current_check_log = current_check_log.append(
{
"iati-id": activity.findtext("iati-identifier"),
"status_check": status_check,
"planned_end_date_check": planned_end_date_check,
"actual_end_date_check": actual_end_date_check,
"transaction_date_check": transaction_date_check,
"pwyf_current": pwyf_current,
},
ignore_index=True,
)
count = count + 1
current_check_log.to_csv("current_check_log.csv")
current_activities = copy.deepcopy(all_activities)
cur_length = len(current_activities)
for activity in current_activities:
if (
activity.findtext("iati-identifier")
in current_check_log.loc[current_check_log["pwyf_current"] == False, "iati-id"].values
):
activity.getparent().remove(activity)
print("Removed {} non-current activities from a total of {}.".format((cur_length - len(current_activities)), cur_length))
print("{} current activities remain.".format(len(current_activities)))
return current_activities
def coverage_check(tree, path, manual_list_entry=False):
if manual_list_entry:
denominator = len(tree)
numerator = len(path)
else:
denominator = len(tree.getchildren())
numerator = len(tree.xpath(path))
coverage = numerator / denominator
return denominator, numerator, coverage
def cove_validation(activities):
with open("combined.xml", "wb+") as out_file:
out_file.write(ET.tostring(activities, encoding="utf8", pretty_print=True))
json_validation_filepath = "validation.json"
url = "https://iati.cove.opendataservices.coop/api_test"
files = {"file": open("combined.xml", "rb")}
r = rq.post(url, files=files, data={"name": "combined.xml"})
print(r)
print("CoVE validation was successful." if r.ok else "Something went wrong.")
validation_json = r.json()
with open(json_validation_filepath, "w") as out_file:
json.dump(validation_json, out_file)
print("Validation JSON file has been written to {}.".format(json_validation_filepath))
ruleset_table =
|
pd.DataFrame(data=validation_json["ruleset_errors"])
|
pandas.DataFrame
|
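A small illustrative example of the pd.DataFrame completion above; the ruleset_errors payload here is made up (the real keys returned by the CoVE API may differ) and only shows that a list of error dictionaries becomes one row per error.
import pandas as pd
validation_json = {"ruleset_errors": [
{"id": "ACT-1", "rule": "start-date before end-date", "explanation": "dates reversed"},  # illustrative keys only
{"id": "ACT-2", "rule": "sector must be present", "explanation": "missing sector"},
]}
ruleset_table = pd.DataFrame(data=validation_json["ruleset_errors"])
print(ruleset_table)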
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
__author__ = ["<NAME>", "<NAME>"]
__all__ = ["_TbatsAdapter"]
import numpy as np
import pandas as pd
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin
from sktime.forecasting.base._sktime import _SktimeForecaster
from sktime.utils.validation import check_n_jobs
from sktime.utils.validation.forecasting import check_sp
from sktime.utils.validation.forecasting import check_y_X
class _TbatsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster):
"""Base class for interfacing tbats forecasting algorithms"""
def __init__(
self,
use_box_cox=None,
box_cox_bounds=(0, 1),
use_trend=None,
use_damped_trend=None,
sp=None,
use_arma_errors=True,
show_warnings=True,
n_jobs=None,
multiprocessing_start_method="spawn",
context=None,
):
self.use_box_cox = use_box_cox
self.box_cox_bounds = box_cox_bounds
self.use_trend = use_trend
self.use_damped_trend = use_damped_trend
self.sp = sp
self.use_arma_errors = use_arma_errors
self.show_warnings = show_warnings
self.n_jobs = n_jobs
self.multiprocessing_start_method = multiprocessing_start_method
self.context = context
# custom sktime args
self._forecaster = None
super(_TbatsAdapter, self).__init__()
def _instantiate_model(self):
n_jobs = check_n_jobs(self.n_jobs)
sp = check_sp(self.sp, enforce_list=True)
return self._ModelClass(
use_box_cox=self.use_box_cox,
box_cox_bounds=self.box_cox_bounds,
use_trend=self.use_trend,
use_damped_trend=self.use_damped_trend,
seasonal_periods=sp,
use_arma_errors=self.use_arma_errors,
show_warnings=self.show_warnings,
n_jobs=n_jobs,
multiprocessing_start_method=self.multiprocessing_start_method,
context=self.context,
)
def fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list or np.array, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables (ignored)
Returns
-------
self : returns an instance of self.
"""
self._is_fitted = False
y, X = check_y_X(y, X)
self._set_y_X(y, X)
self._set_fh(fh)
self._forecaster = self._instantiate_model()
self._forecaster = self._forecaster.fit(y)
self._is_fitted = True
return self
def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
fh = fh.to_relative(cutoff=self.cutoff)
if not fh.is_all_in_sample(cutoff=self.cutoff):
fh_out = fh.to_out_of_sample(cutoff=self.cutoff)
steps = fh_out.to_pandas().max()
out = pd.DataFrame(
self._forecaster.forecast(steps=steps, confidence_level=1 - alpha)[1]
)
y_out = out["mean"]
# pred_int
lower =
|
pd.Series(out["lower_bound"])
|
pandas.Series
|
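A minimal sketch (toy forecast values; the upper_bound key is an assumption, only mean and lower_bound appear in the code above) of the pd.Series completion: the adapter wraps the forecaster's bound columns into pandas Series when building prediction intervals.
import pandas as pd
out = pd.DataFrame({"mean": [10.0, 11.0], "lower_bound": [8.0, 9.0], "upper_bound": [12.0, 13.0]})
lower = pd.Series(out["lower_bound"])
upper = pd.Series(out["upper_bound"])  # upper_bound key assumed for illustration
pred_int = pd.DataFrame({"lower": lower, "upper": upper})
print(pred_int)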
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from io import (
BytesIO,
StringIO,
)
import os
import platform
from urllib.error import URLError
import pytest
from pandas.errors import (
EmptyDataError,
ParserError,
)
import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas._testing as tm
# TODO(1.4) Please xfail individual tests at release time
# instead of skip
pytestmark = pytest.mark.usefixtures("pyarrow_skip")
@pytest.mark.network
@tm.network(
url=(
"https://raw.github.com/pandas-dev/pandas/main/"
"pandas/tests/io/parser/data/salaries.csv"
),
check_before_test=True,
)
def test_url(all_parsers, csv_dir_path):
parser = all_parsers
kwargs = {"sep": "\t"}
url = (
"https://raw.github.com/pandas-dev/pandas/main/"
"pandas/tests/io/parser/data/salaries.csv"
)
url_result = parser.read_csv(url, **kwargs)
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
tm.assert_frame_equal(url_result, local_result)
@pytest.mark.slow
def test_local_file(all_parsers, csv_dir_path):
parser = all_parsers
kwargs = {"sep": "\t"}
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
url = "file://localhost/" + local_path
try:
url_result = parser.read_csv(url, **kwargs)
tm.assert_frame_equal(url_result, local_result)
except URLError:
# Fails on some systems.
pytest.skip("Failing on: " + " ".join(platform.uname()))
def test_path_path_lib(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_local_path(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv, lambda p: parser.read_csv(p, index_col=0)
)
tm.assert_frame_equal(df, result)
def test_nonexistent_path(all_parsers):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
# GH#29233 "File foo" instead of "File b'foo'"
parser = all_parsers
path = f"{tm.rands(10)}.csv"
msg = r"\[Errno 2\]"
with pytest.raises(FileNotFoundError, match=msg) as e:
parser.read_csv(path)
assert path == e.value.filename
@td.skip_if_windows # os.chmod does not work in windows
def test_no_permission(all_parsers):
# GH 23784
parser = all_parsers
msg = r"\[Errno 13\]"
with tm.ensure_clean() as path:
os.chmod(path, 0) # make file unreadable
# verify that this process cannot open the file (not running as sudo)
try:
with open(path):
pass
pytest.skip("Running as sudo.")
except PermissionError:
pass
with pytest.raises(PermissionError, match=msg) as e:
parser.read_csv(path)
assert path == e.value.filename
@pytest.mark.parametrize(
"data,kwargs,expected,msg",
[
# gh-10728: WHITESPACE_LINE
(
"a,b,c\n4,5,6\n ",
{},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# gh-10548: EAT_LINE_COMMENT
(
"a,b,c\n4,5,6\n#comment",
{"comment": "#"},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_CRNL_NOP
(
"a,b,c\n4,5,6\n\r",
{},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_COMMENT
(
"a,b,c\n4,5,6#comment",
{"comment": "#"},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# SKIP_LINE
(
"a,b,c\n4,5,6\nskipme",
{"skiprows": [2]},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_LINE_COMMENT
(
"a,b,c\n4,5,6\n#comment",
{"comment": "#", "skip_blank_lines": False},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# IN_FIELD
(
"a,b,c\n4,5,6\n ",
{"skip_blank_lines": False},
DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]),
None,
),
# EAT_CRNL
(
"a,b,c\n4,5,6\n\r",
{"skip_blank_lines": False},
DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]),
None,
),
# ESCAPED_CHAR
(
"a,b,c\n4,5,6\n\\",
{"escapechar": "\\"},
None,
"(EOF following escape character)|(unexpected end of data)",
),
# ESCAPE_IN_QUOTED_FIELD
(
'a,b,c\n4,5,6\n"\\',
{"escapechar": "\\"},
None,
"(EOF inside string starting at row 2)|(unexpected end of data)",
),
# IN_QUOTED_FIELD
(
'a,b,c\n4,5,6\n"',
{"escapechar": "\\"},
None,
"(EOF inside string starting at row 2)|(unexpected end of data)",
),
],
ids=[
"whitespace-line",
"eat-line-comment",
"eat-crnl-nop",
"eat-comment",
"skip-line",
"eat-line-comment",
"in-field",
"eat-crnl",
"escaped-char",
"escape-in-quoted-field",
"in-quoted-field",
],
)
def test_eof_states(all_parsers, data, kwargs, expected, msg):
# see gh-10728, gh-10548
parser = all_parsers
if expected is None:
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_temporary_file(all_parsers):
# see gh-13398
parser = all_parsers
data = "0 0"
with tm.ensure_clean(mode="w+", return_filelike=True) as new_file:
new_file.write(data)
new_file.flush()
new_file.seek(0)
result = parser.read_csv(new_file, sep=r"\s+", header=None)
expected = DataFrame([[0, 0]])
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte(all_parsers):
# see gh-5500
parser = all_parsers
data = "a,b\n1\x1a,2"
expected = DataFrame([["1\x1a", 2]], columns=["a", "b"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte_to_file(all_parsers):
# see gh-16559
parser = all_parsers
data = b'c1,c2\r\n"test \x1a test", test\r\n'
expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])
path = f"__{tm.rands(10)}__.csv"
with tm.ensure_clean(path) as path:
with open(path, "wb") as f:
f.write(data)
result = parser.read_csv(path)
tm.assert_frame_equal(result, expected)
def test_file_handle_string_io(all_parsers):
# gh-14418
#
# Don't close user provided file handles.
parser = all_parsers
data = "a,b\n1,2"
fh = StringIO(data)
parser.read_csv(fh)
assert not fh.closed
def test_file_handles_with_open(all_parsers, csv1):
# gh-14418
#
# Don't close user provided file handles.
parser = all_parsers
for mode in ["r", "rb"]:
with open(csv1, mode) as f:
parser.read_csv(f)
assert not f.closed
def test_invalid_file_buffer_class(all_parsers):
# see gh-15337
class InvalidBuffer:
pass
parser = all_parsers
msg = "Invalid file path or buffer object type"
with pytest.raises(ValueError, match=msg):
parser.read_csv(InvalidBuffer())
def test_invalid_file_buffer_mock(all_parsers):
# see gh-15337
parser = all_parsers
msg = "Invalid file path or buffer object type"
class Foo:
pass
with pytest.raises(ValueError, match=msg):
parser.read_csv(Foo())
def test_valid_file_buffer_seems_invalid(all_parsers):
# gh-16135: we want to ensure that "tell" and "seek"
# aren't actually being used when we call `read_csv`
#
# Thus, while the object may look "invalid" (these
# methods are attributes of the `StringIO` class),
# it is still a valid file-object for our purposes.
class NoSeekTellBuffer(StringIO):
def tell(self):
raise AttributeError("No tell method")
def seek(self, pos, whence=0):
raise AttributeError("No seek method")
data = "a\n1"
parser = all_parsers
expected = DataFrame({"a": [1]})
result = parser.read_csv(NoSeekTellBuffer(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("io_class", [StringIO, BytesIO])
@pytest.mark.parametrize("encoding", [None, "utf-8"])
def test_read_csv_file_handle(all_parsers, io_class, encoding):
"""
Test whether read_csv does not close user-provided file handles.
GH 36980
"""
parser = all_parsers
expected =
|
DataFrame({"a": [1], "b": [2]})
|
pandas.DataFrame
|
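An illustrative stand-alone version (plain pandas, outside the parser test fixtures) of what the test above verifies: read_csv on a user-supplied handle should return the expected frame and leave the handle open.
from io import StringIO
import pandas as pd
handle = StringIO("a,b\n1,2")
result = pd.read_csv(handle)
expected = pd.DataFrame({"a": [1], "b": [2]})
pd.testing.assert_frame_equal(result, expected)
assert not handle.closed  # read_csv must not close user-provided handles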
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
for name in [None, "X", "B"]:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list("abc")
# ordered=True
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, index
)
# ordered=False
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, sort_index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, nosort_index
)
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
# single grouper
exp_full = DataFrame(
{
"A": [2.0, 1.0, np.nan],
"B": [25.0, 20.0, np.nan],
"C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
"C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
}
)
for col in ["C1", "C2"]:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"func, values",
[
("first", ["second", "first"]),
("last", ["fourth", "third"]),
("min", ["fourth", "first"]),
("max", ["second", "third"]),
],
)
def test_preserve_on_ordered_ops(func, values):
# gh-18502
# preserve the categoricals on ops
c = Categorical(["first", "second", "third", "fourth"], ordered=True)
df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
g = df.groupby("payload")
result = getattr(g, func)()
expected = DataFrame(
{"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b", observed=False).mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_groupby_empty_with_category():
# GH-9614
# test fix for when group by on None resulted in
# coercion of dtype categorical -> float
df = DataFrame({"A": [None] * 3, "B":
|
Categorical(["train", "train", "test"])
|
pandas.Categorical
|
"""Debiasing using reweighing"""
"""
This data recipe performs reweighing debiasing using the AIF360 package.
https://github.com/Trusted-AI/AIF360
<NAME>., <NAME>. Data preprocessing techniques for classification without discrimination.
Knowl Inf Syst 33, 1–33 (2012). https://doi.org/10.1007/s10115-011-0463-8
The transformer splits the original data as specified and returns training, validation, and test sets
with weights added.
1. Update the folder_path and data_file variables to indicate the location of the dataset(s).
2. validation_test_files lists additional validation or test files that need to be updated with weights.
3. validation_split indicates the fractions at which the original data should be split to create a
validation and test set. If it's empty, no validation or test set is created. [0.7] would create
a 70/30 training/validation split. [0.7, 0.9] would create a 70/20/10 training, validation, and test split.
4. target is the name of the target column.
5. favorable_label and unfavorable_label are the socially positive and negative target value respectively.
6. protected_group_info is a list of lists, where each sublist contains the name of a protected column,
the privileged level, and the unprivileged level. Each of the protected columns must be binary.
7. From the Datasets section of Driverless AI, click on ADD DATASET and then UPLOAD DATA RECIPE to upload this file.
Be sure to use the specified validation set for validation when a model is trained. The weights
can cause leakage if the validation or test data is used for determining the weights.
"""
import datatable as dt
import numpy as np
import os
from h2oaicore.data import CustomData
from h2oaicore.systemutils import config
class MyReweightingData(CustomData):
_modules_needed_by_name = ['datetime', 'fairlearn', 'aif360', 'sklearn']
@staticmethod
def create_data():
import pandas as pd
from h2oaicore.models_utils import import_tensorflow
tf = import_tensorflow()
# above is because aif360 requires tensorflow
from aif360.datasets import BinaryLabelDataset
from aif360.algorithms.preprocessing.reweighing import Reweighing
"""
Update the below as needed
"""
#########
#########
#########
# Path to the data
folder_path = 'tmp/'
# Data file
data_file = 'housing_train_proc.csv'
full_data_file = folder_path + data_file
if not os.path.isfile(full_data_file):
# for testing, just return something
if config.hard_asserts:
return dt.Frame(np.array([[1, 2, 3], [4, 5, 6]]))
else:
return []
train = pd.read_csv(full_data_file)
validation_test_files = ['housing_test_proc.csv']
validation_split = [0.6, 0.8]
# Target column
target = 'high_priced'
favorable_label = 0
unfavorable_label = 1
# protected_group_info = [[protected group name 1, privileged level, unprivileged level], [protected group name 2, privileged level, unprivileged level]]
# The protected group columns need to be binary
protected_group_info = [['hispanic', 0, 1], ['black', 0, 1]]
#########
#########
#########
# Set up protected group info
protected_groups = [group_info[0] for group_info in protected_group_info]
dataset_orig = BinaryLabelDataset(df=train, label_names=[target], favorable_label=favorable_label,
unfavorable_label=unfavorable_label,
protected_attribute_names=protected_groups)
privileged_groups = []
unprivileged_groups = []
for protected_group in protected_group_info:
privileged_groups_dict = {}
unprivileged_groups_dict = {}
privileged_groups_dict[protected_group[0]] = protected_group[1]
unprivileged_groups_dict[protected_group[0]] = protected_group[2]
privileged_groups.append(privileged_groups_dict)
unprivileged_groups.append(unprivileged_groups_dict)
# Fit weights on the full dataset to be used on the external test set, if given
RW_full = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
RW_full.fit(dataset_orig)
# Split the original data into train, validation, and test if applicable
if len(validation_split) == 1:
dataset_orig_train, dataset_orig_valid = dataset_orig.split(validation_split, shuffle=True)
elif len(validation_split) == 2:
dataset_orig_train_valid, dataset_orig_test = dataset_orig.split([validation_split[1]], shuffle=True)
# Fit weights on the combined training and validation data; these are used to transform the test split
RW_train_valid = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
RW_train_valid.fit(dataset_orig_train_valid)
dataset_orig_train, dataset_orig_valid = dataset_orig_train_valid.split(
[validation_split[0] / (validation_split[1])], shuffle=True)
else:
dataset_orig_train = dataset_orig
# Fit weights on the training set only
RW = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
RW.fit(dataset_orig_train)
dataset_transf_train = RW.transform(dataset_orig_train)
# Add the weights to the training set
train_df = pd.DataFrame(dataset_transf_train.features, columns=dataset_transf_train.feature_names)
train_df[target] = dataset_transf_train.labels.ravel()
train_df['weights'] = dataset_transf_train.instance_weights.ravel()
# Collect the output datasets, keyed by output file name
dataset_dict = {}
dataset_dict[data_file.split('.')[0] + "_rw_train.csv"] = train_df
# Add weights to the validation split (if a validation split was specified)
if len(validation_split) >= 1:
dataset_transf_valid = RW.transform(dataset_orig_valid)
valid_df = pd.DataFrame(dataset_transf_valid.features, columns=dataset_transf_valid.feature_names)
valid_df[target] = dataset_transf_valid.labels.ravel()
valid_df['weights'] = dataset_transf_valid.instance_weights.ravel()
dataset_dict[data_file.split('.')[0] + "_rw_validation.csv"] = valid_df
# Add weights to the test split (if a test split was specified)
if len(validation_split) >= 2:
dataset_transf_test = RW_train_valid.transform(dataset_orig_test)
test_df = pd.DataFrame(dataset_transf_test.features, columns=dataset_transf_test.feature_names)
test_df[target] = dataset_transf_test.labels.ravel()
test_df['weights'] = dataset_transf_test.instance_weights.ravel()
dataset_dict[data_file.split('.')[0] + "_rw_test.csv"] = test_df
# Add weights to the test files (If provided)
for valid_file in validation_test_files:
valid = pd.read_csv(folder_path + valid_file)
#!/usr/bin/python3
import argparse
import os
import sys
import webbrowser
from datetime import timedelta
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import requests_cache
from plotly import graph_objs as go
from plotly.subplots import make_subplots
from tqdm import tqdm
from finance_benchmark import config
def get_asset_data(assets_ticker, startdate):
"""Retrieve assets data from yahoo finance
Args:
assets_ticker ([str]): list of assets to download
startdate (str): start date
"""
df = pd.DataFrame()
for ticker in tqdm(assets_ticker): # progress bar when downloading data
ticker = ticker.strip()
# cache data to avoid downloading them again when filtering
session = requests_cache.CachedSession(
cache_name="../cache", backend="sqlite", expire_after=timedelta(days=1)
)
try:
# Get daily closing price
data = web.DataReader(
ticker, data_source="yahoo", start=startdate, session=session
)["Close"]
except Exception:
print("Error fetching : " + ticker)
continue
data = pd.DataFrame({"Date": data.index, ticker: data.values})
data.drop_duplicates(
subset="Date", inplace=True
) # remove duplicate rows for the same day
data.set_index("Date", inplace=True)
df = df.join(data, how="outer") # add asset data to main dataframe
df.sort_index(inplace=True)
df.dropna(axis=1, how="all", inplace=True) # drop columns that are entirely NaN
# remove assets with less than 60 days of data history
for col in df.columns:
if (len(df) - df[col].isna().sum()) < 60:
df.drop(col, axis=1, inplace=True)
df.drop_duplicates(inplace=True)
df.to_csv("../assets.csv")
return df
# For later improvement it may be better to use sortino ratio instead of sharpe ratio
# https://www.investopedia.com/ask/answers/010815/what-difference-between-sharpe-ratio-and-sortino-ratio.asp
# Edit: after a try I still prefer sharpe ratio for the moment
def optimize_sharpe_ratio(num_portfolios):
"""Optimize portfolio with sharpe ratio
Generate `num_portfolios` portfolios with different asset weights.
The Sharpe ratio of each portfolio is calculated to find the best-performing one with volatility as low as possible.
Args:
num_portfolios (int): number of portfolios to generate
Returns:
pandas.Series: the portfolio (return, volatility, Sharpe ratio and asset weights) with the highest Sharpe ratio
"""
data = pd.read_csv("../assets.csv")
data.set_index("Date", inplace=True)
if "Portfolio" in data.columns:
data.drop("Portfolio", 1, inplace=True)
# Get number of assets in portfolio
asset_size = len(data.columns)
# convert daily asset prices into daily returns
returns = data.pct_change()
# calculate mean daily return and covariance of daily returns
mean_daily_returns = np.array(returns.mean())
cov_matrix = np.array(returns.cov())
# set up array to hold results
# Increase the size of the array to hold the return, std deviation and sharpe ratio
results = np.zeros((asset_size + 3, num_portfolios))
results = results.tolist()
for i in range(num_portfolios):
# select random weights for portfolio holdings
weights = np.array(np.random.random(asset_size))
# rebalance weights to sum to 1
weights /= np.sum(weights)
# calculate portfolio return and volatility
portfolio_return = np.sum(mean_daily_returns * weights) * 252
portfolio_std_dev = np.sqrt(
np.dot(weights.T, np.dot(cov_matrix, weights))
) * np.sqrt(252)
# store returns and standard deviation in results array
# store Sharpe Ratio (return / volatility) - risk free rate element excluded for simplicity
results[0][i] = portfolio_return
results[1][i] = portfolio_std_dev
results[2][i] = results[0][i] / results[1][i]
# iterate through the weight vector and add data to results array
weights = weights.tolist()
for j, weight in enumerate(weights):
results[j + 3][i] = weight
# convert results array to Pandas DataFrame
my_columns = ["returns", "stdev", "sharpe"]
my_columns.extend(data.columns)
results = np.array(results)
results_frame = pd.DataFrame(results.T, columns=my_columns)
# Portfolio with highest Sharpe Ratio
max_sharpe_portfolio = results_frame.iloc[results_frame["sharpe"].idxmax()]
# Portfolio with minimum standard deviation
min_vol_port = results_frame.iloc[results_frame["stdev"].idxmin()]
# create scatter plot coloured by Sharpe Ratio
fig = go.Figure(
data=(
go.Scattergl(
x=results_frame.stdev,
y=results_frame.returns,
mode="markers",
marker=dict(
color=results_frame.sharpe,
colorbar=dict(title="Sharpe ratio"),
colorscale="bluered",
reversescale=True,
),
)
)
)
# Add asset proportion of portfolio with best sharpe ratio
fig.add_annotation(
text=max_sharpe_portfolio.to_string().replace("\n", "<br>"),
align="left",
showarrow=False,
xref="paper",
yref="paper",
x=1,
y=0,
bordercolor="black",
borderwidth=1,
)
fig.add_annotation(
x=max_sharpe_portfolio[1], y=max_sharpe_portfolio[0], text="Best sharpe ratio"
)
fig.add_annotation(x=min_vol_port[1], y=min_vol_port[0], text="Lower volatility")
fig.update_layout(xaxis_title="Volatility", yaxis_title="Returns")
fig.update_layout(margin=dict(l=0, r=0, t=0, b=0))
fig.write_html("../result_html/sharpe_fig.html", auto_open=False, full_html=False)
print("####### Optimized portfolio #######")
print(max_sharpe_portfolio[2:])
print("###################################")
return max_sharpe_portfolio
def sharpe_each_asset_chart():
"""Get the sharpe ratio of each asset
Returns:
list: list of assets with less than 0.2 sharpe ratio.
"""
data = pd.read_csv("../assets.csv")
data.set_index("Date", inplace=True)
# convert daily asset prices into daily returns
returns = data.pct_change()
mean_return = returns.mean() * 252
volatility = returns.std() * (252 ** 0.5)
#volatility = returns[returns<0].std() * np.sqrt(252)
sharpe_ratio = mean_return / volatility
ticker_to_eliminate = [
key for key, value in sharpe_ratio.to_dict().items() if value <= 0.2
] # remove assets with ratio less than 0.2
print("Sharpe ratio for each asset: ")
print(dict(zip(sharpe_ratio.index.to_list(), sharpe_ratio.to_list())))
fig = go.Figure(
data=(
go.Scattergl(
x=volatility,
y=mean_return,
text=sharpe_ratio.index.to_list(),
hovertext=sharpe_ratio.to_list(),
textposition="top center",
mode="markers+text",
marker=dict(
color=sharpe_ratio,
colorbar=dict(title="Sharpe ratio"),
colorscale="bluered",
reversescale=True,
),
)
)
)
fig.add_shape(
# diagonal line
type="line",
x0=0,
y0=0,
x1=10,
y1=10,
line=dict(
color="MediumPurple",
width=2,
),
)
fig.update_layout(
xaxis_title="Volatility",
yaxis_title="Returns",
yaxis=dict(range=[0, 2]),
xaxis=dict(range=[0, 2]),
)
fig.update_layout(margin=dict(l=0, r=0, t=0, b=0))
fig.write_html(
"../result_html/sharpe_by_asset_fig.html", auto_open=False, full_html=False
)
return ticker_to_eliminate
def performance_chart(weight):
"""Performance chart of each asset
including the optimized portfolio performance
Args:
weight ([int]): weight of each asset in portfolio
"""
fig = go.Figure()
df = pd.read_csv("../assets.csv")
df.set_index("Date", inplace=True)
df = df.loc[~df.index.duplicated(keep="first")]
df_final = df.copy()
for col in df.columns:
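        # normalise each price series to its first valid price, i.e. the cumulative
        # return since the first observation (0.25 means +25%)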
df[col] = df[col] / df[col].at[df[col].first_valid_index()] - 1
# Add optimized portfolio performance line in the graph
df_portfolio = df.copy()
df_portfolio.dropna(inplace=True)
for i in range(len(df_portfolio.columns)):
df_portfolio.iloc[:, [i]] = df_portfolio.iloc[:, [i]] * weight[i]
df_portfolio["Portfolio"] = df_portfolio.sum(axis=1)
df["Portfolio"] = df_portfolio["Portfolio"]
polite_name = config.polite_name
for col in df.columns:
polite_ticker = polite_name[col] if col in polite_name else col
data_col = df[col].dropna()
fig.add_trace(
go.Scattergl(
x=data_col.index, y=data_col.values, mode="lines", name=polite_ticker
)
)
fig.layout.yaxis.tickformat = ",.0%"
fig.update_layout(margin=dict(l=0, r=0, t=0, b=0))
fig.write_html(
"../result_html/evol_asset_price.html", auto_open=False, full_html=False
)
df_final["Portfolio"] = df["Portfolio"]
df_final.to_csv("../assets.csv")
def resample_portfolio_period(weight, period):
"""Change portfolio period index by year or by month
Resample time-series data.
Args:
weight ([int]): weight of each asset in portfolio
period (str): 'Y': yearly and 'M': monthly
Returns:
dataframe: resampled portfolio
"""
df = pd.read_csv("../assets.csv")
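# A minimal sketch of the resampling described in the docstring above (an assumption,
# not the original implementation):
def _resample_portfolio_sketch(df, period="Y"):
    """Resample a date-indexed price frame to year-end ('Y') or month-end ('M') values."""
    df = df.copy()
    df.index = pd.to_datetime(df.index)
    return df.resample(period).last()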
from __future__ import division
import numpy as np
import pandas
import math
import os
import types
import h5py
from six.moves import cPickle as pickle
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("white")
from ML_Tools.Plotting_And_Evaluation.Plotters import *
from ML_Tools.General.Misc_Functions import *
from ML_Tools.General.Ensemble_Functions import ensemblePredict, loadModel
from ML_Tools.General.Batch_Train import getFeature, batchEnsemblePredict
from keras.models import Sequential,model_from_json, load_model
from sklearn.model_selection import StratifiedKFold
dirLoc = "../Data/"
wFactor = 250000/50000
def AMS(s, b):
""" Approximate Median Significance defined as:
AMS = sqrt(
2 { (s + b + b_r) log[1 + (s/(b+b_r))] - s}
)
where b_r = 10, b = background, s = signal, log is natural logarithm """
br = 10.0
radicand = 2 * ((s + b + br) * math.log(1.0 + s / (b + br)) - s)
if radicand < 0:
print('radicand is negative. Exiting')
exit()
else:
return math.sqrt(radicand)
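# Worked example for the formula above (illustrative numbers, not from the data):
# s = 10, b = 100  ->  AMS = sqrt(2 * (120 * ln(1 + 10/110) - 10)) ~= 0.94
def _ams_sanity_check():
    assert abs(AMS(10.0, 100.0) - 0.9395) < 1e-3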
def amsScan(inData, scale=False):
best = [0,-1]
ams = []
for index, row in inData.iterrows():
s = scale[0]*np.sum(inData.loc[(inData['pred_class'] >= row['pred_class']) & (inData['gen_target'] == 1), 'gen_weight'])
b = scale[1]*np.sum(inData.loc[(inData['pred_class'] >= row['pred_class']) & (inData['gen_target'] == 0), 'gen_weight'])
ams.append(AMS(s, b))
if ams[-1] > best[1]:
best = [row['pred_class'], ams[-1]]
print(best)
return ams, best
def foldAMSScan(inData, N=10):
kf = StratifiedKFold(n_splits=N, shuffle=True)
folds = kf.split(inData, inData['gen_target'])
bests = []
for i, (train, test) in enumerate(folds):
bests.append(amsScan(inData.iloc[test], (np.sum(inData[(inData['gen_target'] == 1)]['gen_weight']), np.sum(inData[(inData['gen_target'] == 0)]['gen_weight'])))[1])
print("Fold {}, best AMS {} at cut of {}. Total weights Signal:Bkg. {}:{}".format(i, bests[-1][1], bests[-1][0],
np.sum(inData.iloc[test][inData.gen_target == 1]['gen_weight']),
np.sum(inData.iloc[test][inData.gen_target == 0]['gen_weight'])))
print("Mean cut", np.average([x[0] for x in bests], weights=[1/x[1] for x in bests]), "mean AMS", np.average([x[1] for x in bests], weights=[1/x[1] for x in bests]))
return bests
def amsScanQuick(inData, wFactor=250000./50000.):
s = np.sum(inData.loc[inData['gen_target'] == 1, 'gen_weight'])
b = np.sum(inData.loc[inData['gen_target'] == 0, 'gen_weight'])
tIIs = inData['pred_class'].argsort()
amss = np.empty([len(tIIs)])
amsMax = 0
threshold = 0.0
for tI in range(len(tIIs)):
# don't forget to renormalize the weights to the same sum
# as in the complete training set
amss[tI] = AMS(max(0,s * wFactor),max(0,b * wFactor))
if amss[tI] > amsMax:
amsMax = amss[tI]
threshold = inData['pred_class'].values[tIIs[tI]]
#print tI,threshold
if inData.loc[:, 'gen_target'].values[tIIs[tI]]:
s -= inData.loc[:, 'gen_weight'].values[tIIs[tI]]
else:
b -= inData.loc[:, 'gen_weight'].values[tIIs[tI]]
print (amsMax, threshold)
return amsMax, threshold
def scoreTest(ensemble, weights):
testData = h5py.File(dirLoc + 'testing.hdf5', "r+")
batchEnsemblePredict(ensemble, weights, testData, ensembleSize=10, verbose=1)
def saveTest(cut, name):
testData = h5py.File(dirLoc + 'testing.hdf5', "r+")
data = pandas.DataFrame()
data['EventId'] = getFeature('EventId', testData)
data['pred_class'] = getFeature('pred', testData)
data['Class'] = 'b'
data.loc[data.pred_class >= cut, 'Class'] = 's'
data.sort_values(by=['pred_class'], inplace=True)
data['RankOrder']=range(1, len(data)+1)
data.sort_values(by=['EventId'], inplace=True)
print (dirLoc + name + '_test.csv')
data.to_csv(dirLoc + name + '_test.csv', columns=['EventId', 'RankOrder', 'Class'], index=False)
def convertToDF(datafile, columns={'gen_target', 'gen_weight', 'pred_class'}, nLoad=-1, setFold=-1):
data = pandas.DataFrame()
"""
Test the functionality of the 'PsmDataset' class and the auxiliary
functions in the 'dataset' module
"""
import os
import pytest
import torch
import numpy as np
import pandas as pd
import xenith.dataset
@pytest.fixture
def psm_txt(tmpdir):
"""
Based on one file, make three varieties of xenith files.
Elements 0 and 1 will have the same columns, but slightly
different data.
Element 2 will have different columns but still be valid.
"""
np.random.seed(1)
out_files = [os.path.join(tmpdir, str(i)) for i in range(3)]
test_file = os.path.join("tests", "data", "test.tsv")
base_dat = pd.read_csv(test_file, sep="\t")
# original data
base_dat.to_csv(out_files[0], sep="\t", index=False)
# Modify scores a little
base_dat.Score = base_dat.Score + np.random.normal(size=len(base_dat))
base_dat.PsmId = base_dat.PsmId + "-mod"
base_dat.to_csv(out_files[1], sep="\t", index=False)
# Delete a column
base_dat = base_dat.drop(columns="eVal")
base_dat.to_csv(out_files[2], sep="\t", index=False)
return out_files
def test_parsing(psm_txt):
"""Test that parsing works as expected"""
# Try reading file
xenith.dataset._parse_psms(psm_txt[0], ["scannr"], None)
# Try reading multiple files
xenith.dataset._parse_psms(psm_txt[0:2], ["scannr"], None)
# Try reading multiple, but columns don't match
with pytest.raises(RuntimeError):
xenith.dataset._parse_psms(psm_txt, ["scannr"], None)
# Try reading a file, but it doesn't have a required column
with pytest.raises(RuntimeError):
xenith.dataset._parse_psms(psm_txt[0], ["blah"], None)
@pytest.fixture
def toy_features():
"""
Generate a sample feature dataframe with one column that isn't a
feature.
"""
feat = pd.DataFrame({"A": [1, 2, 3],
"B": [4, 5, 6],
"C": [7, 8, 9],
"D": ["a", "b", "c"]})
return (feat, feat.loc[:, ["A", "B", "C"]])
def test_features(toy_features):
"""Verify basic feature processing and error checking works"""
feat = xenith.dataset._process_features(toy_features[1],
feat_mean=None,
feat_stdev=None,
normalize=True)
val = 1.224745
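    # 1.224745 == 1 / np.std([1, 2, 3], ddof=0): the absolute z-score of the first and
    # last entry of each column after normalisation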
norm_feat = np.array([[-val]*3, [0]*3, [val]*3])
fmean = np.array([2, 5, 8])
fstdev = np.std([1,2,3], ddof=0)
assert np.allclose(feat[0].values, norm_feat, atol=1e-6)
assert np.allclose(fmean, feat[1])
assert np.allclose(fstdev, feat[2])
# Non-numeric columns should raise a ValueError
with pytest.raises(ValueError):
xenith.dataset._process_features(toy_features[0],
feat_mean=None,
feat_stdev=None,
normalize=True)
def test_feature_norm_off(toy_features):
"""Test that 'normalization' of _process_features() works"""
feat = xenith.dataset._process_features(toy_features[1],
feat_mean=None,
feat_stdev=None,
normalize=False)
assert np.allclose(feat[0].values, toy_features[1].values)
def test_feature_custom_norm(toy_features):
"""
Test using a custom mean and standard deviation in
_process_features() works.
"""
fmean = pd.Series([1, 1, 1], index=["A", "B", "C"])
fstdev = pd.Series([1, 1, 1], index=["A", "B", "C"])
norm_feat = np.transpose(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
feat = xenith.dataset._process_features(toy_features[1],
feat_mean=fmean,
feat_stdev=fstdev,
normalize=True)
assert np.allclose(norm_feat, feat[0].values)
assert np.allclose(fmean.values, feat[1].values)
assert np.allclose(fstdev.values, feat[2].values)
def test_feature_mismatch(toy_features):
"""
Test that discrepancies between the features and the normalization factors
raise appropriate errors.
"""
fmean = pd.Series([1, 1], index=["A", "B"])
import numpy as np
import pandas as pd
import pytest
from evalml.data_checks import (
DataCheckMessageCode,
DataCheckWarning,
HighVarianceCVDataCheck
)
high_variance_data_check_name = HighVarianceCVDataCheck.name
hv_pipeline_name = "LogisticRegressionPipeline"
def test_high_variance_cv_data_check_invalid_threshold():
with pytest.raises(ValueError, match="needs to be greater than 0."):
HighVarianceCVDataCheck(threshold=-0.1).validate(pipeline_name=hv_pipeline_name, cv_scores=pd.Series([0, 1, 1]))
def test_high_variance_cv_data_check():
high_variance_cv = HighVarianceCVDataCheck()
assert high_variance_cv.validate(pipeline_name=hv_pipeline_name, cv_scores=[0, 0, 0]) == {"warnings": [], "errors": [], "actions": []}
assert high_variance_cv.validate(pipeline_name=hv_pipeline_name, cv_scores=[1, 1, 1]) == {"warnings": [], "errors": [], "actions": []}
assert high_variance_cv.validate(pipeline_name=hv_pipeline_name, cv_scores=pd.Series([1, 1, 1])) == {"warnings": [], "errors": [], "actions": []}
cv_scores = pd.Series([0, 1, 2, 3])
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import roc_curve, precision_recall_curve, auc, make_scorer, recall_score, accuracy_score, precision_score, confusion_matrix
from sklearn import svm
from sklearn.model_selection import GridSearchCV, StratifiedKFold, RandomizedSearchCV
import pandas as pd
import numpy as np
import csv
np.random.seed(0)
#START: <NAME>
def runRandomForest():
dataFrame = pd.read_csv('processedclevelandPrime.csv')
param_grid = {
'bootstrap': [True, False],
'max_depth': [3, 5, 10, 20, 50, 75, 100, None],
'max_features': ['sqrt', 'log2', None],
'min_samples_leaf': [1, 2, 4, 6, 10],
'min_samples_split': [2, 5, 10],
'n_estimators': [100, 250, 500, 1000, 2000]
}
scorers = {
'precision_score': make_scorer(precision_score),
'recall_score': make_scorer(recall_score),
'accuracy_score': make_scorer(accuracy_score)
}
for i in range(0, len(dataFrame['num'])):
if(dataFrame['num'][i] > 0):
dataFrame.loc[i, 'num'] = 1
dataFrame['is_train'] = np.random.uniform(0, 1, len(dataFrame)) <= .7
train, test = dataFrame[dataFrame['is_train']==True], dataFrame[dataFrame['is_train']==False]
print('Number of observations in the training data:', len(train))
print('Number of observations in the test data:',len(test))
features = dataFrame.columns[:13]
clf = RandomForestClassifier()
def grid_search_wrapper(refit_score='precision_score'):
skf = StratifiedKFold(n_splits=10)
grid_search = RandomizedSearchCV(clf, param_grid, scoring=scorers, refit=refit_score, cv=skf, return_train_score=True, n_jobs=2, n_iter=500)
grid_search.fit(dataFrame[features], dataFrame['num'])
fin = pd.DataFrame(grid_search.cv_results_)
fin = fin.sort_values(by='mean_test_precision_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_max_depth', 'param_max_features', 'param_min_samples_split', 'param_n_estimators', 'param_bootstrap', 'param_min_samples_leaf']].round(3).head(1))
fin = fin.sort_values(by='mean_test_recall_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_max_depth', 'param_max_features', 'param_min_samples_split', 'param_n_estimators', 'param_bootstrap', 'param_min_samples_leaf']].round(3).head(1))
fin = fin.sort_values(by='mean_test_accuracy_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_max_depth', 'param_max_features', 'param_min_samples_split', 'param_n_estimators', 'param_bootstrap', 'param_min_samples_leaf']].round(3).head(1))
return grid_search
grid_search_clf = grid_search_wrapper(refit_score='precision_score')
def runKNN():
dataFrame = pd.read_csv('processedclevelandPrime.csv')
param_grid = {
'n_neighbors' : np.arange(1, 25, 1)
}
scorers = {
'precision_score': make_scorer(precision_score),
'recall_score': make_scorer(recall_score),
'accuracy_score': make_scorer(accuracy_score)
}
for i in range(0, len(dataFrame['num'])):
if(dataFrame['num'][i] > 0):
dataFrame.loc[i, 'num'] = 1
X = np.array(dataFrame.iloc[:, 0:13]) # end index is exclusive
y = np.array(dataFrame['num']) # another way of indexing a pandas df
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
clf = KNeighborsClassifier()
def grid_search_wrapper(refit_score='precision_score'):
skf = StratifiedKFold(n_splits=10)
grid_search = GridSearchCV(clf, param_grid, scoring=scorers, refit=refit_score, cv=skf, return_train_score=True, n_jobs=-1)
grid_search.fit(X, y)
fin = pd.DataFrame(grid_search.cv_results_)
fin = fin.sort_values(by='mean_test_' + refit_score , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_n_neighbors']].round(3).head(1))
fin = fin.sort_values(by='mean_test_recall_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_n_neighbors']].round(3).head(1))
fin = fin.sort_values(by='mean_test_accuracy_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_n_neighbors']].round(3).head(1))
return grid_search
grid_search_clf = grid_search_wrapper(refit_score='precision_score')
#END: <NAME>
#START: <NAME>
def runSVM():
dataFrame = pd.read_csv('processedclevelandPrime.csv')
param_grid = {
'C' : [0.001, 0.01, 0.1, 1, 10],
'gamma':[1e-1, 1, 1e1]
}
scorers = {
'precision_score': make_scorer(precision_score),
'recall_score': make_scorer(recall_score),
'accuracy_score': make_scorer(accuracy_score)
}
for i in range(0, len(dataFrame['num'])):
if(dataFrame['num'][i] > 0):
dataFrame.loc[i, 'num'] = 1
X = np.array(dataFrame.iloc[:, 0:13]) # end index is exclusive
y = np.array(dataFrame['num']) # another way of indexing a pandas df
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
clf = svm.SVC(kernel = 'linear', probability=True)
def grid_search_wrapper(refit_score='accuracy_score'):
skf = StratifiedKFold(n_splits=10)
grid_search = GridSearchCV(clf, param_grid, scoring=scorers, refit=refit_score, cv=skf, return_train_score=True, n_jobs=2)
grid_search.fit(X, y)
fin = pd.DataFrame(grid_search.cv_results_)
fin = fin.sort_values(by='mean_test_precision_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_C', 'param_gamma']].round(3).head(1))
fin = fin.sort_values(by='mean_test_recall_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_C', 'param_gamma']].round(3).head(1))
fin = fin.sort_values(by='mean_test_accuracy_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_C', 'param_gamma']].round(3).head(1))
return grid_search
grid_search_clf = grid_search_wrapper(refit_score='precision_score')
def runAdaBoost():
dataFrame = pd.read_csv('processedclevelandPrime.csv')
param_grid = {
'n_estimators' : [50, 100, 250, 500, 1000, 2000],
'learning_rate':[0.001, 0.01, 0.1, 0.2, 0.3, .5, 1]
}
scorers = {
'precision_score': make_scorer(precision_score),
'recall_score': make_scorer(recall_score),
'accuracy_score': make_scorer(accuracy_score)
}
for i in range(0, len(dataFrame['num'])):
if(dataFrame['num'][i] > 0):
dataFrame.loc[i, 'num'] = 1
X = np.array(dataFrame.iloc[:, 0:13]) # end index is exclusive
y = np.array(dataFrame['num']) # another way of indexing a pandas df
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
clf = AdaBoostClassifier()
def grid_search_wrapper(refit_score='accuracy_score'):
skf = StratifiedKFold(n_splits=10)
grid_search = GridSearchCV(clf, param_grid, scoring=scorers, refit=refit_score, cv=skf, return_train_score=True, n_jobs=2)
grid_search.fit(X, y)
fin = pd.DataFrame(grid_search.cv_results_)
#
# This script is intended to add new providers and update existing providers in the datastore from an Excel input file.
#
# Pass the PROD, TEST or DEV base url as the first commandline argument and the input file as the second
# argument. A third argument identifying the out put file is optional. If this is not provided then the output file
# will tbe the timestamp appended with 'new-providers.xlsx'. For example:
#
# $ python3 new-providers.py https://********.cloudfunctions.net "input-file.xlsx" "output-file.xlsx"
#
# An attempt is made to create a new record for each of the entries in the input file.
# If the provider already exists then the record is updated with the values from the input file
# but the code value is not changed so that the provider's link remains valid. In all cases the results
# and the links for the providers are appended to the output Excel file.
#
# The script will try to use the credentials of a locally configured service account so it
# is important to provide the key file as an environment variable e.g.
#
# $ export GOOGLE_APPLICATION_CREDENTIALS="ppe-inventory-dev.json"
#
from google.cloud import datastore
import uuid
import sys
import logging
import pandas as pd
import urllib.parse
from email_validator import validate_email, EmailNotValidError
import datetime
baseUrl = ''
if len(sys.argv) > 2:
baseUrl = sys.argv[1]
input_file = sys.argv[2]
else:
print('Missing arguments: the first is the base url for the target environment and the second is the input file.')
sys.exit(1)
now = datetime.datetime.now()
logfile = f'{now} new-providers.log'
logging.basicConfig(level=logging.INFO, filename=logfile)
print(f'Writing logs to "{logfile}" file ...')
logging.info(f'Base url is {baseUrl}')
logging.info(f'Input file is {input_file}')
if len(sys.argv) > 3:
output_file = sys.argv[3]
else:
output_file = f'{now} new-providers.xlsx'
sheet_in = pd.read_excel(input_file)
sheet_out = pd.ExcelWriter(output_file)
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
# Recurrent Neural Network
# Importing the libraries
import numpy as np
import pandas as pd
# Importing the training set
dataset_train = pd.read_csv('dataset/Google_Stock_Price_Train.csv')
#!/usr/bin/env python
######
INFO = "Convert results to PDF report"
__version__ = 0.3
######
"""
Title: final_report.py
Author: <NAME>
Date: 06-02-2021 (dd-mm-yyyy)
Description: Convert results to PDF report
"""
import os
import argparse
import pandas as pd
def parse_args():
"""
Argument parser
"""
parser = argparse.ArgumentParser(description=INFO, \
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--sampleName", type=str, required=True,
help="name of sequence sample"),
parser.add_argument("--lineage", type=str, required=True,
help="location to lineage file"),
parser.add_argument("--annotation", type=str, required=False,
help="location to annotation file"),
parser.add_argument("--HVdel", type=str, required=False,
help="location to kma output file for HV69-70"),
parser.add_argument("-o", "--outputDir", type=str, required=False,
help="full path of output folder", default=os.path.abspath("./"))
parser.add_argument("-v", "--version", action="version",
version="%(prog)s {version}".format(version=__version__))
# parse all arguments
args = parser.parse_args()
return args
def fill_html(args):
'''
Code to fill in the placeholders in the html
and generate a html and pdf
:param args: parsed command-line arguments with the sample name, result file locations and output directory
:out pdf: pdf report of the results
:out html: html report of the results
'''
import matplotlib
matplotlib.use('Agg')
from weasyprint import HTML
from jinja2 import Environment, FileSystemLoader
print('Start Filling')
localdir = os.path.dirname(os.path.realpath(__file__))
# create and render html file with tables
env = Environment(loader=FileSystemLoader(localdir))
template = env.get_template('report/final_report_template.html')
# location of logo
logo = os.path.join(localdir, "report/logo.png")
logo = logo.replace(' ','%20')
lineage_df = pd.read_csv(args.lineage)
# obtain annotation file and stats
variant_stats_df = pd.read_csv(args.annotation, sep='\t', engine='python', comment='#')  # read_csv only supports a single comment character
# keep only the annotation columns for a better overview
try:
annotation_df = variant_stats_df[['Sample','Position','Var Type','HGVS','Shorthand']]
except KeyError:
annotation_df = pd.DataFrame({'NA': []})
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 14:52:35 2019
@author: KatieSi
"""
##############################################################################
### Import Packages
##############################################################################
import numpy as np
import pandas as pd
from datetime import datetime, timedelta, date
import random
##############################################################################
### Set Variables
##############################################################################
# Base Variable
ReportName = 'Water Inspection Prioritization Model - Inspection Allocation 1'
RunDate = str(date.today())
### Baseline File
InspectionFile = 'InspectionList2019-09-18.csv'
SegmentationFile = 'Segmentation2019-09-18.csv'
SegmentationNoteFile = 'SegmentationNote2019-09-18.csv'
### Allocation totals per fortnight
FirstRunCountF1 = 625
FirstRunCountF2 = 616
FortnightDate1 = '2019-09-09'
FortnightDate2 = '2019-09-23'
##############################################################################
### Import data
##############################################################################
# Load the baseline files
InspectionList = pd.read_csv(
    r"D:\\Implementation Support\\Python Scripts\\scripts\\Import\\" +
    InspectionFile)
"""
Copyright (C) 2013-2019 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
run_checks.py
~~~~~~~~~~~~~
Checks for model consistency and possible errors when preparing run in the backend.
"""
import numpy as np
import pandas as pd
import xarray as xr
from calliope.core.attrdict import AttrDict
from calliope.core.util.observed_dict import UpdateObserverDict
def check_operate_params(model_data):
"""
if model mode = `operate`, check for clashes in capacity constraints.
In this mode, all capacity constraints are set to parameters in the backend,
so can easily lead to model infeasibility if not checked.
Returns
-------
comments : AttrDict
debug output
warnings : list
possible problems that do not prevent the model run
from continuing
errors : list
serious issues that should raise a ModelError
"""
defaults = UpdateObserverDict(
initial_yaml_string=model_data.attrs['defaults'],
name='defaults', observer=model_data
)
run_config = UpdateObserverDict(
initial_yaml_string=model_data.attrs['run_config'],
name='run_config', observer=model_data
)
warnings, errors = [], []
comments = AttrDict()
def _get_param(loc_tech, var):
if _is_in(loc_tech, var) and not model_data[var].loc[loc_tech].isnull().any():
param = model_data[var].loc[loc_tech].values
else:
param = defaults[var]
return param
def _is_in(loc_tech, set_or_var):
try:
model_data[set_or_var].loc[loc_tech]
return True
except (KeyError, AttributeError):
return False
def _set_inf_and_warn(loc_tech, var, warnings, warning_text):
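        # Undefined (NaN) or already-infinite values are set to np.inf silently; a
        # finite user-supplied value is also overridden with np.inf, but then
        # `warning_text` is recorded so the user knows the constraint was relaxed.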
if np.isinf(model_data[var].loc[loc_tech].item()):
return (np.inf, warnings)
elif model_data[var].loc[loc_tech].isnull().item():
var_name = model_data[var].loc[loc_tech] = np.inf
return (var_name, warnings)
else:
var_name = model_data[var].loc[loc_tech] = np.inf
warnings.append(warning_text)
return var_name, warnings
# Storage initial is carried over between iterations, so must be defined along with storage
if ('loc_techs_store' in model_data.dims.keys() and
'storage_initial' not in model_data.data_vars.keys()):
model_data['storage_initial'] = (
xr.DataArray([0.0 for loc_tech in model_data.loc_techs_store.values],
dims='loc_techs_store')
)
model_data['storage_initial'].attrs['is_result'] = 0.0
warnings.append(
'Initial stored energy not defined, set to zero for all '
'loc::techs in loc_techs_store, for use in iterative optimisation'
)
# Operated units is carried over between iterations, so must be defined in a milp model
if ('loc_techs_milp' in model_data.dims.keys() and
'operated_units' not in model_data.data_vars.keys()):
model_data['operated_units'] = (
xr.DataArray([0 for loc_tech in model_data.loc_techs_milp.values],
dims='loc_techs_milp')
)
model_data['operated_units'].attrs['is_result'] = 1
model_data['operated_units'].attrs['operate_param'] = 1
warnings.append(
'daily operated units not defined, set to zero for all '
'loc::techs in loc_techs_milp, for use in iterative optimisation'
)
for loc_tech in model_data.loc_techs.values:
energy_cap = model_data.energy_cap.loc[loc_tech].item()
# Must have energy_cap defined for all relevant techs in the model
if ((np.isinf(energy_cap) or np.isnan(energy_cap)) and
(_get_param(loc_tech, 'energy_cap_min_use') or
(_get_param(loc_tech, 'force_resource') and
_get_param(loc_tech, 'resource_unit') == 'energy_per_cap'))):
errors.append(
'Operate mode: User must define a finite energy_cap (via '
'energy_cap_equals or energy_cap_max) for {}'.format(loc_tech)
)
elif _is_in(loc_tech, 'loc_techs_finite_resource'):
# Cannot have infinite resource area if linking resource and area (resource_unit = energy_per_area)
if _is_in(loc_tech, 'loc_techs_area') and model_data.resource_unit.loc[loc_tech].item() == 'energy_per_area':
if _is_in(loc_tech, 'resource_area'):
area = model_data.resource_area.loc[loc_tech].item()
else:
area = None
if pd.isnull(area):
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
# load Google stock
google_stock = pd.read_csv(
'./GOOG.csv',
index_col=['Date'], # column that will be used as index
parse_dates=True, # parse dates from strings
usecols=['Date', 'Adj Close']) # columns to load
# load Apple stock
apple_stock = pd.read_csv(
'./AAPL.csv',
index_col=['Date'],
parse_dates=True,
usecols=['Date', 'Adj Close'])
# load Amazon stock
amazon_stock = pd.read_csv(
'./AMZN.csv',
index_col=['Date'],
parse_dates=True,
usecols=['Date', 'Adj Close'])
print(google_stock.head())
# create calendar dates between '2000-01-01' and '2016-12-31'
dates = pd.date_range('2000-01-01', '2016-12-31')
# create an empty DataFrame that uses the above dates as its index
all_stocks = pd.DataFrame(index=dates)
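# A sketch of an assumed continuation (not the original code): rename each 'Adj Close'
# column and join the stocks into the calendar-indexed frame, e.g.
# all_stocks = (all_stocks
#               .join(google_stock.rename(columns={'Adj Close': 'Google'}))
#               .join(apple_stock.rename(columns={'Adj Close': 'Apple'}))
#               .join(amazon_stock.rename(columns={'Adj Close': 'Amazon'})))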
import bisect
from datetime import datetime
from datetime import timezone
from enum import Enum
from typing import Dict
from typing import List
from typing import Optional
import numpy as np
import pandas as pd
class LayerLevel(Enum):
"""Represent the granularity level of a layer in an indexer."""
NONE = 0
SECOND = 1
MINUTE = 2
HOUR = 3
DAY = 4
MONTH = 5
QUARTER = 6
YEAR = 7
@classmethod
def levels(cls) -> Dict[int, "LayerLevel"]:
"""Return the layer levels mapping by values."""
return {v.value: v for k, v in cls.__members__.items()}
@classmethod
def get(cls, value: int) -> Dict[int, "LayerLevel"]:
"""Return the layer level by value."""
return cls.levels()[value]
@classmethod
def min(cls) -> "LayerLevel":
"""Return the minimum layer level."""
levels = cls.levels()
return levels[min(levels)]
@classmethod
def max(cls) -> "LayerLevel":
"""Return the maximum layer level."""
levels = cls.levels()
return levels[max(levels)]
def get_deeper_level(self) -> "LayerLevel":
"""Return the next lower layer level."""
return self.levels()[self.value - 1]
def get_shallower_level(self) -> "LayerLevel":
"""Return the next higher layer level."""
return self.levels()[self.value + 1]
def transform(self, ts: datetime) -> datetime:
"""Transofrm the provided timestamp according to the layer level granularity."""
transformer = self._transformers[self]
if not transformer:
return ts
return transformer(ts)
LayerLevel._transformers = {
LayerLevel.NONE: None,
LayerLevel.SECOND: lambda t: t.replace(microsecond=0),
LayerLevel.MINUTE: lambda t: t.replace(second=0, microsecond=0),
LayerLevel.HOUR: lambda t: t.replace(minute=0, second=0, microsecond=0),
LayerLevel.DAY: lambda t: t.replace(hour=0, minute=0, second=0, microsecond=0),
LayerLevel.MONTH: lambda t: t.replace(
day=1,
hour=0,
minute=0,
second=0,
microsecond=0,
),
LayerLevel.QUARTER: lambda t: t.replace(
month={0: 1, 1: 4, 2: 7, 3: 10}[(t.month - 1) // 3],
day=1,
hour=0,
minute=0,
second=0,
microsecond=0,
),
LayerLevel.YEAR: lambda t: t.replace(
month=1,
day=1,
hour=0,
minute=0,
second=0,
microsecond=0,
),
}
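# Example of the granularity mapping above (illustrative):
#   LayerLevel.QUARTER.transform(datetime(2021, 8, 15, 10, 30))
#   -> datetime(2021, 7, 1, 0, 0)   # snapped to the start of Q3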
MIN_LAYER_LEVEL = LayerLevel.min()
MAX_LAYER_LEVEL = LayerLevel.max()
class Indexer:
"""Base indexer."""
__instance = None
__slots__ = [
"_uri",
"_min_level",
"_max_level",
"_layers",
"_virtual_indexes",
"_last_update",
]
def __init__(
self,
uri: str,
min_level: Optional[LayerLevel] = MIN_LAYER_LEVEL,
max_level: Optional[LayerLevel] = MAX_LAYER_LEVEL,
):
"""Initialize an indexer with the provided layer level ranges."""
self._uri = uri
self._min_level = min_level
self._max_level = max_level
self._layers = None
self._virtual_indexes = None
self._last_update = None
@property
def uri(self) -> str:
"""Return the uri corresponding to the indexer."""
return self._uri
@property
def last_update(self) -> Optional[datetime]:
"""Return the timestamp of the last indexer update."""
return self._last_update
async def load(self, items: List[dict]) -> None:
"""Load the provided items in the indexer."""
raise NotImplementedError()
async def add(self, item: dict) -> None:
"""Add the provided item in the indexer."""
raise NotImplementedError()
async def get(self, date_from: datetime, date_to: datetime) -> np.ndarray:
"""Retrieve the items in the indexer according to the provided time interval."""
raise NotImplementedError()
@staticmethod
def _indexify(date: datetime) -> int:
first_timestamp = datetime.utcfromtimestamp(0).replace(tzinfo=timezone.utc)
# TODO: This should depend on the layer level granularity to avoid
# using huge numbers.
return int((date - first_timestamp).total_seconds() * 10 ** 6)
class MemoryIndexer(Indexer):
"""In-memory indexer."""
def __init__(
self,
min_level: Optional[LayerLevel] = MIN_LAYER_LEVEL,
max_level: Optional[LayerLevel] = MAX_LAYER_LEVEL,
):
"""Initialize an in-memory indexer with the provided layer level ranges."""
super().__init__(":memory:", min_level=min_level, max_level=max_level)
def get(self, date_from: datetime, date_to: datetime) -> np.ndarray:
"""Retrieve the items in the indexer according to the provided time interval."""
if date_from >= date_to:
return np.array([], dtype="object")
date_from = self._min_level.transform(date_from)
date_to = self._min_level.transform(date_to)
vi_index_from = self._indexify(date_from)
vi_index_to = self._indexify(date_to)
v = self._search_in_layer(self._max_level, vi_index_from, vi_index_to)
return np.unique(v[np.flatnonzero(v)])
def _search_in_layer(self, layer_level, vi_index_from, vi_index_to):
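        # Layered search: cover as much of [vi_index_from, vi_index_to) as possible with
        # the coarse postings lists of this layer, then recurse into the next finer
        # layer only for the partially covered edges of the interval.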
index_from = bisect.bisect_left(
self._virtual_indexes[layer_level],
vi_index_from,
)
index_to = (
bisect.bisect_left(self._virtual_indexes[layer_level], vi_index_to) - 1
)
if layer_level == self._min_level:
index_to += 1
if index_from >= index_to:
return np.array([], dtype="object")
postings_lists = self._layers[layer_level][index_from:index_to]
return np.concatenate(postings_lists)
if index_from >= index_to:
return self._search_in_layer(
layer_level.get_deeper_level(),
vi_index_from,
vi_index_to,
)
left = self._search_in_layer(
layer_level.get_deeper_level(),
vi_index_from,
self._virtual_indexes[layer_level][index_from],
)
right = self._search_in_layer(
layer_level.get_deeper_level(),
self._virtual_indexes[layer_level][index_to],
vi_index_to,
)
postings_lists = self._layers[layer_level][index_from:index_to]
center = np.concatenate(postings_lists)
return np.concatenate([left, center, right])
async def load(self, items: List[dict]) -> None:
"""Load the provided items in the indexer."""
def layer(df):
return df["values"].to_numpy()
def virtual_indexes(df):
arr = df["timestamp"].apply(self._indexify).to_numpy()
# TODO: for better memory consumption the data type should be chosen
# wisely. When adding items is supported, the corresponding
# virtual index value has to be checked against overflow.
# IDEA: for further squeezing memory, what if data are stored on different
# arrays each with its minimal datatype?
if arr.max() < (2 ** 32 - 1):
arr = arr.astype(np.uint32)
else:
arr = arr.astype(np.uint64)
return arr
def ts_col_name(layer_level):
return f"timestamp_{layer_level.name.lower()}"
df = pd.DataFrame(items)
import collections
import math
import multiprocessing
import os
import random
import threading
from copy import deepcopy
import pandas as pd
import numpy as np
import tensorflow as tf
from docluster.core import Model
from docluster.core.document_embedding import TfIdf
from docluster.core.preprocessing import Preprocessor, TokenFilter
from docluster.utils.constants import DistanceMetric, FileType
from docluster.utils.data_fetcher import FileFetcher
from docluster.utils.data_saver import FileSaver
from scipy.special import expit
from .word_embeddings import WordEmbeddings
class Word2Vec(Model):
def __init__(self, preprocessor=None, n_skips=16, n_negative_samples=100, n_words=10000, embedding_size=100, batch_size=32, window_size=10, learning_rate=0.025, n_epochs=1, n_workers=4, do_plot=False):
"""
A Skip-Gram model Word2Vec with multi-thread training capability.
Parameters:
-----------
preprocessor : Preprocessor
The preprocessor that will tokenize the documents.
The default one also filters punctuation, tokens with numeric
characters and one letter words. Furthermore, no stemming or
lemmatization is applied. All these can be adjusted
by passing a custom preprocessor.
n_skip : int
The number of skips.
n_negative_samples : int
The number of negative samples that are going to collected for each
batch.
n_words : int
The number of words that the vocabulary will have. The filtering is
based on the word frequency. Therefore, less frequent words will not
be included in the vocabulary.
embedding_size : int
The size of the embedding vectors. Usually a larger size makes the embeddings
more accurate, but this is not always the case. Increasing the size
dramatically affects training time.
batch_size : int
The batch size.
window_size : int
The window size; words within this many positions to the left and to the right
give context to the center word.
learning_rate : int
The initial learning rate of the gradient decent.
n_epochs : int
The number of epochs the model is going to be trained for. Increasing the number
dramatically affects training time.
n_workers : int
The number of workers that are going to train the model concurrently.
It is not recommended to use more workers than the number of cores.
do_plot : bool
Attributes:
-----------
embeddings :
The embedding vectors that represents each word
"""
if preprocessor is None:
additional_filters = [lambda token: len(token) == 1]
token_filter = TokenFilter(filter_stop_words=False,
additional_filters=additional_filters)
preprocessor = Preprocessor(do_stem=False, do_lemmatize=False,
parse_html=False, token_filter=token_filter, lower=False)
self.preprocessor = preprocessor
self.n_skips = n_skips
self.n_negative_samples = n_negative_samples
self.embedding_size = embedding_size
self.batch_size = batch_size
self.window_size = window_size
self.learning_rate = learning_rate
self.n_epochs = n_epochs
self.n_words = n_words
self.n_workers = n_workers
self._total_loss = 0
self._dist_metric = DistanceMetric.cosine
self.embeddings = WordEmbeddings(size=embedding_size, n_words=n_words)
self.locks = np.ones(n_words)
self.syn1 = np.zeros((n_words, embedding_size))
self.syn1neg = np.zeros((n_words, embedding_size))
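# Example usage (a sketch; assumes the docluster imports above resolve and
# that documents are plain strings):
#
#     w2v = Word2Vec(n_words=5000, embedding_size=50, n_epochs=2)
#     w2v.fit(["the cat sat on the mat", "dogs chase cats"])
#     print(w2v.most_similar_words("cat", n_words=3))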
def fit(self, documents):
"""
Train the Word2Vec model with the documents.
Parameters:
-----------
documents : list(str)
the documents that the Word2Vec model is going to learn the embeddings from.
"""
n_words_trained = 0
tokens, self.vocab, data, self._frequencies, self.diction, self.reverse_diction = self._build_dataset(
documents)
n_tokens = len(tokens)
n_vocab = len(self.vocab)
words_per_epoch = n_vocab / self.n_epochs
self._cum_dist = self._build_cum_dist()
def _build_dataset(self, documents):
"""Preprocesses the documents and creates the dataset for fitting."""
# Get the term frequencies without idf
tfidf = TfIdf(do_idf=False, preprocessor=self.preprocessor, n_words=self.n_words)
tfidf.fit(documents)
# Flatten the document tokens to create one long list
tokens = [token for doc_tokens in tfidf.document_tokens for token in doc_tokens]
# Create the vocab list with 'UNK' for vocab that couldn't make the vocab list
vocab = tfidf.vocab
vocab_set = set(vocab)
diction = {token: index for index, token in enumerate(vocab)}
reverse_diction = dict(zip(diction.values(), diction.keys()))
# Turn the long token list into a index references to the diction
data = list(map(lambda token: diction[token]
if token in vocab_set else 0, tokens))
# Get the frequencies of tokens and add the frequency of 'UNK' at the beginning
# frequencies = np.insert(tfidf.total_term_freq, 0, data.count(0))[:self.n_words]
frequencies = tfidf.total_term_freq[:self.n_words]
return tokens, vocab, data, frequencies, diction, reverse_diction
def _build_cum_dist(self, distortion=0.75, domain=2**31 - 1):
freq_total = np.sum(self._frequencies ** distortion)
cum_dist = np.cumsum(self._frequencies ** distortion) * domain / freq_total
return cum_dist
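# The 0.75 distortion is the unigram-distribution smoothing used by the
# original word2vec negative-sampling scheme. Drawing a negative sample then
# reduces to a binary search over the cumulative table, e.g. (illustrative only):
#
#     draw = np.random.uniform(0, cum_dist[-1])
#     negative_token_id = np.searchsorted(cum_dist, draw)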
def _train(self, data, optimizer, loss):
"""Train the model."""
start_index = 0
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
self._sess = sess
self._sess.run(init_op)
for epoch in range(self.n_epochs):
self._train_one_epoch(data, optimizer, loss)
print("Epoch:", (epoch + 1))
self.embeddings = self._embeddings.eval()
print("\nTraining complete!")
def _train_one_example(self, example, label, alpha):
# skip-gram with negative sampling: update the input embedding against the
# target word and a handful of sampled negatives
# input word (NN input/projection layer)
example_index = int(example)
# target word (NN output)
label_index = int(np.squeeze(label))
embedding = self.embeddings.vectors[example_index]
lock = self.locks[example_index]
# use this word (label = 1) + `n_negative_samples` other random words (label = 0)
word_indices = [label_index]
while len(word_indices) < self.n_negative_samples + 1:
    w = int(np.searchsorted(self._cum_dist, np.random.uniform(0, self._cum_dist[-1])))
    if w != label_index:
        word_indices.append(w)
l2b = self.syn1neg[word_indices]  # 2d matrix, k+1 x embedding_size
prod_term = np.dot(embedding, l2b.T)
fb = expit(prod_term)  # propagate hidden -> output
neg_labels = np.zeros(len(word_indices))
neg_labels[0] = 1.0
# vector of error gradients multiplied by the learning rate
gb = (neg_labels - fb) * alpha
# error that propagates back to the input embedding
neu1e = np.dot(gb, l2b)
self.syn1neg[word_indices] += np.outer(gb, embedding)  # learn hidden -> output
# loss: -log sigmoid(score of the target word) - sum of log sigmoid(-score) for the sampled words
self._total_loss -= np.log(expit(prod_term[0]))
self._total_loss -= np.sum(np.log(expit(-prod_term[1:])))
# learn input -> hidden (mutates the stored embedding vector in place)
embedding += neu1e * lock
def _train_one_epoch(self, data, optimizer, loss):
"""Train one epoch with workers."""
# One producer thread generates batches; worker threads train on them until a poison pill is received
def worker_duty():
"""The duty of a single worker."""
while True:
batch = queue.get()
if batch is None:
break
examples, labels, alphas = batch
for example, label, alpha in zip(examples, labels, alphas):
self._train_one_example(example, label, alpha)
def generate_batch():
"""Create a batch for a training step in Word2Vec."""
# Initialize variables
example = np.zeros(self.batch_size)
labels = np.zeros((self.batch_size, 1))
alphas = np.zeros(self.batch_size)
n_items = 0
index = 0
while index < len(data):
reduced_window = random.randint(0, self.window_size)
if data[index] is not None:
left = max(0, index - self.window_size + reduced_window)
right = min((index + self.window_size + 1 -
reduced_window), len(data) - 1)
for pos2 in range(left, right, 1):
if n_items == self.batch_size:
queue.put((example, labels, alphas))
example = np.zeros(self.batch_size)
labels = np.zeros((self.batch_size, 1))
alphas = np.zeros(self.batch_size)
n_items = 0
if pos2 != index and data[pos2] is not None:
example[n_items] = data[pos2]
labels[n_items] = data[index]
alpha = self.learning_rate - \
(self.learning_rate - 0.001) * (index / self.n_words)
alphas[n_items] = max(0.001, alpha)
n_items += 1
index += 1
# Poison pills
for _ in range(self.n_workers):
queue.put(None)
# Create a threadsafe queue to store the batch indexes
queue = multiprocessing.Queue(maxsize=2 * self.n_workers)
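# The bounded queue (maxsize=2 * n_workers) provides backpressure: the
# producer blocks on put() once the queue is full, so batches are never
# generated much faster than the workers can consume them.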
# Create and run the threads
workers = [threading.Thread(target=generate_batch)]
workers.extend([threading.Thread(target=worker_duty)
for _ in range(self.n_workers - 1)])
for worker in workers:
worker.start()
for thread in workers:
thread.join()
def most_similar_words(self, word, n_words=5, include_similarity=False):
"""
Get the most similar words to a word.
Parameters:
-----------
word : str
The word that is the point of interest.
n_words : int
The number of words that are going to be returned.
include_similarity : bool
Whether to include the similarity score as part of a tuple next to the words.
Return:
-------
similar_words : list(str) or list(tuple(str, float))
The words that are most similar to the word according to the trained
embeddings.
"""
if word in self.vocab:
token_id = self.diction[word]
tiled_embedding = np.tile(self.embeddings[token_id], (self.n_words, 1))
embedding_similarities = self._dist_metric(tiled_embedding, self.embeddings)
most_similar_token_ids = (-embedding_similarities).argsort()[:n_words]
if include_similarity:
    return [(self.reverse_diction[token_id], embedding_similarities[token_id]) for token_id in most_similar_token_ids]
return [self.reverse_diction[token_id] for token_id in most_similar_token_ids]
else:
print('not in vocab')
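# Example (illustrative, assuming a fitted model):
#
#     w2v.most_similar_words("cat", n_words=3)                 # -> e.g. ['cat', 'kitten', 'dog']
#     w2v.most_similar_words("cat", include_similarity=True)   # -> e.g. [('cat', 1.0), ...]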
def save_model(self, model_name, file_type=FileType.csv, safe=True, directory_path=None):
"""
Save the fitted model.
Parameters:
-----------
model_name : str
The model name (also the file name) of the model is going to be saved under.
file_type : FileType
The file type that the model is going to be saved as.
Return:
-------
saved : bool
If the model is saved successfully or not.
"""
if self.embeddings is None:
return False
data =
|
pd.DataFrame(self.embeddings.T)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from scipy import integrate, stats
from numpy import absolute, mean
from pandas import DataFrame
from itertools import islice
import researchpy as rp
import seaborn as sns
import statsmodels.api as sm
from statsmodels.formula.api import ols
import statsmodels.stats.multicomp
#####################################################################################################
# Before beginning: make sure the file name matches the version of experiment you want to analyze.
# Ensure the output .csv's reflect the desired version as well.
headers = [
'participant_id',
'block',
'type',
'occurence',
'switch_type',
'accuracy',
]
df_accuracy = pd.read_csv(r'C:\Users\danie\Documents\SURREY\Project_1\task_switching_paradigm\pilot3_withoccurence.csv', usecols = headers)
df_accuracy1 =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
def load_dataset(csv_path):
df_inflacao = pd.read_csv(csv_path, sep=';', decimal=',')
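# sep=';' and decimal=',' handle the European/Brazilian CSV convention where the
# semicolon delimits columns and the comma is the decimal mark
# (e.g. "1,23" is parsed as the float 1.23).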
df_inflacao['DTTM'] =
|
pd.to_datetime(df_inflacao['Data'], format='%m/%Y')
|
pandas.to_datetime
|
#!/usr/bin/env python
import rosbag
import argparse
import cv2
import os
import pandas
import shutil
from cv_bridge import CvBridge
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-p","--path", default=os.getcwd(), help="path to bag")
parser.add_argument("-b","--bag", required=True, help="name of bag")
opt = parser.parse_args()
topics = [
"/d435/color/image_raw",
#"/d435/depth/image_rect_raw",
#"/d435/infra1/image_rect_raw",
#"/d435/infra2/image_rect_raw",
#"/t265/fisheye1/image_raw",
#"/t265/fisheye2/image_raw",
]
csv_dict = {
'd435_color_image_raw': [],
#'d435_depth_image_rect_raw': [],
#'d435_infra1_image_rect_raw': [],
#'d435_infra2_image_rect_raw': [],
#'t265_fisheye1_image_raw': [],
#'t265_fisheye2_image_raw': []
}
bridge = CvBridge()
bag = rosbag.Bag(opt.bag)
print("creating: subfolders")
path_root = os.path.join(opt.path, 'result/images')
if os.path.exists(path_root):
shutil.rmtree(path_root)
os.mkdir(path_root)
for t in csv_dict:
path_abs = os.path.join(path_root, t)
if os.path.exists(path_abs):
shutil.rmtree(path_abs)
os.mkdir(path_abs)
print("extracting: images")
len_topics = len(topics)
frame_count = 0
for topic, msg, t in bag.read_messages(topics=topics):
new_topic_name = topic[1:].replace('/', '_')
frame_name = "frame%06i.png" % ( (frame_count // len_topics) + 1)
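# Frame numbering: messages are assumed to arrive interleaved across the enabled
# topics, so integer-dividing the running message count by the number of topics
# yields a per-frame index shared by all topics.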
frame_count += 1
path = os.path.join(new_topic_name, frame_name)
path_abs = os.path.join(path_root, path)
csv_dict[new_topic_name].append([t.__str__(), path])
cv_img = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
if new_topic_name == 'd435_color_image_raw':
cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
cv2.imwrite(path_abs, cv_img)
print(t,new_topic_name)
print("creating: csv-files")
for t in csv_dict:
path_abs = os.path.join(path_root, t+'.csv')
|
pandas.DataFrame(csv_dict[t])
|
pandas.DataFrame
|
import os, pickle
import pandas as pd
import numpy as np
import seaborn as sns
import statistics
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import missingno as msno
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTE
import sklearn
from sklearn.feature_selection import SelectPercentile, f_classif
from src.config import Config
import warnings
warnings.filterwarnings('ignore')
pd.set_option('display.max_rows', 500)
class Analysis(Config):
def __init__(self):
self.data = {}
def read_file(self, fname=None):
try:
if fname is None:
fname = os.path.join(Config.DATA["INPUT_PATH"])
print("Reading file: {} ...".format(fname))
data = pd.read_csv(fname)
for col in data.columns:
if len(data[col].unique()) < 20 or col in ["12", "64", "95", "target"]:
data[col] = data[col].astype("category")
print("Data import complete for file: {} ...".format(fname))
return data
except FileNotFoundError:
print(fname)
print("File {} is not found ... Please specify the correct path in config.py".format(fname))
def summary_statistics(self, data, dtype):
if dtype == "numerical":
df_stats_num = data.select_dtypes(["float", "int"]).describe()
kurtosis_list = []
skewness_list = []
numerical_column_list = [col for col in df_stats_num]
for col in df_stats_num:
kurtosis_list.append(data[col].kurtosis())
skewness_list.append(data[col].skew())
new_dict_kurtosis = dict(zip(numerical_column_list,kurtosis_list))
new_dict_skewness = dict(zip(numerical_column_list,skewness_list))
new_rows_kurtosis = pd.Series(data = new_dict_kurtosis, name='kurtosis')
new_rows_skewness = pd.Series(data = new_dict_skewness, name='skewness')
# Append the series of kurtosis and skewness to the .describe() dataframe
df_stats_num = df_stats_num.append(new_rows_kurtosis, ignore_index=False)
df_stats_num = df_stats_num.append(new_rows_skewness, ignore_index=False)
if (len(data) > 10):
df_stats_num = pd.DataFrame(df_stats_num.transpose())
# Set skewness and kurtosis type
# pandas .kurtosis() returns excess kurtosis (Fisher's definition), so a normal distribution scores 0, not 3
df_stats_num.loc[df_stats_num['kurtosis'] < 0 , 'kurtosis type'] = 'Platykurtic' # thin tails
df_stats_num.loc[df_stats_num['kurtosis'] == 0 , 'kurtosis type'] = 'Normal - Mesokurtic'
df_stats_num.loc[df_stats_num['kurtosis'] > 0 , 'kurtosis type'] = 'Leptokurtic' # heavy tails
df_stats_num.loc[df_stats_num['skewness'] < 0, 'skewness type'] = 'Negatively Skewed'
df_stats_num.loc[df_stats_num['skewness'] == 0, 'skewness type'] = 'Symmetrical'
df_stats_num.loc[df_stats_num['skewness'] > 0, 'skewness type'] = 'Positively Skewed'
df_stats_num.loc[(df_stats_num['skewness'] > -0.5) & (df_stats_num['skewness'] < 0.5), 'skewness lvl'] \
= 'Fairly Symmetrical'
df_stats_num.loc[(df_stats_num['skewness'] > -1.0) & (df_stats_num['skewness'] < -0.5) , 'skewness lvl'] \
= 'Moderately Skewed'
df_stats_num.loc[(df_stats_num['skewness'] > 0.5) & (df_stats_num['skewness'] < 1.0), 'skewness lvl'] \
= 'Moderately Skewed'
df_stats_num.loc[(df_stats_num['skewness'] > 1.0) | (df_stats_num['skewness'] < -1.0), 'skewness lvl'] \
= 'Highly Skewed'
final_df = df_stats_num
elif dtype == "categorical":
df_stats_cat = data.select_dtypes(["category"]).describe()
if (len(data) > 10):
df_stats_cat = pd.DataFrame(df_stats_cat.transpose())
final_df = df_stats_cat
return final_df
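# Rule-of-thumb skewness bands used above: |skew| < 0.5 fairly symmetrical,
# 0.5 <= |skew| < 1.0 moderately skewed, |skew| >= 1.0 highly skewed.
# For example, a column with skewness 0.8 is labelled 'Positively Skewed'
# and 'Moderately Skewed'.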
def categorical_barplot(self, data, col, xlabel, title, type="standard"):
fig, ax = plt.subplots(figsize=(15, 5))
if type == "standard":
try:
cat_index = np.unique(data[col], return_counts=True)[0]
cat_df = pd.DataFrame(np.unique(data[col], return_counts=True)[1], index=cat_index)
y = list(cat_df[0])
except Exception:
cat_df = pd.DataFrame(data[col].value_counts())
y = cat_df.iloc[:,0]
x = list(cat_df.index)
elif type == "missing":
x = list(data[col].index)
y = list(data[col])
ax.bar(x, y, color=['grey', 'red', 'green', 'blue', 'cyan'])
for i in range(len(x)):
ax.text(i, y[i], y[i], ha = 'center')
ax.set_title(title, fontsize=14)
ax.set_xlabel(xlabel, fontsize=14)
ax.set_ylabel(col, fontsize=14)
return fig
def data_scaling(self, data):
X = data.loc[:, ~data.columns.isin(['target'])].values
y = data.loc[:,['target']].values
X = pd.DataFrame(StandardScaler().fit_transform(X))
normalized_data= pd.concat([X, pd.DataFrame(y)], axis=1)
return X
def boxplot(self, X, col, start_col, end_col):
if col == 0:
fig, ax = plt.subplots(figsize=(20,8))
sns.boxplot(x="variable", y="value", data=pd.melt(X.iloc[:,:col+11]), ax=ax)
else:
fig, ax = plt.subplots(figsize=(20,8))
sns.boxplot(x="variable", y="value", data=pd.melt(X.iloc[:,start_col:end_col]), ax=ax)
start_col = end_col
end_col = end_col+11
return fig, start_col, end_col
def control_chart(self, data, col, filter=None, type='x'):
if col != "target":
np.random.seed(Config.ANALYSIS_CONFIG["RANDOM_SEED"])
x = data.loc[:,col]
MR = [np.nan]
# Get and append moving ranges
i = 1
for _ in range(1, len(x)):
MR.append(abs(x[i] - x[i-1]))
i += 1
MR = pd.Series(MR)
# Concatenate the mR series with the data and rename columns
data_plot = pd.concat([x,MR, data.target], axis=1)
data_plot.columns = ["x", "mR", "target"]
if filter is not None:
temp_plot = data_plot[filter:].reset_index(drop=True)
else:
temp_plot = data_plot
# Plot x and mR charts
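# The 1.128 constant below is the d2 bias-correction factor for subgroups of
# size 2: sigma is estimated as mean(mR) / 1.128, so the X-chart limits are
# mean(x) +/- 3 * mean(mR) / 1.128 (the classic individuals/moving-range chart).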
fig, axs = plt.subplots(1, figsize=(15,7), sharex=True)
# x chart
if type == "x":
xchart = axs.scatter(temp_plot.index, temp_plot['x'], linestyle='-', marker='o', c=temp_plot['target'])
axs.axhline(statistics.mean(data_plot['x']), color='blue')
axs.axhline(statistics.mean(data_plot['x']) + \
3*statistics.mean(data_plot['mR'][1:len(data_plot['mR'])])/1.128, color = 'red', linestyle = 'dashed')
axs.axhline(statistics.mean(data_plot['x']) - \
3*statistics.mean(data_plot['mR'][1:len(data_plot['mR'])])/1.128, color = 'red', linestyle = 'dashed')
axs.set_title('X-chart for column: {}'.format(col))
axs.legend(*xchart.legend_elements())
axs.set(xlabel='Unit', ylabel='Value')
# mR chart
elif type == "mR":
mRchart = axs.scatter(temp_plot.index, temp_plot['mR'], linestyle='-', marker='o', c=temp_plot['target'])
axs.axhline(statistics.mean(data_plot['mR'][1:len(data_plot['mR'])]), color='blue')
axs.axhline(statistics.mean(data_plot['mR'][1:len(data_plot['mR'])]) + \
3*statistics.mean(data_plot['mR'][1:len(data_plot['mR'])])*0.8525, color='red', linestyle ='dashed')
axs.axhline(statistics.mean(data_plot['mR'][1:len(data_plot['mR'])]) -
3*statistics.mean(data_plot['mR'][1:len(data_plot['mR'])])*0.8525, color='red', linestyle ='dashed')
axs.set_ylim(bottom=0)
axs.set_title('mR Chart for column: {}'.format(col))
axs.legend(*mRchart.legend_elements())
axs.set(xlabel='Unit', ylabel='Range')
return fig
def outlier_identification(self, data, selected_cols, mode='feature_engineering'):
for col in selected_cols:
x = data.loc[:,col]
MR = [np.nan]
i = 1
for _ in range(1, len(x)):
MR.append(abs(x[i] - x[i-1]))
i += 1
MR = pd.Series(MR)
temp_data = pd.concat([x, MR, data.target], axis=1)
temp_data.columns = ["x", "mR", "target"]
ucl = statistics.mean(temp_data['x'])+3*statistics.mean(temp_data['mR'][1:len(temp_data['mR'])])/1.128
lcl = statistics.mean(temp_data['x'])-3*statistics.mean(temp_data['mR'][1:len(temp_data['mR'])])/1.128
if mode == 'feature_engineering':
# We flag out the data points that lie outside the ucl and lcl
# Assumption: Target is not available for prediction
data[col+"_flag"] = ((temp_data["x"] < lcl) | (temp_data["x"] > ucl))*1
data[col+"_flag"] = data[col+"_flag"].astype('category')
elif mode == 'outlier_removal':
# Remove outliers if data belongs to majority class
mask = ((temp_data["x"] < lcl) | (temp_data["x"] > ucl)) & (temp_data["target"].astype("int") == 0)
if mask.any():
temp_data.loc[mask,"x"] = np.nan
data[col] = temp_data["x"]
return data
def missingness_heatmap(self, data):
df_missing = data.loc[:, data.isna().any()]
df_missing = df_missing.isna()
missing_cor = df_missing.corr(method='kendall')
mask = np.triu(np.ones_like(missing_cor, dtype=bool))
mask_df = missing_cor.mask(mask)
check = [c for c in mask_df.columns if any(mask_df[c] > 0.1)]
pair = []
correlation = []
if len(check) > 0:
for col in mask_df.columns:
for index in mask_df.index:
if mask_df.loc[index, col] >= 0.4:
pair.append(str(index+" & "+ col))
correlation.append(np.round(mask_df.loc[index, col], 2))
df = pd.DataFrame({'pair': pair, 'correlation': correlation})
df.sort_values(by="correlation", ascending=False, inplace=True)
return df
def missingness_analysis(self, data, type="matrix"):
"""
Display a missing-data matrix or bar chart.
Args:
data (dataframe): Output from read_file()
"""
missing_col = data.isnull().sum()
percent_missing_col = round(missing_col * 100 / len(data), 2)
fig, ax = plt.subplots(figsize=(15, 5))
if type == "matrix":
msno.matrix(data, ax=ax)
elif type == "bar":
msno.bar(data, ax=ax)
return fig
def missingness_class(self, data):
class0 = data.loc[data.target==0]
missing_data_class0 = pd.DataFrame(class0.isna().sum()[class0.isna().sum() != 0], columns=["class_0"])
class1 = data.loc[data.target==1]
missing_data_class1 = pd.DataFrame(class1.isna().sum()[class1.isna().sum() != 0], columns=["class_1"])
class2 = data.loc[data.target==2]
missing_data_class2 = pd.DataFrame(class2.isna().sum()[class2.isna().sum() != 0], columns=["class_2"])
class3 = data.loc[data.target==3]
missing_data_class3 = pd.DataFrame(class3.isna().sum()[class3.isna().sum() != 0], columns=["class_3"])
class4 = data.loc[data.target==4]
missing_data_class4 = pd.DataFrame(class4.isna().sum()[class4.isna().sum() != 0], columns=["class_4"])
final_df = pd.concat([missing_data_class0, missing_data_class1, missing_data_class2, missing_data_class3,\
missing_data_class4], axis=1)
fig, ax = plt.subplots(figsize=(15, 5))
colors = ['grey', 'red', 'green', 'blue', 'cyan']
final_df.plot.bar(stacked=True,
color=colors,
figsize=(10,7),
ax=ax,
title = "Missingness Count by Target Class",
xlabel = "Input Variables",
ylabel= "Missingness Count",
fontsize=14)
return fig
def missingness_correlation(self, data):
high_cor_missing = self.missingness_heatmap(data)
if len(high_cor_missing) > 0:
print('Column pairs with similar pattern of missingness:- \n')
return msno.heatmap(data)
else:
if data.isnull().sum().sum() == 0:
print('There are no missing data in the columns.')
else:
print('There is only one column that has missing data, therefore no correlation can be computed.')
def mice_imputation(self, data):
MICE_imputer = IterativeImputer(random_state=Config.ANALYSIS_CONFIG["RANDOM_SEED"])
imputed_df = MICE_imputer.fit_transform(data)
return imputed_df
def data_transformation(self, data):
summary_numerical = self.summary_statistics(data, "numerical")
filter_data = data.loc[:, ~data.columns.isin(Config.ANALYSIS_CONFIG["BITRIMODAL_DISTRIBUTION"])]
sym_data = data.loc[:, data.columns.isin(summary_numerical[summary_numerical["skewness lvl"] ==\
"Fairly Symmetrical"].index)]
mskew_data = filter_data.loc[:, filter_data.columns.isin(summary_numerical[summary_numerical["skewness lvl"] \
== "Moderately Skewed"].index)]
hskew_data = filter_data.loc[:, filter_data.columns.isin(summary_numerical[summary_numerical["skewness lvl"] \
== "Highly Skewed"].index)]
mpskew_data = mskew_data.loc[:,(mskew_data>=0).all()]
mpskew_tdata = mpskew_data.copy()
for col in mpskew_data.columns:
mpskew_tdata["{}_sqrt".format(col)] = np.sqrt(mpskew_data.loc[:,col])
mnskew_data = mskew_data.loc[:,(mskew_data<0).any()]
mnskew_tdata = mnskew_data.copy()
for col in mnskew_data.columns:
mnskew_tdata["{}_sqrt".format(col)] = np.sqrt(max(mnskew_data.loc[:, col]+1) - mnskew_data.loc[:, col])
hpskew_data = hskew_data.loc[:,(hskew_data>=0).all()]
hpskew_tdata = hpskew_data.copy()
for col in hpskew_data.columns:
hpskew_tdata["{}_log".format(col)] = np.log(hpskew_data.loc[:,col])
hnskew_data = hskew_data.loc[:,(hskew_data<0).any()]
hnskew_tdata = hnskew_data.copy()
for col in hnskew_data.columns:
hnskew_tdata["{}_log".format(col)] = np.log(max(hnskew_data.loc[:, col]+1) - hnskew_data.loc[:, col])
combined_dict = dict(
SYMMETRICAL_DATA = sym_data,
MODPOS_ORIGINAL = mpskew_data,
MODNEG_ORIGINAL = mnskew_data,
HIGHPOS_ORIGINAL = hpskew_data,
HIGHNEG_ORIGINAL = hnskew_data,
MODPOS_TRANSFORMED = mpskew_tdata.loc[:, mpskew_tdata.columns.str.contains("sqrt")],
MODNEG_TRANSFORMED = mnskew_tdata.loc[:, mnskew_tdata.columns.str.contains("sqrt")],
HIGHPOS_TRANSFORMED = hpskew_tdata.loc[:, hpskew_tdata.columns.str.contains("log")],
HIGHNEG_TRANSFORMED = hnskew_tdata.loc[:, hnskew_tdata.columns.str.contains("log")],
TARGET = data[["target"]]
)
combined_df = pd.concat([df for k, df in combined_dict.items()], axis=1)
transform_numerical = self.summary_statistics(combined_df, "numerical")
return combined_dict, transform_numerical
def histogram_plot(self, data, type="before", grid_cols = 5):
if type == "after":
combined_dict, _ = self.data_transformation(data)
mskew_original = pd.concat([combined_dict["MODPOS_ORIGINAL"], combined_dict["MODNEG_ORIGINAL"]], axis=1)
mskew_transformed = pd.concat([combined_dict["MODPOS_TRANSFORMED"], combined_dict["MODNEG_TRANSFORMED"]], \
axis=1)
hskew_original = pd.concat([combined_dict["HIGHPOS_ORIGINAL"], combined_dict["HIGHNEG_ORIGINAL"]], axis=1)
hskew_transformed = pd.concat([combined_dict["HIGHPOS_TRANSFORMED"], combined_dict["HIGHNEG_TRANSFORMED"]],\
axis=1)
original_list = [mskew_original, hskew_original]
transformed_list = [mskew_transformed, hskew_transformed]
skew_name = ["Moderately Skewed", "Highly Skewed"]
for k, df in enumerate(original_list):
print("Histogram plots before and after data transformation for {} variables:".format(skew_name[k].lower()))
fig = plt.figure(figsize=(20,int(len(original_list[k].columns))*3))
spec = GridSpec(ncols=2, nrows=int(len(original_list[k].columns)), figure=fig)
counter = 0
for i, tup in enumerate(original_list[k].iteritems()):
df = list(tup)[1]
ax = plt.subplot(spec[counter, 0])
df.hist(grid=False, bins=30, color='#00B1A9', alpha=0.3, ax=ax)
ax.axvline(x=df.mean(), lw=2.5, ls=':', color='red')
ax.axvline(x=df.median(), lw=2, ls='--', color='purple')
ax.set_title("Histogram for variable {} before transformation".format(original_list[k].columns[i]))
ax.legend(["mean", "median"])
counter += 1
counter = 0
for j, tup in enumerate(transformed_list[k].iteritems()):
df = list(tup)[1]
ax = plt.subplot(spec[counter, 1])
df.hist(grid=False, color='blue', bins=30, ax=ax, alpha=0.3)
ax.axvline(x=df.mean(), lw=2.5, ls=':', color='red')
ax.axvline(x=df.median(), lw=2, ls='--', color='purple')
ax.set_title("Histogram for variable {} after transformation".format(transformed_list[k].columns[j]))
ax.legend(["mean", "median"])
counter += 1
fig.tight_layout()
display(fig)
elif type == "before":
summary_numerical = self.summary_statistics(data, "numerical")
sym_data = data.loc[:, data.columns.isin(summary_numerical[summary_numerical["skewness lvl"] ==\
"Fairly Symmetrical"].index)]
mskew_data = data.loc[:, data.columns.isin(summary_numerical[summary_numerical["skewness lvl"] ==\
"Moderately Skewed"].index)]
hskew_data = data.loc[:, data.columns.isin(summary_numerical[summary_numerical["skewness lvl"] == \
"Highly Skewed"].index)]
skew_list = [sym_data, mskew_data, hskew_data]
skew_name = ["Fairly Symmetrical", "Moderately Skewed", "Highly Skewed"]
for k, df in enumerate(skew_list):
print("Histogram plots for {} variables:".format(skew_name[k].lower()))
fig = plt.figure(figsize=(20,int(len(skew_list[k].columns))*3))
spec = GridSpec(ncols=grid_cols, nrows=int(len(skew_list[k].columns)), figure=fig)
counter = 0
j = 0
for i, tup in enumerate(skew_list[k].iteritems()):
df = list(tup)[1]
ax = plt.subplot(spec[counter,j])
df.hist(grid=False, bins=30, color='#00B1A9', alpha=0.3, ax=ax)
ax.axvline(x=df.mean(), lw=2.5, ls=':', color='red')
ax.axvline(x=df.median(), lw=2, ls='--', color='purple')
ax.set_title("Histogram for variable {}".format(skew_list[k].columns[i]))
ax.legend(["mean", "median"])
j += 1
if j == grid_cols:
counter += 1
j = 0
fig.tight_layout()
display(fig)
def pca_transformation(self, data, retrain=False, fname=None, save=False):
x = data.loc[:, ~data.columns.isin(['target'])].values
y = data.loc[:, ['target']].values
x = StandardScaler().fit_transform(x)
fpath = fname
if retrain:
pca = PCA(random_state=123).fit(x)
# Plot
fig = plt.figure(figsize=(10,8))
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance')
plt.title('Explained variance ratio per principal component (Before Model Training)')
display(fig)
pca_variance = np.cumsum(pca.explained_variance_ratio_)
# Train PCA
index = np.where(pca_variance > 0.95)[0][0]
pca = PCA(n_components=index, random_state=123)
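# 'index' is the zero-based position of the first component whose cumulative
# explained variance exceeds 0.95; note that n_components=index keeps only the
# components before that one, so the retained variance can land just under 95%.
# Using index + 1 would include the crossing component as well.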
# Save model
if save:
pickle.dump(pca, open(fpath, 'wb'))
# Load and run pca
pca = pickle.load(open(fpath, "rb"))
pcs = pca.fit_transform(x)
# Plot
fig = plt.figure(figsize=(10,8))
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance')
plt.title('Explained variance ratio per principal component (After Model Training)')
display(fig)
# Prep data
columns = ['pc'+str(i+1) for i in range(pcs.shape[1])]
pcs_df = pd.DataFrame(data=pcs, columns = columns)
target_df = pd.DataFrame(data=y, columns=['target'])
pca_df =
|
pd.concat([pcs_df, target_df], axis=1)
|
pandas.concat
|
from numpy import loadtxt
import streamlit as st
import numpy as np
import pandas as pd
import altair as alt
n = 25
particle = ['NO2', 'O3', 'NO', 'CO', 'PM1', 'PM2.5', 'PM10']
def actual_vs_predictedpj():
select_model = st.sidebar.radio(
"Choose Model ?", ('Xgboost', 'Randomforest', 'KNN', 'Linear Regression', 'Lasso'))
select_particle = st.sidebar.radio(
"Choose Particle ?", ('NO2', 'O3', 'NO', 'CO', 'PM2.5', 'PM10'))
if select_particle == 'NO2':
loc = 0
if select_particle == 'O3':
loc = 1
if select_particle == 'NO':
loc = 2
if select_particle == 'CO':
loc = 3
# if select_particle == 'PM1':
# loc = 4
if select_particle == 'PM2.5':
loc = 4
if select_particle == 'PM10':
loc = 5
if select_model == 'Xgboost':
get_xgboost(loc)
if select_model == 'KNN':
get_knn(loc)
if select_model == 'Randomforest':
get_randomforest(loc)
if select_model == 'Linear Regression':
get_linear_regression(loc)
if select_model == 'Lasso':
get_lasso(loc)
def get_knn(loc):
knn_y_test = loadtxt('ModelsPJ/knn_y_test.csv', delimiter=',')
knn_y_test_pred = loadtxt('ModelsPJ/knn_y_test_pred.csv', delimiter=',')
l1 = list()
l1.append(['Y_Actual']*n)
l1.append(np.round(knn_y_test[:n, loc], 9))
l1.append(list(range(1, n+1)))
temp1 = np.array(l1).transpose()
x1 = list(range(1, n+1))
chart_data1 = pd.DataFrame(temp1, x1, columns=['Data', particle[loc], 'x'])
l2 = list()
l2.append(['Y_Predicted']*n)
l2.append(np.round(knn_y_test_pred[:n, loc], 9))
l2.append(list(range(1, n+1)))
temp2 = np.array(l2).transpose()
x2 = list(range(n+1, 2*n+1))
chart_data2 =
|
pd.DataFrame(temp2, x2, columns=['Data', particle[loc], 'x'])
|
pandas.DataFrame
|
from io import StringIO
import pandas as pd
import numpy as np
import pytest
import bioframe
import bioframe.core.checks as checks
# import pyranges as pr
# def bioframe_to_pyranges(df):
# pydf = df.copy()
# pydf.rename(
# {"chrom": "Chromosome", "start": "Start", "end": "End"},
# axis="columns",
# inplace=True,
# )
# return pr.PyRanges(pydf)
# def pyranges_to_bioframe(pydf):
# df = pydf.df
# df.rename(
# {"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
# axis="columns",
# inplace=True,
# )
# return df
# def pyranges_overlap_to_bioframe(pydf):
# ## convert the df output by pyranges join into a bioframe-compatible format
# df = pydf.df.copy()
# df.rename(
# {
# "Chromosome": "chrom_1",
# "Start": "start_1",
# "End": "end_1",
# "Start_b": "start_2",
# "End_b": "end_2",
# },
# axis="columns",
# inplace=True,
# )
# df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
# df["chrom_2"] = df["chrom_1"].values
# return df
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX:4-6"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
### select with non-standard column names
region1 = "chrX:4-6"
new_names = ["chr", "chrstart", "chrend"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=new_names,
)
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]],
columns=new_names,
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
region1 = "chrX"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
### select from a DataFrame with NaNs
colnames = ["chrom", "start", "end", "view_region"]
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_result = pd.DataFrame(
[["chr1", -6, 12, "chr1p"]],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
region1 = "chr1:0-1"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df, region1).reset_index(drop=True)
)
def test_trim():
### trim with view_df
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 32, 36, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 26, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
with pytest.raises(ValueError):
bioframe.trim(df, view_df=view_df)
# df_view_col already exists, so need to specify it:
pd.testing.assert_frame_equal(
df_trimmed, bioframe.trim(df, view_df=view_df, df_view_col="view_region")
)
### trim with view_df interpreted from dictionary for chromsizes
chromsizes = {"chr1": 20, "chrX_0": 5}
df = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX_0", 1, 8],
],
columns=["chrom", "startFunky", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 20],
["chrX_0", 1, 5],
],
columns=["chrom", "startFunky", "end"],
).astype({"startFunky": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(
df,
view_df=chromsizes,
cols=["chrom", "startFunky", "end"],
return_view_columns=False,
),
)
### trim with default limits=None and negative values
df = pd.DataFrame(
[
["chr1", -4, 12],
["chr1", 13, 26],
["chrX", -5, -1],
],
columns=["chrom", "start", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX", 0, 0],
],
columns=["chrom", "start", "end"],
)
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim when there are NaN intervals
df = pd.DataFrame(
[
["chr1", -4, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", -5, -1, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 0, 0, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim with view_df and NA intervals
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12],
["chr1", 0, 12],
[pd.NA, pd.NA, pd.NA],
["chrX", 1, 20],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, pd.NA],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
# infer df_view_col with assign_view and ignore NAs
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(df, view_df=view_df, df_view_col=None, return_view_columns=True)[
["chrom", "start", "end", "view_region"]
],
)
def test_expand():
d = """chrom start end
0 chr1 1 5
1 chr1 50 55
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+")
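# The triple-quoted literal has one more field per data row than the header, so
# read_csv treats the leading column ("0", "1", "2") as the index; the same
# pattern is used for the expected frames throughout these tests.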
expand_bp = 10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 -9 15
1 chr1 40 65
2 chr2 90 210"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with negative pad
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 110 190"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp, side="left")
d = """chrom start end
0 chr1 3 5
1 chr1 52 55
2 chr2 110 200"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with multiplicative pad
mult = 0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 150 150"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
mult = 2.0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 -1 7
1 chr1 48 58
2 chr2 50 250"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with NA and non-integer multiplicative pad
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
mult = 1.10
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 95 205"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df, fake_expanded)
def test_overlap():
### test consistency of overlap(how='inner') with pyranges.join ###
### note does not test overlap_start or overlap_end columns of bioframe.overlap
df1 = mock_bioframe()
df2 = mock_bioframe()
assert df1.equals(df2) == False
# p1 = bioframe_to_pyranges(df1)
# p2 = bioframe_to_pyranges(df2)
# pp = pyranges_overlap_to_bioframe(p1.join(p2, how=None))[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# bb = bioframe.overlap(df1, df2, how="inner")[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# pp = pp.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# bb = bb.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# pd.testing.assert_frame_equal(bb, pp, check_dtype=False, check_exact=False)
# print("overlap elements agree")
### test overlap on= [] ###
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[["chr1", 6, 10, "+", "dog"], ["chrX", 7, 10, "-", "dog"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 3
b = bioframe.overlap(
df1,
df2,
on=["strand"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 2
b = bioframe.overlap(
df1,
df2,
on=None,
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 0
### test overlap 'left', 'outer', and 'right'
b = bioframe.overlap(
df1,
df2,
on=None,
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 5
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="inner",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 0
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="right",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 2
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
### test keep_order and NA handling
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+"],
[pd.NA, pd.NA, pd.NA, "-"],
["chrX", 1, 8, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, "+"], [pd.NA, pd.NA, pd.NA, "-"], ["chrX", 7, 10, "-"]],
columns=["chrom2", "start2", "end2", "strand"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=True, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
assert ~df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=False, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chrX", 1, 8, pd.NA, pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, pd.NA, "tiger"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert (
bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
keep_order=False,
).shape
== (3, 12)
)
### result of overlap should still have bedframe-like properties
overlap_df = bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
overlap_df = bioframe.overlap(
df1,
df2,
how="innter",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
# test keep_order incompatible if how!= 'left'
with pytest.raises(ValueError):
bioframe.overlap(
df1,
df2,
how="outer",
on=["animal"],
cols2=["chrom2", "start2", "end2"],
keep_order=True,
)
def test_cluster():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 1])
).all() # the last interval does not overlap the first three
df_annotated = bioframe.cluster(df1, min_dist=2)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 0])
).all() # all intervals part of the same cluster
df_annotated = bioframe.cluster(df1, min_dist=None)
assert (
df_annotated["cluster"].values == np.array([0, 0, 1, 2])
).all() # adjacent intervals not clustered
df1.iloc[0, 0] = "chrX"
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([2, 0, 0, 1])
).all() # do not cluster intervals across chromosomes
# test consistency with pyranges (which automatically sorts df upon creation and uses 1-based indexing for clusters)
# assert (
# (bioframe_to_pyranges(df1).cluster(count=True).df["Cluster"].values - 1)
# == bioframe.cluster(df1.sort_values(["chrom", "start"]))["cluster"].values
# ).all()
# test on=[] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert (
bioframe.cluster(df1, on=["animal"])["cluster"].values == np.array([0, 1, 0, 2])
).all()
assert (
bioframe.cluster(df1, on=["strand"])["cluster"].values == np.array([0, 1, 1, 2])
).all()
assert (
bioframe.cluster(df1, on=["location", "animal"])["cluster"].values
== np.array([0, 2, 1, 3])
).all()
### test cluster with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.cluster(df1)["cluster"].max() == 3
assert bioframe.cluster(df1, on=["strand"])["cluster"].max() == 4
pd.testing.assert_frame_equal(df1, bioframe.cluster(df1)[df1.columns])
assert checks.is_bedframe(
bioframe.cluster(df1, on=["strand"]),
cols=["chrom", "cluster_start", "cluster_end"],
)
assert checks.is_bedframe(
bioframe.cluster(df1), cols=["chrom", "cluster_start", "cluster_end"]
)
assert checks.is_bedframe(bioframe.cluster(df1))
def test_merge():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
# the last interval does not overlap the first three with default min_dist=0
assert (bioframe.merge(df1)["n_intervals"].values == np.array([3, 1])).all()
# adjacent intervals are not clustered with min_dist=none
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values == np.array([2, 1, 1])
).all()
# all intervals part of one cluster
assert (
bioframe.merge(df1, min_dist=2)["n_intervals"].values == np.array([4])
).all()
df1.iloc[0, 0] = "chrX"
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values
== np.array([1, 1, 1, 1])
).all()
assert (
bioframe.merge(df1, min_dist=0)["n_intervals"].values == np.array([2, 1, 1])
).all()
# total number of intervals should equal length of original dataframe
mock_df = mock_bioframe()
assert np.sum(bioframe.merge(mock_df, min_dist=0)["n_intervals"].values) == len(
mock_df
)
# # test consistency with pyranges
# pd.testing.assert_frame_equal(
# pyranges_to_bioframe(bioframe_to_pyranges(df1).merge(count=True)),
# bioframe.merge(df1),
# check_dtype=False,
# check_exact=False,
# )
# test on=['chrom',...] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert len(bioframe.merge(df1, on=None)) == 2
assert len(bioframe.merge(df1, on=["strand"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location", "animal"])) == 4
d = """ chrom start end animal n_intervals
0 chr1 3 10 cat 2
1 chr1 3 8 dog 1
2 chrX 6 10 cat 1"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.merge(df1, on=["animal"]),
check_dtype=False,
)
# merge with repeated indices
df = pd.DataFrame(
{"chrom": ["chr1", "chr2"], "start": [100, 400], "end": [110, 410]}
)
df.index = [0, 0]
pd.testing.assert_frame_equal(
df.reset_index(drop=True), bioframe.merge(df)[["chrom", "start", "end"]]
)
# test merge with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.merge(df1).shape[0] == 4
assert bioframe.merge(df1)["start"].iloc[0] == 1
assert bioframe.merge(df1)["end"].iloc[0] == 12
assert bioframe.merge(df1, on=["strand"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[1] == df1.shape[1] + 1
assert checks.is_bedframe(bioframe.merge(df1, on=["strand", "animal"]))
def test_complement():
### complementing a df with no intervals in chrX by a view with chrX should return entire chrX region
df1 = pd.DataFrame(
[["chr1", 1, 5], ["chr1", 3, 8], ["chr1", 8, 10], ["chr1", 12, 14]],
columns=["chrom", "start", "end"],
)
df1_chromsizes = {"chr1": 100, "chrX": 100}
df1_complement = pd.DataFrame(
[
["chr1", 0, 1, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with two chromosomes ###
df1.iloc[0, 0] = "chrX"
df1_complement = pd.DataFrame(
[
["chr1", 0, 3, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 1, "chrX:0-100"],
["chrX", 5, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with no view_df and a negative interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-9223372036854775807"],
["chr1", 20, np.iinfo(np.int64).max, "chr1:0-9223372036854775807"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1), df1_complement)
### test complement with an overhanging interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
chromsizes = {"chr1": 15}
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-15"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=chromsizes, view_name_col="VR"), df1_complement
)
### test complement where an interval from df overlaps two different regions from view
### test complement with no view_df and a negative interval
df1 = pd.DataFrame([["chr1", 5, 15]], columns=["chrom", "start", "end"])
chromsizes = [("chr1", 0, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
df1_complement = pd.DataFrame(
[["chr1", 0, 5, "chr1p"], ["chr1", 15, 20, "chr1q"]],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
### test complement with NAs
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 5, 15], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
with pytest.raises(ValueError): # no NAs allowed in chromsizes
bioframe.complement(
df1, [("chr1", pd.NA, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
)
assert checks.is_bedframe(bioframe.complement(df1, chromsizes))
def test_closest():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 4, 8], ["chr1", 10, 11]], columns=["chrom", "start", "end"]
)
### closest(df1,df2,k=1) ###
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 4 8 0"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### closest(df1,df2, ignore_overlaps=True)) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True)
)
### closest(df1,df2,k=2) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 4 8 0
1 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), k=2)
)
### closest(df2,df1) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 4 8 chr1 1 5 0
1 chr1 10 11 chr1 1 5 5 """
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df2, df1, suffixes=("_1", "_2")))
### change first interval to new chrom ###
df2.iloc[0, 0] = "chrA"
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### test other return arguments ###
df2.iloc[0, 0] = "chr1"
d = """
index index_ have_overlap overlap_start overlap_end distance
0 0 0 True 4 5 0
1 0 1 False <NA> <NA> 5
"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.closest(
df1,
df2,
k=2,
return_overlap=True,
return_index=True,
return_input=False,
return_distance=True,
),
check_dtype=False,
)
# closest should ignore empty groups (e.g. from categorical chrom)
df = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
d = """ chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chrX 1 8 chrX 2 10 0
1 chrX 2 10 chrX 1 8 0"""
df_closest = pd.read_csv(StringIO(d), sep=r"\s+")
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df = df.astype({"chrom": df_cat})
pd.testing.assert_frame_equal(
df_closest,
bioframe.closest(df, suffixes=("_1", "_2")),
check_dtype=False,
check_categorical=False,
)
# closest should ignore null rows: code will need to be modified
# as for overlap if an on=[] option is added
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_1": pd.Int64Dtype(),
"end_1": pd.Int64Dtype(),
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True, k=5)
)
with pytest.raises(ValueError): # inputs must be valid bedFrames
df1.iloc[0, 0] = "chr10"
bioframe.closest(df1, df2)
def test_coverage():
#### coverage does not exceed length of original interval
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chr1", 2, 10]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of interval on different chrom returns zero for coverage and n_overlaps
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chrX", 3, 8]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 0 """
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### when a second overlap starts within the first
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8]], columns=["chrom", "start", "end"]
)
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of NA interval returns zero for coverage
df1 = pd.DataFrame(
[
["chr1", 10, 20],
[pd.NA, pd.NA, pd.NA],
["chr1", 3, 8],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
)
df1 = bioframe.sanitize_bedframe(df1)
df2 = bioframe.sanitize_bedframe(df2)
df_coverage = pd.DataFrame(
[
["chr1", 10, 20, 0],
[pd.NA, pd.NA, pd.NA, 0],
["chr1", 3, 8, 5],
[pd.NA, pd.NA, pd.NA, 0],
],
columns=["chrom", "start", "end", "coverage"],
).astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype(), "coverage": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df_coverage, bioframe.coverage(df1, df2))
### coverage without return_input returns a single column dataFrame
assert (
bioframe.coverage(df1, df2, return_input=False)["coverage"].values
== np.array([0, 0, 5, 0])
).all()
def test_subtract():
### no intervals should be left after self-subtraction
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
assert len(bioframe.subtract(df1, df1)) == 0
### no intervals on chrX should remain after subtracting a longer interval
### interval on chr1 should be split.
### additional column should be propagated to children.
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 5, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
### no intervals on chrX should remain after subtracting a longer interval
df2 = pd.DataFrame(
[["chrX", 0, 4], ["chr1", 6, 6], ["chrX", 4, 9]],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 6, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
### subtracting dataframes with funny column names
funny_cols = ["C", "chromStart", "chromStop"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=funny_cols,
)
df1["strand"] = "+"
assert len(bioframe.subtract(df1, df1, cols1=funny_cols, cols2=funny_cols)) == 0
funny_cols2 = ["chr", "st", "e"]
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=funny_cols2,
)
df_result = pd.DataFrame(
[["chr1", 4, 5, "+"], ["chr1", 6, 7, "+"]],
columns=funny_cols + ["strand"],
)
df_result = df_result.astype(
{funny_cols[1]: pd.Int64Dtype(), funny_cols[2]: pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2, cols1=funny_cols, cols2=funny_cols2)
.sort_values(funny_cols)
.reset_index(drop=True),
)
# subtract should ignore empty groups
df1 = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 1, 8],
],
columns=["chrom", "start", "end"],
)
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df1 = df1.astype({"chrom": df_cat})
df_subtracted = pd.DataFrame(
[
["chrX", 8, 10],
],
columns=["chrom", "start", "end"],
)
assert bioframe.subtract(df1, df1).empty
pd.testing.assert_frame_equal(
df_subtracted.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2),
check_dtype=False,
check_categorical=False,
)
## test transferred from deprecated bioframe.split
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 4],
["chr1", 5],
],
columns=["chrom", "pos"],
)
df2["start"] = df2["pos"]
df2["end"] = df2["pos"]
df_result = (
pd.DataFrame(
[
["chrX", 1, 4],
["chrX", 3, 4],
["chrX", 4, 5],
["chrX", 4, 8],
["chr1", 5, 7],
["chr1", 4, 5],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
# Test the case when a chromosome should not be split (now implemented with subtract)
df1 = pd.DataFrame(
[
["chrX", 3, 8],
["chr1", 4, 7],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame([["chrX", 4]], columns=["chrom", "pos"])
df2["start"] = df2["pos"].values
df2["end"] = df2["pos"].values
df_result = (
pd.DataFrame(
[
["chrX", 3, 4],
["chrX", 4, 8],
["chr1", 4, 7],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
# subtract should ignore null rows
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 1, 5]],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
["chrX", 1, 5],
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_subtracted = pd.DataFrame(
[
["chr1", 1, 4],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_subtracted, bioframe.subtract(df1, df2))
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert len(bioframe.subtract(df1, df2)) == 0 # empty df1 but valid chroms in df2
with pytest.raises(ValueError): # no non-null chromosomes
bioframe.subtract(df1, df1)
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
with pytest.raises(ValueError): # no non-null chromosomes
bioframe.subtract(df1, df2)
def test_setdiff():
cols1 = ["chrom1", "start", "end"]
cols2 = ["chrom2", "start", "end"]
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=cols1 + ["strand", "animal"],
)
df2 = pd.DataFrame(
[
["chrX", 7, 10, "-", "dog"],
["chr1", 6, 10, "-", "cat"],
["chr1", 6, 10, "-", "cat"],
],
columns=cols2 + ["strand", "animal"],
)
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=None,
)
)
== 0
) # everything overlaps
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=["animal"],
)
)
== 1
) # two overlap, one remains
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=["strand"],
)
)
== 2
) # one overlaps, two remain
# setdiff should ignore nan rows
df1 = pd.concat([pd.DataFrame([pd.NA]), df1, pd.DataFrame([pd.NA])])[
["chrom1", "start", "end", "strand", "animal"]
]
df1 = df1.astype(
{
"start": pd.Int64Dtype(),
"end":
|
pd.Int64Dtype()
|
pandas.Int64Dtype
|
"""
Tests for Series cumulative operations.
See also
--------
tests.frame.test_cumulative
"""
from itertools import product
import numpy as np
import pytest
import pandas as pd
from pandas import _is_numpy_dev
import pandas._testing as tm
def _check_accum_op(name, series, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(series).values, func(np.array(series)), check_dtype=check_dtype,
)
# with missing values
ts = series.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
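# Hedged, self-contained sketch of the skipna behaviour the helper above exercises
# (tiny made-up series; the real tests use the datetime_series fixture):
_ts = pd.Series([np.nan, 1.0, np.nan, 2.0])
tm.assert_numpy_array_equal(
np.asarray(_ts.cumsum()[1::2]), np.cumsum(np.asarray(_ts.dropna()))
)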
class TestSeriesCumulativeOps:
def test_cumsum(self, datetime_series):
_check_accum_op("cumsum", datetime_series)
def test_cumprod(self, datetime_series):
_check_accum_op("cumprod", datetime_series)
@pytest.mark.xfail(
_is_numpy_dev,
reason="https://github.com/pandas-dev/pandas/issues/31992",
strict=False,
)
def test_cummin(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummin().values,
np.minimum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(
_is_numpy_dev,
reason="https://github.com/pandas-dev/pandas/issues/31992",
strict=False,
)
def test_cummax(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummax().values,
np.maximum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_cummin_datetime64(self, tz):
s = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
).tz_localize(tz)
)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-1"]
).tz_localize(tz)
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-1", "2000-1-1", "2000-1-1"]
).tz_localize(tz)
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_cummax_datetime64(self, tz):
s = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
).tz_localize(tz)
)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-2", "NaT", "2000-1-3"]
).tz_localize(tz)
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-3"]
).tz_localize(tz)
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "1 min"])
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "1 min", "1 min", "1 min"])
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "2 min", "NaT", "3 min"])
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "2 min", "2 min", "3 min"])
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummethods_bool(self):
# GH#6270
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"cummin": np.minimum.accumulate,
"cummax": np.maximum.accumulate,
}
args = product((a, b, c, d), methods)
for s, method in args:
expected = pd.Series(methods[method](s.values))
result = getattr(s, method)()
tm.assert_series_equal(result, expected)
e = pd.Series([False, True, np.nan, False])
cse = pd.Series([0, 1, np.nan, 1], dtype=object)
cpe = pd.Series([False, 0, np.nan, 0])
cmin =
|
pd.Series([False, False, np.nan, False])
|
pandas.Series
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from sklearn.model_selection import StratifiedKFold
import numpy as np
import re
import pickle
import time
import pandas as pd
from pathlib import Path
#import utils.gen_utils as utils
import sys
sys.path.insert(0,'/Users/johnfields/Documents/GitHub/personality-prediction/utils')
import gen_utils as utils
def get_inputs(inp_dir, dataset, embed, embed_mode, mode, layer):
""" Read data from pkl file and prepare for training. """
#file = open(inp_dir + dataset + '-' + embed + '-' + embed_mode + '-' + mode + '.pkl', 'rb')
file = open('/Users/johnfields/Documents/GitHub/personality-prediction/pkl_data/essays-bert-base-cls-512_head.pkl','rb')
data = pickle.load(file)
author_ids, data_x, data_y = list(zip(*data))
file.close()
# alphaW is responsible for which BERT layer embedding we will be using
if layer == 'all':
alphaW = np.full([n_hl], 1 / n_hl)
else:
alphaW = np.zeros([n_hl])
alphaW[int(layer) - 1] = 1
# just changing the way data is stored (tuples of minibatches) and
# getting the output for the required layer of BERT using alphaW
inputs = []
targets = []
n_batches = len(data_y)
for ii in range(n_batches):
inputs.extend(np.einsum('k,kij->ij', alphaW, data_x[ii]))
targets.extend(data_y[ii])
inputs = np.array(inputs)
full_targets = np.array(targets)
return inputs, full_targets
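# Hedged, self-contained sketch of the alphaW trick used above (shapes assumed):
# with stacked BERT layers of shape [n_hl, batch, hidden], a one-hot alphaW picks a
# single layer, while a uniform alphaW would average all of them.
_layers = np.arange(2 * 3 * 4, dtype=float).reshape(2, 3, 4)  # [n_hl=2, batch=3, hidden=4]
_alphaW = np.zeros(2)
_alphaW[1] = 1  # select the second (here: last) layer
assert np.allclose(np.einsum('k,kij->ij', _alphaW, _layers), _layers[1])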
def training(dataset,inputs, full_targets):
""" Train MLP model for each trait on 10-fold corss-validtion. """
if (dataset == 'kaggle'):
trait_labels = ['E', 'N', 'F', 'J']
else:
trait_labels = ['EXT', 'NEU', 'AGR', 'CON', 'OPN']
n_splits = 10
fold_acc = {}
expdata = {}
expdata['acc'], expdata['trait'], expdata['fold'] = [], [], []
for trait_idx in range(full_targets.shape[1]):
# convert targets to one-hot encoding
targets = full_targets[:, trait_idx]
n_data = targets.shape[0]
expdata['trait'].extend([trait_labels[trait_idx]] * n_splits)
expdata['fold'].extend(np.arange(1, n_splits + 1))
skf = StratifiedKFold(n_splits=n_splits, shuffle=False)
k = -1
for train_index, test_index in skf.split(inputs, targets):
x_train, x_test = inputs[train_index], inputs[test_index]
y_train, y_test = targets[train_index], targets[test_index]
# converting to one-hot embedding
y_train = tf.keras.utils.to_categorical(y_train, num_classes=n_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=n_classes)
model = tf.keras.models.Sequential()
# define the neural network architecture
model.add(tf.keras.layers.Dense(50, input_dim=hidden_dim, activation='relu'))
model.add(tf.keras.layers.Dense(n_classes))
k += 1
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['mse', 'accuracy'])
history = model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size,
validation_data=(x_test, y_test), verbose=0)
expdata['acc'].append(100 * max(history.history['val_accuracy']))
print(expdata)
df = pd.DataFrame.from_dict(expdata)
return df
def logging(df, log_expdata=True):
""" Save results and each models config and hyper parameters."""
df['network'], df['dataset'], df['lr'], df['batch_size'], df['epochs'], df['model_input'], df['embed'], df['layer'], df[
'mode'], df['embed_mode'], df['jobid'] = network, \
dataset, lr, batch_size, epochs, MODEL_INPUT, embed, layer, mode, embed_mode, jobid
|
pd.set_option('display.max_columns', None)
|
pandas.set_option
|
# %%
import pandas as pd
from pathlib import Path
# import yaml
# %%
def panda_to_yaml(filename, obj_input):
"""Converts and exports a panda dataframe lexicon into a yaml file.
This can be used by pyContextNLP.
Use filename with .yml extension"""
filepath = Path.cwd() / "negation" / "output" / filename
open(filepath, "w")
with open(filepath, "a") as stream:
# Each row represents one document in the yaml file
for row_index in obj_input.index:
# Each column represents one object per document in yaml file
for col in obj_input.columns:
# Value corresponding to the current document and object
value = obj_input.at[row_index, col]
if
|
pd.isna(value)
|
pandas.isna
|
import numpy as np
import numpy as np
import pandas as pd
from sklearn import preprocessing
import pprint
from os import chdir
from sklearn.ensemble import RandomForestClassifier
import sys
#sys.path.insert(0, '//Users/babakmac/Documents/HypDB/relational-causal-inference/source/HypDB')
#from core.cov_selection import *
#from core.explanation import *
#import core.query as sql
#import modules.statistics.cit as ci_test
#from Modules.InformationTheory.info_theo import *
from sklearn.metrics import confusion_matrix
import copy
from sklearn import tree
from utils.read_data import read_from_csv
from sklearn import model_selection
from sklearn.model_selection import cross_val_score
import seaborn as sns
sns.set(style="white") #white background style for seaborn plots
sns.set(style="whitegrid", color_codes=True)
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score, classification_report, precision_score, recall_score
from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_curve, auc, log_loss
from sklearn.linear_model import LogisticRegression
import statsmodels.api as sm
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import numpy as np
from scipy import interp
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn.linear_model import LogisticRegression
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
import math
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import ShuffleSplit
from sklearn.preprocessing import StandardScaler
def data_split(data,outcome,path,k=5,test_size=0.3):
rs = StratifiedShuffleSplit(n_splits=k, test_size=test_size, random_state=2)
data_y = pd.DataFrame(data[outcome])
data_X = data.drop([outcome], axis=1)
rs.get_n_splits(data_X, data_y)
j = 0
for test, train in rs.split(data_X,data_y):
cur_test = data.iloc[train]
cur_train = data.iloc[test]
cur_train = pd.concat([cur_test, cur_train])
cur_train.to_csv(path + 'train_' + str(j) + '.csv', encoding='utf-8', index=False)
#print(path + 'train_' + str(j) + '.csv')
#cur_test.to_csv(path + 'test_' + str(j) + '.csv', encoding='utf-8', index=False)
#print(len(cur_test.index))
#print(path + 'test_' + str(j) + '.csv')
j +=1
def cross_valid(data,features,D_features,Y_features,X_features,path,k=5):
print('Original Data Size',len(data.index))
train_df = data[features]
dft1 = pd.get_dummies(train_df[X_features])
dft2 = pd.get_dummies(train_df[Y_features])
X = dft1.values
y = dft2.values
y = y.flatten()
cv = StratifiedKFold(n_splits=k,shuffle=True)
#classifier = LogisticRegression()
j = 0
for train, test in cv.split(X, y):
cur_train = train_df.iloc[train]
cur_test = train_df.iloc[test]
cur_train.to_csv(path + 'train_' + str(j) + '.csv', encoding='utf-8', index=False)
print(len(cur_train.index))
print(path + 'train_' + str(j) + '.csv')
cur_test.to_csv(path + 'test_' + str(j) + '.csv', encoding='utf-8', index=False)
print(len(cur_test.index))
print(path + 'test_' + str(j) + '.csv')
j +=1
def strr(list):
return str(['%.3f' % val for val in list])
def pretty(d, indent=0):
for key, value in d.items():
print('\t' * indent + str(key))
if isinstance(value, dict):
pretty(value, indent+1)
else:
print('*****************************************************************************************')
print('\t' * (indent+1) + strr(value))
print('mean:', mean(value))
print('variance:', var(value))
print('*****************************************************************************************')
def test_rep_str(D_features,Y_features,X_features,path1,path2,k=5,droped=False,classifier='log_reg'):
if classifier=='log_reg':
classifier = LogisticRegression()
elif classifier=='rand_forest':
classifier=RandomForestClassifier(max_depth=2, random_state=0)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
i = 0
MI_inp = dict()
MI_out = dict()
MI_test=dict()
for j in range(0, k):
print(path2+str(j)+'.csv')
cur_train=read_from_csv(path1+str(j)+'.csv')
print(path1+str(j)+'.csv')
cur_test=read_from_csv(path2+str(j)+'.csv')
#atts=cur_train.columns
#atts=atts.tolist()
#list=[att.replace('_x','').replace('_y','') for att in atts]
#atts
for item in D_features:
pval, mi = ci_test.ulti_fast_permutation_tst(cur_train, item, Y_features, X_features, pvalue=0.01,
debug=False, loc_num_samples=100,
num_samples=100, view=False)
rmi = round(mi, 3)
print('####################################')
print(len(cur_train.index))
print('Mutual information in train data:', item, 'pvalue:', pval, 'MI:', rmi)
print('####################################')
if item not in MI_inp.keys():
MI_inp[item]= [rmi]
else:
MI_inp[item] = MI_inp[item] +[rmi]
inf = Info(cur_test)
for item in D_features:
pval, mi = ci_test.ulti_fast_permutation_tst(cur_test, item, Y_features, X_features, pvalue=0.01,
debug=False, loc_num_samples=100,
num_samples=100, view=False)
mi = round(mi, 3)
print('####################################')
print('MI in test data:', item,'pvalue:' , pval, 'MI:', mi)
print('####################################')
if item not in MI_test.keys():
MI_test[item]= [mi]
else:
MI_test[item] = MI_test[item] +[mi]
mi = inf.CMI(D_features+X_features, Y_features)
mi = round(mi, 3)
print('Predictive Power(training)', mi)
inf = Info(cur_test)
mi = inf.CMI(D_features, Y_features,X_features)
mi = round(mi, 3)
print('Repaired MI test', mi)
mi = inf.CMI(D_features+X_features, Y_features)
mi = round(mi, 3)
print('Predictive Power(test)', mi)
cur_train[Y_features[0]] = pd.to_numeric(cur_train[Y_features[0]])
ate = cur_train.groupby([D_features[0]])[Y_features[0]].mean()
print(ate)
# m = abs(ate.values[0] - ate.values[1]).value
#ate0.insert(0, m)
#print('Repaied ATE \n', ate)
# new=abs(max((ate.values[0] / ate.values[1]) - 1, (ate.values[0] / ate.values[1]) - 1)).value
#print('Repaied J \n', new)
#J1.insert(0,new)
#ate = cur_test.groupby([D_features[0]])[Y_features[0]].mean()
#m = abs(ate.values[0] - ate.values[1]).value
#ate0.insert(0, m)
#print('Repaied ATE test \n', ate)
#new=abs(max((ate.values[0] / ate.values[1]) - 1, (ate.values[0] / ate.values[1]) - 1)).value
#print('Repaied J test \n', new)
#J1.insert(0,new)
# print("len",cur_train.columns,len(cur_train.index),cur_train.shape)
# print("len",len(cur_test.index),cur_test.shape)
j += 1
#inf = Info(cur_train)
#MI_inp.insert(0, I)
cur_test['W']=1
train_objs_num = len(cur_train)
dataset = pd.concat(objs=[cur_train[ D_features+X_features], cur_test[ D_features+X_features]], axis=0)
dataset = pd.get_dummies(dataset)
dft1 = dataset[:train_objs_num]
dft4 = dataset[train_objs_num:]
train_X = dft1.values
train_y = cur_train[Y_features[0]].values
# train_y=train_y.flatten()
#if droped:
# dft4 = pd.get_dummies(cur_test[X_features])
#else:
# dft4 = pd.get_dummies(cur_test[ D_features+X_features])
#print(cur_test[D_features+X_features])
dft5 = pd.get_dummies(cur_test[Y_features])
# logit = sm.Logit(train_df['bscore'], train_df['juv_misd_count'])
X = dft4.values
y = dft5.values
y = y.flatten()
#print("#####################",len(train_X),len(train_y),type(train_X),type(train_y),train_X,train_y,X.shape)
print(X.shape,train_X.shape)
kfold = model_selection.KFold(n_splits=10, random_state=7)
modelCV = LogisticRegression()
probas_ = classifier.fit(train_X, train_y).predict_proba(X)
scoring = 'accuracy'
results = model_selection.cross_val_score(modelCV, train_X, train_y, cv=kfold, scoring=scoring)
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@',mean(results))
#logit = sm.Logit(train_X,cur_train[Y_features[0]])
# fit the model
#result = logit.fit()
#print(probas_)
y_pred = classifier.predict(X)
cur_test.insert(0,'y',y_pred) # insert the outcome into the test dataset
for item in D_features:
pval, mi = ci_test.ulti_fast_permutation_tst(cur_test, item, ['y'], X_features, pvalue=0.01,
debug=False, loc_num_samples=100,
num_samples=100, view=False)
mi = round(mi, 3)
print('*************************')
print(' MI in output',item,'pvalue:' , pval, 'MI:', mi)
print('***************************')
if item not in MI_out.keys():
MI_out[item] = [mi]
else:
MI_out[item] = MI_out[item] + [mi]
print(path1 + str(j) + '.csv')
for item in D_features:
pval, mi = ci_test.ulti_fast_permutation_tst(cur_test, item, ['y'], pvalue=0.01,
debug=False, loc_num_samples=100,
num_samples=100, view=False)
#mi = round(mi, 3)
print('*************************')
print(' MI in output (marginal)',item,'pvalue:' , pval, 'MI:', mi)
print('***************************')
ate = cur_test.groupby([D_features[0]])[['y']].mean()
print(ate)
# print("ATE on on test labels", '\n averagee:', mean(ate1), "variancee", var(ate1))
# print("ATE on on outcome", '\n averagee:', mean(ate2), "variancee", var(ate2))
# print("J on on input", '\n averagee:', mean(J1), "variancee", var(J1))
# print("J on on outcome", '\n averagee:', mean(J2), "variancee", var(J2))
print('####################################')
#ate = cur_test.groupby(D_features)[Y_features[0]].mean()
#m = abs(ate.values[0] - ate.values[1]).value
#ate1.insert(0, m)
ate = cur_test.groupby(D_features)['y'].mean()
#m = abs(ate.values[0] - ate.values[1]).value
#ate2.insert(0, m)
print('ATE on outcome:',ate)
#new=abs(max((ate.values[0] / ate.values[1]) - 1, (ate.values[0] / ate.values[1]) - 1)).value
#print('Outcome J \n', new)
#J2.insert(0,new)
fpr, tpr, thresholds = roc_curve(y, probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
plt.plot(fpr, tpr, lw=1, alpha=0.3,
label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
cur_test.to_csv(path1 + '_trained.csv')
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Luck', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
#print("Mutual Information on repaired traning labels", '\n averagee:', mean(rep_MI_inp), "variancee",var(rep_MI_inp))
#print("ATE on repaired traning labels", '\n averagee:', mean(ate0), "variancee", var(ate0))
#print("Mutual Information on test labels", '\n averagee:', mean(MI_inp.values()), "variancee", var(MI_inp.values()))
#print("Mutual Information on outcome", '\n avg:', mean(MI_out.values()), "variancee", var(MI_out.values()))
print("Mutual Information on train: \n")
pretty(MI_inp)
plt.show()
print("Mutual Information on test: \n")
pretty(MI_test)
#print(" Mutual Information on repaired data", rep_MI_inp)
print("Mutual Information on outcome: \n")
pretty(MI_out)
plt.show()
return MI_out,MI_inp, mean_auc, std_auc
def classification(cur_train,cur_test, dependant, dependee, classifier='log_reg'):
if classifier=='log_reg':
classifier = LogisticRegression()
elif classifier=='rand_forest':
classifier=RandomForestClassifier(max_depth=2, random_state=0)
train_objs_num = len(cur_train)
dataset = pd.concat(objs=[cur_train[dependant], cur_test[ dependant]], axis=0)
dataset = pd.get_dummies(dataset)
dft1 = dataset[:train_objs_num]
dft4 = dataset[train_objs_num:]
train_X = dft1.values
train_y = cur_train[dependee[0]].values
dft5 =
|
pd.get_dummies(cur_test[dependee])
|
pandas.get_dummies
|
#######calculate portfolio return series
import pandas as pd
import numpy as np
# read in adjusted close prices
adjust_price=pd.read_csv("../adjust_price/adjust_price.csv")
adjust_price=adjust_price.set_index('date')
########################## define the function ##############################
def cal_rt(port_info,H):
# get port info; the table passed in should have no index set
port_info = port_info.set_index('date')
port_info = port_info.dropna(how='all')
# slice the holdings info by the holding period H (keep one row every H dates)
port_info = port_info.iloc[range(0, len(port_info), H), :]
# adjust_price pct_change
pct_change = adjust_price.shift(-H) / adjust_price - 1
##### compute portfolio returns (before fees)
# stack rt & port_info, then merge them so returns can be multiplied by positions
rt_stack = pct_change.stack().reset_index()
port_info_stack = port_info.stack().reset_index()
rt_stack = rt_stack.rename(columns={'level_1': 'underlying_symbol', 0: 'rt'})
port_info_stack = port_info_stack.rename(columns={'level_1': 'underlying_symbol', 0: 'port'})
df_combined = port_info_stack.merge(rt_stack, on=['date', 'underlying_symbol'], how='left')
# long-leg return
long_rt = df_combined[df_combined['port'] == 1].groupby('date')['rt'].mean()
# short-leg return
short_rt = df_combined[df_combined['port'] == -1].groupby('date')['rt'].mean()
# long-short
### preprocess: multiply each return by its position sign, then take the mean
df_combined['rt1']=df_combined['rt']*df_combined['port']
long_short_rt = df_combined.groupby('date')['rt1'].mean()
##### compute turnover and transaction costs
# long leg
port_long_fill = port_info[port_info == 1].fillna(0)
turnover_long = (port_long_fill - port_long_fill.shift(1)).abs().sum(axis=1) / port_long_fill.sum(axis=1)
cost_long = turnover_long * 0.0003
# short leg
port_short_fill = port_info[port_info == -1].fillna(0)
turnover_short = (port_short_fill - port_short_fill.shift(1)).abs().sum(axis=1) / port_short_fill.abs().sum(axis=1)
cost_short = turnover_short * 0.0003
# long-short
port_info_fill = port_info.fillna(0)
turnover = (port_info_fill - port_info_fill.shift(1)).abs().sum(axis=1) / port_info_fill.abs().sum(axis=1)
cost_long_short = turnover * 0.0003
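# Hedged worked example of the turnover convention above (made-up numbers):
# if yesterday's book is {A: 1, B: 1} and today's is {B: 1, C: 1}, the absolute
# position changes sum to |0-1| + |1-1| + |1-0| = 2 and today's gross exposure is 2,
# so turnover = 2 / 2 = 1.0 and the assumed one-way fee is 1.0 * 0.0003 = 3 bps.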
###### deduct transaction costs
long_rt = long_rt - cost_long
short_rt = short_rt - cost_short
long_short_rt = long_short_rt - cost_long_short
##### merge and save
long_rt = pd.DataFrame(long_rt, columns=['long_rt'])
short_rt = pd.DataFrame(short_rt, columns=['short_rt'])
long_short_rt =
|
pd.DataFrame(long_short_rt, columns=['long_short_rt'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# Created on Fri Jul 17 19:42:23 2020
#
# Copyright 2020 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from yahoofinancials import YahooFinancials
import pandas as pd
from datetime import datetime, timedelta
ticker = 'GOOG'
yahoo_financials = YahooFinancials(ticker)
end_date = datetime.today()
historical_stock_prices = yahoo_financials.get_historical_price_data('2004-08-01', end_date.strftime("%Y-%m-%d"), 'weekly')
# Downloading multiple tickers
start_date = datetime.today()-timedelta(30)
stocks = ["TSLA", "AMZN", "GOOG", "MSFT", "FB", "ES=F", "CABK.MC"]
close_price =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
If payment has not been received within 45 days of the close of a deal, the vendor sends a reminder to the customer. By region, what is the current (where "today" is December 11, 2018) total value of contracts to be collected that are more than 45 days past close? More than 90 days? More than 135 days? How does this compare to contracts closed in 2017?
Contracts in 2017 by region:
APAC 87399.0
Africa 23178.0
EMEA 166514.0
Latin America 21355.0
North America 151625.0
"""
import pandas as pd
import numpy as np
import datetime
from datetime import datetime, timedelta
contracts = pd.DataFrame(pd.read_csv('contracts.csv'))
accounts = pd.DataFrame(pd.read_csv('accounts.csv'))
contracts['closingDate'] = pd.to_datetime(contracts['closingDate'])
contracts['paymentDate'] = pd.to_datetime(contracts['paymentDate'])
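# Hedged, self-contained illustration of the "days past close" bucketing asked about
# above (tiny made-up frame; the real analysis uses the contracts table just loaded):
_example = pd.DataFrame({'closingDate': pd.to_datetime(['2018-10-01', '2018-08-01'])})
_days_open = (pd.to_datetime('2018-12-11') - _example['closingDate']).dt.days
print(_days_open.tolist())  # [71, 132]: both exceed 45 days past close, only the second exceeds 90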
regionGrouped = accounts.groupby(by = 'region')
# print(regionGrouped.size())
# region
# APAC 596
# Africa 190
# EMEA 1031
# Latin America 202
# North America 981
# dtype: int64
group_APAC = regionGrouped.get_group("APAC")
# # # print(group_APAC.head())
# # # accountID region partnerInvolved
# # # 5 3oy2wf APAC Yes
# # # 8 0w3ynj APAC No
# # # 11 kt2n1f APAC No
# # # 15 ta2z0d APAC No
# # # 19 g8rycf APAC No
group_Africa = regionGrouped.get_group("Africa")
group_EMEA = regionGrouped.get_group("EMEA")
group_LatinAmerica = regionGrouped.get_group("Latin America")
group_NorthAmerica = regionGrouped.get_group("North America")
lst = group_APAC['accountID']
temporary = pd.DataFrame()
val_Size = 0
year_group = contracts[(pd.to_datetime(contracts['closingDate'])>= pd.to_datetime('2017-01-01')) & (pd.to_datetime(contracts['closingDate'])<= pd.to_datetime('2017-12-31'))]
year_group_diff = (pd.to_datetime(year_group['paymentDate']) - pd.to_datetime(year_group['closingDate'])).dt.days
"""
print(year_group_diff.describe())
count 1433.000000
mean 93.782973
std 19.110154
min 41.000000
25% 80.000000
50% 94.000000
75% 107.000000
max 167.000000
dtype: float64
"""
for an_item in lst:
contracts_index = year_group['contractID'].str.contains('DKU-'+an_item)
found_indices = year_group[contracts_index]
temporary = temporary.append(found_indices)
temporary.paymentDate = temporary.paymentDate.replace('', np.nan)
temp_blankDate = temporary[temporary.paymentDate.isnull()]
val_Size = temporary.contractSize * temporary.contractLength
total_Value = sum(val_Size)
print('APAC', total_Value)
###############################################################
lst = group_Africa['accountID']
temporary =
|
pd.DataFrame()
|
pandas.DataFrame
|
import datetime
import numpy as np
import pandas as pd
import pandas.testing as pdt
from cape_privacy.pandas import dtypes
from cape_privacy.pandas.transformations import DateTruncation
from cape_privacy.pandas.transformations import NumericRounding
def _make_apply_numeric_rounding(input, expected_output, ctype, dtype):
transform = NumericRounding(dtype=ctype, precision=1)
df = pd.DataFrame({"amount": input}).astype(dtype)
expected = pd.DataFrame({"amount": expected_output}).astype(dtype)
df["amount"] = transform(df.amount)
return df, expected
def _make_apply_datetruncation(frequency, input_date, expected_date):
transform = DateTruncation(frequency=frequency)
df = pd.DataFrame({"date": [input_date]})
expected = pd.DataFrame({"date": [expected_date]})
df["date"] = transform(df.date)
return df, expected
def test_rounding_float32():
input = [10.8834, 4.21221]
expected_output = [10.9, 4.2]
df, expected = _make_apply_numeric_rounding(
input, expected_output, dtypes.Float, np.float32
)
pdt.assert_frame_equal(df, expected)
def test_rounding_float64():
input = [10.8834, 4.21221]
expected_output = [10.9, 4.2]
df, expected = _make_apply_numeric_rounding(
input, expected_output, dtypes.Double, np.float64
)
pdt.assert_frame_equal(df, expected)
def test_truncate_date_year():
input_date = datetime.date(year=2018, month=10, day=3)
expected_date = datetime.date(year=2018, month=1, day=1)
df, expected = _make_apply_datetruncation("YEAR", input_date, expected_date)
pdt.assert_frame_equal(df, expected)
def test_truncate_datetime_year():
input_date = pd.Timestamp(year=2018, month=10, day=3)
expected_date = pd.Timestamp(year=2018, month=1, day=1)
df, expected = _make_apply_datetruncation("YEAR", input_date, expected_date)
pdt.assert_frame_equal(df, expected)
def test_truncate_datetime_month():
input_date = pd.Timestamp(year=2018, month=10, day=3, hour=9, minute=20, second=25)
expected_date = pd.Timestamp(year=2018, month=10, day=1, hour=0, minute=0, second=0)
df, expected = _make_apply_datetruncation("MONTH", input_date, expected_date)
pdt.assert_frame_equal(df, expected)
def test_truncate_datetime_day():
input_date = pd.Timestamp(year=2018, month=10, day=3, hour=9, minute=20, second=25)
expected_date = pd.Timestamp(year=2018, month=10, day=3, hour=0, minute=0, second=0)
df, expected = _make_apply_datetruncation("DAY", input_date, expected_date)
|
pdt.assert_frame_equal(df, expected)
|
pandas.testing.assert_frame_equal
|
# -*- coding: utf-8 -*-
import pandas as pd
import scrapy
from scrapy import Request
from scrapy import Selector
from scrapy import signals
from fooltrader.contract.files_contract import get_kdata_path
from fooltrader.utils.utils import index_df_with_time, to_time_str, to_float
class Sp500Spider(scrapy.Spider):
name = "sp500_spider"
def __init__(self, name=None, **kwargs):
super().__init__(name, **kwargs)
self.security_item = {'id': 'index_nasdaq_sp500',
'code': 'SP500',
'name': 'SP500',
'listDate': '1871-01-01',
'exchange': 'nasdaq',
'type': 'index'}
self.df_close = pd.DataFrame()
self.df_pe =
|
pd.DataFrame()
|
pandas.DataFrame
|
import logging
pvl_logger = logging.getLogger('pvlib')
import datetime
import numpy as np
import pandas as pd
from nose.tools import raises, assert_almost_equals
from numpy.testing import assert_almost_equal
from pvlib.location import Location
from pvlib import clearsky
from pvlib import solarposition
from pvlib import irradiance
from pvlib import atmosphere
# setup times and location to be tested.
times = pd.date_range(start=datetime.datetime(2014, 6, 24),
end=datetime.datetime(2014, 6, 26), freq='1Min')
tus = Location(32.2, -111, 'US/Arizona', 700)
times_localized = times.tz_localize(tus.tz)
ephem_data = solarposition.get_solarposition(times, tus, method='pyephem')
irrad_data = clearsky.ineichen(times, tus, linke_turbidity=3,
solarposition_method='pyephem')
dni_et = irradiance.extraradiation(times.dayofyear)
ghi = irrad_data['ghi']
# the test functions. these are almost all functional tests.
# need to add physical tests.
def test_extraradiation():
assert_almost_equals(1382, irradiance.extraradiation(300), -1)
def test_extraradiation_dtindex():
irradiance.extraradiation(times)
def test_extraradiation_doyarray():
irradiance.extraradiation(times.dayofyear)
def test_extraradiation_asce():
assert_almost_equals(
1382, irradiance.extraradiation(300, method='asce'), -1)
def test_extraradiation_spencer():
assert_almost_equals(
1382, irradiance.extraradiation(300, method='spencer'), -1)
def test_extraradiation_ephem_dtindex():
irradiance.extraradiation(times, method='pyephem')
def test_extraradiation_ephem_scalar():
assert_almost_equals(
1382, irradiance.extraradiation(300, method='pyephem').values[0], -1)
def test_extraradiation_ephem_doyarray():
irradiance.extraradiation(times.dayofyear, method='pyephem')
def test_grounddiffuse_simple_float():
irradiance.grounddiffuse(40, 900)
def test_grounddiffuse_simple_series():
ground_irrad = irradiance.grounddiffuse(40, ghi)
assert ground_irrad.name == 'diffuse_ground'
def test_grounddiffuse_albedo_0():
ground_irrad = irradiance.grounddiffuse(40, ghi, albedo=0)
assert 0 == ground_irrad.all()
@raises(KeyError)
def test_grounddiffuse_albedo_invalid_surface():
irradiance.grounddiffuse(40, ghi, surface_type='invalid')
def test_grounddiffuse_albedo_surface():
irradiance.grounddiffuse(40, ghi, surface_type='sand')
def test_isotropic_float():
irradiance.isotropic(40, 100)
def test_isotropic_series():
irradiance.isotropic(40, irrad_data['dhi'])
def test_klucher_series_float():
irradiance.klucher(40, 180, 100, 900, 20, 180)
def test_klucher_series():
irradiance.klucher(40, 180, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'],
ephem_data['apparent_azimuth'])
def test_haydavies():
irradiance.haydavies(40, 180, irrad_data['dhi'], irrad_data['dni'],
dni_et,
ephem_data['apparent_zenith'],
ephem_data['apparent_azimuth'])
def test_reindl():
irradiance.reindl(40, 180, irrad_data['dhi'], irrad_data['dni'],
irrad_data['ghi'], dni_et,
ephem_data['apparent_zenith'],
ephem_data['apparent_azimuth'])
def test_king():
irradiance.king(40, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'])
def test_perez():
AM = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
irradiance.perez(40, 180, irrad_data['dhi'], irrad_data['dni'],
dni_et, ephem_data['apparent_zenith'],
ephem_data['apparent_azimuth'], AM)
# klutcher (misspelling) will be removed in 0.3
def test_total_irrad():
models = ['isotropic', 'klutcher', 'klucher',
'haydavies', 'reindl', 'king', 'perez']
AM = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
for model in models:
total = irradiance.total_irrad(
32, 180,
ephem_data['apparent_zenith'], ephem_data['azimuth'],
dni=irrad_data['dni'], ghi=irrad_data['ghi'],
dhi=irrad_data['dhi'],
dni_extra=dni_et, airmass=AM,
model=model,
surface_type='urban')
def test_globalinplane():
aoi = irradiance.aoi(40, 180, ephem_data['apparent_zenith'],
ephem_data['apparent_azimuth'])
airmass = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
gr_sand = irradiance.grounddiffuse(40, ghi, surface_type='sand')
diff_perez = irradiance.perez(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['apparent_azimuth'], airmass)
irradiance.globalinplane(
aoi=aoi, dni=irrad_data['dni'], poa_sky_diffuse=diff_perez,
poa_ground_diffuse=gr_sand)
def test_disc_keys():
clearsky_data = clearsky.ineichen(times, tus, linke_turbidity=3)
disc_data = irradiance.disc(clearsky_data['ghi'], ephem_data['zenith'],
ephem_data.index)
assert 'dni' in disc_data.columns
assert 'kt' in disc_data.columns
assert 'airmass' in disc_data.columns
def test_disc_value():
times = pd.DatetimeIndex(['2014-06-24T12-0700','2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith =
|
pd.Series([10.567, 72.469], index=times)
|
pandas.Series
|
import pandas as pd
import pymysql
HOST = '127.0.0.1'
PORT = 3306
USER = 'root'
PASSWORD = 'pw'
DB_NAME = 'test'
conn = pymysql.connect(host=HOST, port=PORT, user=USER, password=PASSWORD, db=DB_NAME, charset='utf8')
curs = conn.cursor()
EXCEL_FILE = 'Problem.xlsx'
df =
|
pd.read_excel(EXCEL_FILE)
|
pandas.read_excel
|
from espnff import League
import pandas as pd
from requests import get
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import numpy as np
# Helper
def clean_owner(owner):
if owner == '<NAME>':
return '<NAME>'
elif owner == '<NAME>':
return '<NAME>'
else:
return owner
# For use after matchups have been completed and ESPN records have been updated (Tuesday morning)
def get_scoreboard_df(league_id, year, week):
league = League(league_id, year)
scoreboard = league.scoreboard(week=week)
scoreboard_list_of_dicts = [
{
'Home_Team': matchup.home_team.team_name.title(),
'Home_Owner': clean_owner(matchup.home_team.owner.title()),
'Home_Score': matchup.home_score,
'Home_Wins': matchup.home_team.wins,
'Home_Losses': matchup.home_team.losses,
'Away_Team': matchup.away_team.team_name.title(),
'Away_Owner': clean_owner(matchup.away_team.owner.title()),
'Away_Score': matchup.away_score,
'Away_Wins': matchup.away_team.wins,
'Away_Losses': matchup.away_team.losses,
'Winner': matchup.home_team.team_name.title() if matchup.home_score > matchup.away_score else matchup.away_team.team_name.title(),
'Loser': matchup.home_team.team_name.title() if matchup.home_score < matchup.away_score else matchup.away_team.team_name.title()
}
for matchup in scoreboard
]
cols = ['Team', 'Owner', 'Score', 'Wins', 'Losses']
# Updating Wins and Losses based on this week's result
for matchup_dict in scoreboard_list_of_dicts:
if matchup_dict['Winner'] == matchup_dict['Home_Team']:
for col in cols:
matchup_dict['Winning_' + col] = str(matchup_dict['Home_' + col])
matchup_dict['Losing_' + col] = str(matchup_dict['Away_' + col])
else:
for col in cols:
matchup_dict['Winning_' + col] = str(matchup_dict['Away_' + col])
matchup_dict['Losing_' + col] = str(matchup_dict['Home_' + col])
scoreboard_df = pd.DataFrame(scoreboard_list_of_dicts)
scoreboard_df = scoreboard_df[['Winning_' + col for col in cols] + ['Losing_' + col for col in cols]]
return scoreboard_df
### Transaction Parsers
# use for all
def parse_team1_abbrev(text):
return text.split(' ')[0]
# use for trades
def parse_team2_abbrev(text):
return text.split(' to ')[1].split('.')[0]
# use for trades
def parse_trade_team1_players(text):
team1 = parse_team1_abbrev(text)
players = [x.split(',')[0] for x in text.split(team1 + ' traded ')[1:]]
return ', '.join(players)
# use for trades
def parse_trade_team2_players(text):
team2 = parse_team2_abbrev(text)
players = [x.split(',')[0] for x in text.split(team2 + ' traded ')[1:]]
return ', '.join(players)
# use for adds and add/drops
def parse_added_player(text):
return text.split(' added ')[1].split(',')[0]
# use for drops and add/drops
def parse_dropped_player(text):
return text.split(' dropped ')[1].split(',')[0]
# use for adds and add/drops
def parse_added_player_position(text):
return text.split(' added ')[1].split(' from ')[0].split(' ')[-1].replace('D/ST', '')
# use for drops and add/drops
def parse_dropped_player_position(text):
return text.split(' dropped ')[1].split(' to ')[0].split(' ')[-1].replace('D/ST', '')
def parse_waiver_bid(text):
return text.split(' for ')[1].replace('.', '')
### End of transactions parsers
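# Hedged mini-check of the parsers above on a made-up description string
# (the real strings come from ESPN's recent-activity page and may differ):
_demo = 'JOSE added Alvin Kamara, RB from Waivers for $12.'
assert parse_team1_abbrev(_demo) == 'JOSE'
assert parse_added_player(_demo) == 'Alvin Kamara'
assert parse_added_player_position(_demo) == 'RB'
assert parse_waiver_bid(_demo) == '$12'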
def clean_abbrev(abbrev):
clean = abbrev.replace('stdy', 'JOSE')
clean = clean.replace('Slon', 'SLON')
clean = clean.replace('ARTE', 'AA')
return clean
# Run every minute - Gives new transactions for current day through 1 month before. Returns None if no new transactions.
def get_transactions_df(league_id, year, abbrevs):
# Get all transactions from current day through 1 month before
current_date = datetime.now().date()
current_date_string = current_date.strftime("%Y%m%d")
last_month_date = current_date - timedelta(days=30)
last_month_date_string = last_month_date.strftime("%Y%m%d")
url = 'http://games.espn.com/ffl/recentactivity?leagueId=' + str(league_id) + '&seasonId=' + str(year) + '&activityType=2&startDate=' + str(last_month_date_string) + '&endDate=' + str(current_date_string) + '&teamId=-1&tranType=-2'
r = get(url)
soup = BeautifulSoup(r.text, 'html.parser')
table = soup.select_one('table.tableBody')
trs = table.find_all('tr')
headers = ['Datetime', 'Transaction', 'Description']
rows = []
for tr in trs[2:]:
rows.append([])
for td in tr:
try:
rows[-1].append(td.text)
except:
pass
rows = [x[:-1] for x in rows]
for x in rows:
x[0] = datetime.strptime(x[0] + str(year), '%a, %b %d%I:%M %p%Y')
x[1] = clean_abbrev(x[1].split('\xa0\xa0')[1].replace('(By LM)', '').replace(' (Waivers)', ''))
x[2] = clean_abbrev(x[2].split(' ')[0]) + ' ' + clean_abbrev(' '.join(x[2].split(' ')[1:]).replace('*', ''))
if 'Trade' in x[1]:
for abbrev in abbrevs:
x[1] = x[1].replace(abbrev + ' ', '')
x[2] = x[2].replace(abbrev, abbrev + '. ')
x[2] = x[2].replace(abbrev[0] + '. ' + abbrev[1:], abbrev)
x[2] = x[2].replace('. ', ' ').strip(' ')
else:
for abbrev in abbrevs:
x[1] = x[1].replace(abbrev + ' ', '')
x[2] = x[2].replace(abbrev, '. ' + abbrev)
x[2] = x[2][2:] + '.'
transactions_df = pd.DataFrame.from_records(rows, columns=headers)
past_transactions_csv_fn = 'past_transactions.csv'
# Reference past transactions csv
past_transactions_df = pd.read_csv(past_transactions_csv_fn, parse_dates=['Datetime'], index_col=False)
# Only keep new transactions
transactions_df_merged_with_indicator = transactions_df.merge(past_transactions_df.drop_duplicates(), how='left', indicator=True)
transactions_df = transactions_df_merged_with_indicator[transactions_df_merged_with_indicator['_merge'] == 'left_only']
if transactions_df.empty:
return None
else:
pd.options.mode.chained_assignment = None
transactions_df['Team1'] = transactions_df.apply(lambda x: parse_team1_abbrev(x.Description), axis=1)
transactions_df['Team2'] = transactions_df.apply(lambda x: parse_team2_abbrev(x.Description) if 'Trade' in x.Transaction else np.NaN, axis=1)
transactions_df['Team1_Traded_Players'] = transactions_df.apply(lambda x: parse_trade_team1_players(x.Description) if 'Trade' in x.Transaction else np.NaN, axis=1)
transactions_df['Team2_Traded_Players'] = transactions_df.apply(lambda x: parse_trade_team2_players(x.Description) if 'Trade' in x.Transaction else np.NaN, axis=1)
transactions_df['Added_Player'] = transactions_df.apply(lambda x: parse_added_player(x.Description) if 'Add' in x.Transaction else np.NaN, axis=1)
transactions_df['Added_Player_Position'] = transactions_df.apply(lambda x: parse_added_player_position(x.Description) if 'Add' in x.Transaction else np.NaN, axis=1)
transactions_df['Dropped_Player'] = transactions_df.apply(lambda x: parse_dropped_player(x.Description) if 'Drop' in x.Transaction else np.NaN, axis=1)
transactions_df['Dropped_Player_Position'] = transactions_df.apply(lambda x: parse_dropped_player_position(x.Description) if 'Drop' in x.Transaction else np.NaN, axis=1)
transactions_df['Is_Waiver'] = transactions_df.apply(lambda x: 1 if ' for $' in x.Description else 0, axis=1)
transactions_df['Waiver_Bid'] = transactions_df.apply(lambda x: parse_waiver_bid(x.Description) if x.Is_Waiver else np.NaN, axis=1)
# Use these new transactions to append to past transactions csv
partial_transactions_df = transactions_df[['Datetime', 'Transaction', 'Description']]
past_transactions_df =
|
pd.concat([partial_transactions_df, past_transactions_df])
|
pandas.concat
|
# Importing Necessary Libraries
import requests
from bs4 import BeautifulSoup
import pandas as pd
import sqlalchemy, pymysql
from sqlalchemy import create_engine
import mysql.connector
from mysql.connector import Error
import configparser
import os
# Database Connection
def fetch_variables():
thisfolder = os.path.dirname(os.path.abspath(__file__))
initfile = os.path.join(thisfolder, 'test.env')
config = configparser.ConfigParser()
config.read(initfile)
global host1, user1, passwd1,max_page
host1 = config.get('DB', 'DB_HOST')
user1 = config.get('DB', 'DB_USERNAME')
passwd1 = config.get('DB', 'DB_PASSWORD')
max_page=config.get('DB','MAX_PAGENATION')
def db_connect_extraction():
mydb1 = mysql.connector.connect(host=host1, user=user1, passwd=passwd1)
mycursor = mydb1.cursor()
try:
mycursor.execute("Create database Glassdoor_Interviews")
except Error as e:
print("Database Created already!")
FAANG_Companies = [{('Amazon', "E6036", 'amazon_interviewdetails')},{('Facebook', 'E40772', 'facebook_interviewdetails')},{('apple', 'E1138', 'apple_interviewdetails')},{('Netflix', 'E11891', 'netflix_interviewdetails')},{('Google', 'E9079', 'google_interviewdetails')}]
for i in FAANG_Companies:
List = []
mydb1 = mysql.connector.connect(host=host1, user=user1, passwd=passwd1,database="Glassdoor_Interviews")
engine = create_engine("mysql+pymysql://{user}:{pw}@{host}/{db}".format(user=user1, pw=passwd1, host=host1,db="glassdoor_interviews"))
mycursor = mydb1.cursor()
for Company, ID, j in i:
table_name = j
_SQL = """SHOW TABLES"""
mycursor.execute("show tables")
results = mycursor.fetchall()
results_list = [item[0] for item in results]
if table_name not in results_list:
print("Table",table_name, 'was not found! in the DB')
for i in range(1, int(max_page)):
URL = 'https://www.glassdoor.co.in/Interview/' + Company + "-Interview-Questions-" + ID + '_P' + str(i) + '.htm?sort.sortType=RD&sort.ascending=false'
headers = {'User-Agent': "Mozilla/5.0 (FAANG_Companies1; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"}
response = requests.get(URL, headers=headers)
data = BeautifulSoup(response.text, 'lxml')
containers = data.findAll("li", class_="empReview cf")
for container in containers:
Applied_Dgn = container.find('span', class_="reviewer").text
Applied_Dt = container.find('time', class_='date subtle small').text
Applied_Dt = pd.to_datetime(Applied_Dt)
Interview_Result = container.findAll("span", class_="middle")[0].text
try:
Interview_Exp = container.findAll("span", class_="middle")[1].text
except:
Interview_Exp = "NA"
try:
Interview_Type = container.findAll("span", class_="middle")[2].text
except IndexError:
Interview_Type = "NA"
try:
Applied_loc = container.find('span', class_='authorLocation').text
except AttributeError:
Applied_loc = "NA"
Interview_Details = container.find("p",class_="interviewDetails continueReading interviewContent mb-xsm").text
List.append([Company, Applied_Dgn, Applied_Dt, Interview_Result, Interview_Exp, Interview_Type,Applied_loc,Interview_Details])
FAANG_Interviews = pd.DataFrame(List, columns=['Company', 'Applied_Dgn', 'Applied_Dt', 'Interview_Result','Interview_Exp', 'Interview_Type', 'Applied_loc','Interview_Details'])
FAANG_Interviews.to_sql(j,con=engine, if_exists='fail', chunksize=1000, index=False)
else:
print(table_name, 'was found!')
Max_date = pd.read_sql('select max(Applied_Dt) from (%s)'%(j), con=mydb1)
Database_MaxDate = Max_date['max(Applied_Dt)'][0]
print(Database_MaxDate)
List = []
count=1
while(count>0):
for i in range(count):
URL = 'https://www.glassdoor.co.in/Interview/' + Company + "-Interview-Questions-" + ID + '_P' + str(i) + '.htm?sort.sortType=RD&sort.ascending=false'
headers = {'User-Agent': "Mozilla/5.0 (FAANG_Companies1; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"}
response = requests.get(URL, headers=headers)
data = BeautifulSoup(response.text, 'lxml')
containers = data.findAll("li", class_="empReview cf")
for container in containers:
Applied_Dgn = container.find('span', class_="reviewer").text
Applied_Dt = container.find('time', class_='date subtle small').text
Applied_Dt =
|
pd.to_datetime(Applied_Dt)
|
pandas.to_datetime
|
#############################################################################
# Copyright (C) 2020-2021 German Aerospace Center (DLR-SC)
#
# Authors:
#
# Contact: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
import unittest
from pandas.core.indexes.base import Index
from pyfakefs import fake_filesystem_unittest
from freezegun import freeze_time
from datetime import date, datetime, time, timedelta
import os
import pandas as pd
from memilio.epidata import getDIVIData as gdd
from memilio.epidata import getDataIntoPandasDataFrame as gd
from memilio.epidata import defaultDict as dd
from unittest.mock import patch, call
class TestGetDiviData(fake_filesystem_unittest.TestCase):
maxDiff = None
path = '/home/DiviData'
test_df = pd.DataFrame(
{
'Date':
['2021-09-08', '2021-09-08', '2021-09-08', '2021-09-08', '2021-09-08',
'2021-09-08', '2021-09-08', '2021-09-08', '2021-09-08', '2021-09-08',
'2021-09-08', '2021-09-08', '2021-09-08', '2021-09-08', '2021-09-08',
'2021-09-08'],
'ICU':
[16, 52, 111, 7, 432, 126, 74, 175, 208, 33, 79, 16, 11, 27, 5, 15],
'ICU_ventilated':
[13, 34, 63, 5, 220, 53, 38, 79, 111, 15, 53, 8, 7, 13, 2, 9],
'ID_State': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
'State':
['Schleswig-Holstein', 'Hamburg', 'Niedersachsen', 'Bremen',
'Nordrhein-Westfalen', 'Hessen', 'Rheinland-Pfalz',
'Baden-Württemberg', 'Bayern', 'Saarland', 'Berlin', 'Brandenburg',
'Mecklenburg-Vorpommern', 'Sachsen', 'Sachsen-Anhalt', 'Thüringen']})
def setUp(self):
self.setUpPyfakefs()
def gdd_calls(self, text=''):
directory = os.path.join(self.path, 'Germany/')
gdd_calls = [
call('Information: Data has been written to',
os.path.join(directory, 'FullData_DIVI.json')),
call('Information: Data has been written to',
os.path.join(directory, 'county_divi'+text+'.json')),
call(
'Information: Data has been written to',
os.path.join(directory, 'state_divi'+text+'.json')),
call(
'Information: Data has been written to',
os.path.join(directory, 'germany_divi'+text+'.json'))]
return gdd_calls
def test_extract_subframe_based_on_dates(self):
(df_raw, df_counties, df_states, df_ger) = gdd.get_divi_data(
out_folder=self.path)
# test if only dates from 08-09-2021 are returned
df_state_testdate = gdd.extract_subframe_based_on_dates(
df_states, date(2021, 9, 8),
date(2021, 9, 8))
pd.testing.assert_frame_equal(self.test_df, df_state_testdate)
@patch('memilio.epidata.getDIVIData.pd.read_json')
@patch('memilio.epidata.getDataIntoPandasDataFrame.loadCsv')
def test_exit_strings(self, mocklcsv, mockrjson):
# test read_data Error call if json file is not found
mockrjson.side_effect = ValueError
with self.assertRaises(SystemExit) as cm:
gdd.get_divi_data(read_data=True, out_folder=self.path)
file_in = os.path.join(self.path, "Germany/FullData_DIVI.json")
exit_string = "Error: The file: " + file_in + " does not exist. "\
"Call program without -r flag to get it."
self.assertEqual(cm.exception.code, exit_string)
# test loadCsv Error if file can't be downloaded
mocklcsv.side_effect = Exception
with self.assertRaises(SystemExit) as cm:
gdd.get_divi_data(read_data=False)
exit_string = "Error: Download link for Divi data has changed."
self.assertEqual(cm.exception.code, exit_string)
@patch('memilio.epidata.getDataIntoPandasDataFrame.loadCsv')
def test_df_empty(self, mocklcsv):
# new test function because of the new mock value
# test Error for empty returned dataframe
mocklcsv.value =
|
pd.DataFrame()
|
pandas.DataFrame
|