prompt (string · lengths 19–1.03M) | completion (string · lengths 4–2.12k) | api (string · lengths 8–90) |
---|---|---|
#################################
# standard library
import os
import sys
import math
import collections # https://docs.python.org/3/library/collections.html
import itertools # https://docs.python.org/3/library/itertools.html
# third party
import pandas as pd # for loading data and operations with data
import matplotlib.pyplot as plt # for plotting
import numpy as np # useful mathematic and numeric tools
from scipy.io import loadmat
from sklearn import linear_model
import statsmodels.api as sm
#################################
#NOTE: stepwise_selection function used from:
#https://datascience.stackexchange.com/questions/24405/how-to-do-stepwise-regression-using-sklearn/24447#24447
def stepwise_selection(X, y,
initial_list=[],
threshold_in=0.01,
threshold_out = 0.05,
verbose=True):
""" Perform a forward-backward feature selection
based on p-value from statsmodels.api.OLS
Arguments:
X - pandas.DataFrame with candidate features
y - list-like with the target
initial_list - list of features to start with (column names of X)
threshold_in - include a feature if its p-value < threshold_in
threshold_out - exclude a feature if its p-value > threshold_out
verbose - whether to print the sequence of inclusions and exclusions
Returns: list of selected features
Always set threshold_in < threshold_out to avoid infinite looping.
See https://en.wikipedia.org/wiki/Stepwise_regression for the details
"""
included = list(initial_list)
while True:
changed=False
# forward step
excluded = list(set(X.columns)-set(included))
new_pval = | pd.Series(index=excluded) | pandas.Series |
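# A minimal sketch (an assumption, not the truncated original body) of how the
# forward step typically continues: fit an OLS model with each candidate
# feature added, record that candidate's p-value, and include the best
# candidate if its p-value falls below threshold_in.
import pandas as pd
import statsmodels.api as sm

def forward_step(X, y, included, threshold_in=0.01):
    excluded = list(set(X.columns) - set(included))
    new_pval = pd.Series(index=excluded, dtype=float)
    for col in excluded:
        model = sm.OLS(y, sm.add_constant(X[included + [col]])).fit()
        new_pval[col] = model.pvalues[col]
    if not new_pval.empty and new_pval.min() < threshold_in:
        included.append(new_pval.idxmin())
    return included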
# -*- coding: utf-8 -*-
import app.config.env as env
import pandas as pd
class Capacity:
def __init__(self, capacity=0., unit=None, tenors=[], start=-float("inf"), end=float("inf")):
self.capacity = capacity
self.unit = unit
self.start = start
self.end = end
self.capacities = None
capacities = []
for tenor in tenors:
capacities.append(capacity if (tenor >= start and tenor < end) else 0.)
self.capacities = pd.Series(capacities, index=tenors)
def schedule(self, tenors):
capacities = []
for tenor in tenors:
capacities.append(self.capacity if (tenor >= self.start and tenor < self.end) else 0.)
self.capacities = | pd.Series(capacities, index=tenors) | pandas.Series |
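# Hedged usage sketch for the Capacity class above (the tenor values and unit
# are assumptions): the capacity applies only to tenors in the half-open
# interval [start, end).
def demo_capacity():
    cap = Capacity(capacity=100., unit="MW", tenors=[1, 2, 3, 4, 5], start=2, end=5)
    # cap.capacities -> 0., 100., 100., 100., 0., indexed by the tenors
    return cap.capacities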
# -*- coding: utf-8 -*-
'''
Author: <NAME>, <NAME>, <NAME>, <NAME>
Version: 1.3
Server for hosting the FaSta dashboard
Copyright 2018 The Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================
'''
import sys
import dash
import dash_auth
import dash_core_components
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
import flask
import pandas as pd
import plotly.graph_objs as go
import pymongo
import threading
from dash.dependencies import Input, Output
import os
import collections
from pprint import pprint
from pymongo.command_cursor import CommandCursor
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
from types import *
import pandas as pd
import numpy as np
from pandas import DataFrame
sys.path.append('./Clients')
import folium
from geopy.geocoders import Nominatim
#from sqlalchemy import create_engine
import psycopg2
########################################################################## #############################################################################################################################################
########################################################################## Web Application #############################################################################################################################################
########################################################################## #############################################################################################################################################
# Constants
MONGO_URL = os.environ.get('MONGO_URI')
POSTGRESS_URL = os.environ.get('POSTGRES_URL')
HOST_ID = '0.0.0.0'
PORT = '37002'
print('Fasta Server initialisiert!')
def createGraphDataForEscalatorPage(numberOfLastEntries: int):
ergDF = pd.DataFrame(columns=['Datum', 'Anzahl_Ausfälle'])
facilities_collection = facilities.find({})
pandas_facilities = pd.DataFrame(list(facilities_collection))
pandas_facilities = pandas_facilities[['equipmentnumber', 'datetime', 'state']]
facilities_distinct = pandas_facilities
facilities_distinct.columns = ['ID', 'Datum', 'Status']
facilities_distinct['Datum'] = pd.to_datetime(facilities_distinct['Datum'], format="%Y-%m-%d_%H-%M-%S")
facilities_distinct['Datum'] = facilities_distinct['Datum'].dt.strftime('%Y-%m-%d')
facilities_distinct_inactive = facilities_distinct[facilities_distinct['Status'] == 'INACTIVE']
dfOnlyDatetime = pd.DataFrame(facilities_distinct_inactive['Datum'], columns=['Datum']).drop_duplicates()
facilities_distinct_inactive_latestDate = facilities_distinct_inactive.groupby('ID')['Datum'].max()
counter = 0
for index, row in dfOnlyDatetime.iterrows():
counter = 0
for key, value in facilities_distinct_inactive_latestDate.items():
if value == row['Datum']:
counter += 1
ergDF.loc[index] = row['Datum'], counter
ergDF = ergDF.reset_index().drop(['index'], axis=1)
ergDF = ergDF.iloc[-numberOfLastEntries:]
return ergDF
def getDesiredState(listWithStates, state):
stateCounter = 0
for i in listWithStates:
if state == i['state']:
stateCounter += 1
return stateCounter
def getDesiredStateExplanation(listWithStates, state, stateExplanation):
stateExpressionCounter = 0
for i in listWithStates:
if state == i['state'] and stateExplanation == i['stateExplanation']:
stateExpressionCounter += 1
return stateExpressionCounter
def createOverview(givenType: str):
resultOverview = facilities.aggregate([
{'$match': {'type': givenType}},
{'$group': {
'_id': '$equipmentnumber',
'lastStateChangeDate': {'$last': '$datetime'},
'state': {'$last': '$state'},
}}
])
listWithStates = []
for i in resultOverview:
listWithStates.append(i)
stateCountACTIVE = getDesiredState(listWithStates, 'ACTIVE')
stateCountINACTIVE = getDesiredState(listWithStates, 'INACTIVE')
stateCountUNKNOWN = getDesiredState(listWithStates, 'UNKNOWN')
return stateCountACTIVE, stateCountINACTIVE, stateCountUNKNOWN
def createReasonsForInactivity(givenType: str):
uniqueList = facilities.distinct("stateExplanation")
resultGruendeFuerInaktivitaet = facilities.aggregate([
{'$match': {'type': givenType}},
{'$group': {
'_id': '$equipmentnumber',
'lastStateChangeDate': {'$last': '$datetime'},
'state': {'$last': '$state'},
'stateExplanation': {'$last': '$stateExplanation'}
}}
])
listWithStateExplanations = []
for i in resultGruendeFuerInaktivitaet:
listWithStateExplanations.append(i)
dictStateExplanationReason = {}
for i in uniqueList:
count = getDesiredStateExplanation(listWithStateExplanations, 'INACTIVE', str(i))
if count != 0:
dictStateExplanationReason[str(i)] = count
key_array = []
value_array = []
for key, value in dictStateExplanationReason.items():
key_array.append(key)
value_array.append(value)
return key_array, value_array
def createInitialData():
client = pymongo.MongoClient(MONGO_URL, maxPoolSize=50)
dbeva = client.eva_dev
facilities = dbeva['facilities']
# Load the elevator master data
conn = psycopg2.connect(host='station-db', user='postgres', password='<PASSWORD>', dbname='eva_dev', port=5432)
cur = conn.cursor()
query = 'select * from "elevator"'
cur.execute(query)
stammdaten_liste = cur.fetchall()
aufzüge = pd.DataFrame(stammdaten_liste)
columns = ['ID','Standort Equipment', 'TechnPlatzBezeichng', 'Equipment', 'Equipmentname', 'Ort', 'Wirtschaftseinheit',
'Hersteller',
'Baujahr', 'ANTRIEBSART', 'ANZAHL_HALTESTELLEN', 'ANZAHL_TUEREN_KABINE', 'ANZAHL_TUEREN_SCHACHT',
'FOERDERGESCHWINDIGKEIT',
'FOERDERHOEHE', 'LAGE', 'TRAGKRAFT', 'ERWEITERTE_ORTSANGABE', 'MIN_TUERBREITE', 'KABINENTIEFE',
'KABINENBREITE',
'KABINENHOEHE', 'TUERHOHE', 'FABRIKNUMMER', 'TUERART', 'GEOKOORDINATERECHTSWERT',
'GEOKOORDINATEHOCHWERT', 'AUSFTEXTLICHEBESCHREIBUNG']
aufzüge.columns = columns
aufzüge = aufzüge.drop(0)
aufzüge['Equipment'] = aufzüge['Equipment'].astype(str).astype('int64')
aufzüge = aufzüge.drop_duplicates(['Equipment'])
aufzüge = aufzüge.drop(columns=['ID'])
aufzüge = aufzüge.fillna(value=np.nan)
aufzüge['Baujahr'] = pd.to_numeric(aufzüge['Baujahr'], errors='coerce')
print('Anzahl Aufzüge: ', len(aufzüge))
return facilities, aufzüge
def createMap(givenType: str):
resultCommandCursor = facilities.aggregate([
{'$match': {'type': givenType}},
{'$group': {
'_id': '$equipmentnumber',
'description': {'$last': '$description'},
'geocoordX': {'$last': '$geocoordX'},
'geocoordY': {'$last': '$geocoordY'},
'lastStateChangeDate': {'$last': '$datetime'},
'state': {'$last': '$state'},
}}
])
resultCommandCursor = pd.DataFrame(list(resultCommandCursor))
resultCommandCursor.columns = ['equipmentnumber', 'description', 'geocoordX', 'geocoordY', 'lastStateChangeDate', 'state']
inactive = resultCommandCursor[resultCommandCursor['state'] == 'INACTIVE']
active = resultCommandCursor[resultCommandCursor['state'] == 'ACTIVE']
# Zoom in on the selected location
geolocator = Nominatim(user_agent="Eva_Dashboard")
return inactive, active, geolocator
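# Hedged sketch (an assumption, not part of the original dashboard code) of how
# the frames returned by createMap are typically rendered with folium; note
# that geocoordY holds the latitude and geocoordX the longitude.
def renderFacilityMap(inactive, active, geolocator, place="Frankfurt am Main"):
    location = geolocator.geocode(place)
    fmap = folium.Map(location=[location.latitude, location.longitude], zoom_start=6)
    for frame, color in ((inactive, "red"), (active, "green")):
        for _, row in frame.iterrows():
            folium.Marker([row["geocoordY"], row["geocoordX"]],
                          popup=str(row["description"]),
                          icon=folium.Icon(color=color)).add_to(fmap)
    return fmap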
#####################################################################
################ Start of Code (create initial data) ################
#####################################################################
facilities, aufzüge = createInitialData()
############################################################
################# Elevator overview #################
############################################################
elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN = createOverview('ELEVATOR')
############################################################
############### Escalator overview ###############
############################################################
escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN = createOverview('ESCALATOR')
####################################################
###### Reasons for elevator inactivity ######
####################################################
elevator_key_array, elevator_value_array = createReasonsForInactivity('ELEVATOR')
####################################################
###### Reasons for escalator inactivity ######
####################################################
escalator_key_array, escalator_value_array = createReasonsForInactivity('ESCALATOR')
####################################################
###### Routine for refreshing the data ######
####################################################
def updateValues():
global facilities, aufzüge, elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN
global escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN
global elevator_key_array, elevator_value_array
global escalator_key_array, escalator_value_array
facilities, aufzüge = createInitialData()
elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN = createOverview('ELEVATOR')
escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN = createOverview('ESCALATOR')
elevator_key_array, elevator_value_array = createReasonsForInactivity('ELEVATOR')
escalator_key_array, escalator_value_array = createReasonsForInactivity('ESCALATOR')
# Data is refreshed every 5 minutes (see the scheduler interval below)
scheduler = BlockingScheduler()
scheduler.add_job(updateValues, 'interval', minutes=5)
class UpdateValue(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
scheduler.start()
print('Thread zum Updaten der Werte gestartet!')
thread = UpdateValue()
thread.start()
####################################
###### Did you know? ######
####################################
# Oldest elevator
aeltesteAufzug_datensatz = aufzüge[aufzüge['Baujahr'] == int(aufzüge['Baujahr'].min())]
aeltesteAufzug_ort = aeltesteAufzug_datensatz['Ort'].values[0]
aeltesteAufzug_jahr = int(aeltesteAufzug_datensatz['Baujahr'].values[0])
# Station with the most elevators
uniquelist_orte = aufzüge['Ort'].unique()
df_anzahlProStation = | pd.DataFrame(columns=['Ort', 'Anzahl_Aufzüge']) | pandas.DataFrame |
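# Hedged sketch (not the original continuation): the per-station elevator count
# can also be read directly off the master data loaded above, e.g.
#     anzahl_pro_station = aufzüge['Ort'].value_counts()
#     station_mit_meisten_aufzuegen = anzahl_pro_station.idxmax()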
import datetime
import functools
from decimal import Decimal
import pandas as pd
from dateutil.parser import parse as time_parser
from enums import BuySell
class LocalTradeRecord:
def __init__(self):
self.path = "data/trade_record.csv"
# number, stock_id, buy_time, sell_time, buy_price, sell_price, volumn, buy_cost, sell_cost, revenue
self.df = pd.read_csv(self.path,
dtype={0: int, 1: str, 2: str, 3: str, 4: str, 5: str, 6: float, 7: str, 8: str, 9: str})
# Running index number of the trade records
def getLastNumber(self):
last_row = list(self.df.iloc[-1])
return last_row[0]
def getLastBuyTime(self, stock_id):
sub_df = self.df[self.df["stock_id"] == stock_id].copy()
sub_df.sort_values(by=["number", "buy_time"], inplace=True)
last_buy = list(sub_df.iloc[-1])[2]
return time_parser(last_buy)
def saveTradeRecord(self, stock_id, buy_time: datetime.datetime, sell_time: datetime.datetime, buy_price: Decimal,
sell_price: Decimal, volumn: int, buy_cost: Decimal, sell_cost: Decimal, revenue: Decimal):
"""
:param stock_id:
:param buy_time:
:param sell_time:
:param buy_price:
:param sell_price:
:param volumn:
:param buy_cost:
:param sell_cost:
:param revenue:
:return:
"""
number = self.getLastNumber() + 1
data = {"number": number,
"stock_id": stock_id,
"buy_time": str(buy_time.date()),
"sell_time": str(sell_time.date()),
"buy_price": str(buy_price),
"sell_price": str(sell_price),
"volumn": volumn,
"buy_cost": str(buy_cost),
"sell_cost": str(sell_cost),
"revenue": str(revenue)}
self.df = self.df.append(data, ignore_index=True)
self.df.to_csv(self.path, index=False)
return data
# # Remove the position from inventory
# self.api.removeInventory(guid=guid)
#
# # TODO: write out the trade record and update the funds (funds.csv)
# self.local_capital.allocateRevenue(deal_time=sell_time, remark=str(number), trade_revenue=trade_revenue)
# # self.api.recordTrading(stock_id=stock_id,
# # buy_price=str(buy_price),
# # sell_price=str(sell_price),
# # vol=buy_volumn,
# # buy_time=buy_time,
# # sell_time=sell_time,
# # buy_cost=str(buy_cost),
# # sell_cost=str(sell_cost),
# # revenue=str(revenue - buy_cost - sell_cost))
#
# self.logger.info(f"record: {record}", extra=self.extra)
# f.write(record)
def recordDividend(self, stock_id: str, revenue: str, pay_time: datetime.datetime = datetime.datetime.today()):
last_buy = self.getLastBuyTime(stock_id=stock_id)
buy_time = last_buy.strftime("%Y-%m-%d")
sell_time = pay_time.strftime("%Y-%m-%d")
number = self.getLastNumber() + 1
data = {"number": number,
"stock_id": stock_id,
"buy_time": buy_time,
"sell_time": sell_time,
"buy_price": "0",
"sell_price": "0",
"volumn": 0,
"buy_cost": "0",
"sell_cost": "0",
"revenue": revenue}
self.df = self.df.append(data, ignore_index=True)
self.df.to_csv(self.path, index=False)
return data
def renumber(self):
n_data = len(self.df)
numbers = list(range(1, n_data + 1))
self.df["number"] = numbers
self.df.to_csv(self.path, index=False)
def sortOperates(operates):
"""
Sort the operations. Sort priority: date (earlier first) -> operation type (buy before sell).
xxx_operate -> [datetime, buy/sell, cost/income]
:param operates: all operations
:return:
"""
def compareOperates(op1, op2):
"""
sorted() is also a higher-order function: it can take a comparison function to implement custom ordering.
The comparison function receives two elements x and y to compare:
return -1 if x should come before y,
return 1 if x should come after y,
and return 0 if x and y are equal.
def customSort(x, y):
if x > y:
return -1
if x < y:
return 1
return 0
print(sorted([2,4,5,7,3], key=functools.cmp_to_key(customSort)))
-> [7, 5, 4, 3, 2]
:param op1: operation 1
:param op2: operation 2
:return:
"""
# datetime, buy/sell, cost/income
time1, buy_sell1, _ = op1
time2, buy_sell2, _ = op2
# Earlier times sort first
if time1 < time2:
return -1
elif time1 > time2:
return 1
# Lower value (buy) sorts first
if buy_sell1.value < buy_sell2.value:
return -1
# Higher value (sell) sorts later
elif buy_sell1.value > buy_sell2.value:
return 1
else:
return 0
# Sort the operations using the custom comparison function compareOperates
return sorted(operates, key=functools.cmp_to_key(compareOperates))
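# Hedged usage sketch of sortOperates (the BuySell member names BUY/SELL and
# the ordering BUY.value < SELL.value are assumptions based on the comparison
# function above).
def demo_sort_operates():
    ops = [
        [datetime.datetime(2021, 1, 2), BuySell.SELL, Decimal("-100")],
        [datetime.datetime(2021, 1, 1), BuySell.SELL, Decimal("-30")],
        [datetime.datetime(2021, 1, 1), BuySell.BUY, Decimal("50")],
    ]
    # Expected order: 2021-01-01 BUY, 2021-01-01 SELL, 2021-01-02 SELL
    return sortOperates(ops)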
def evaluateTradingPerformance():
path = "data/trade_record.csv"
# number,stock_id,buy_time,sell_time,buy_price,sell_price,volumn,buy_cost,sell_cost,revenue
df = pd.read_csv(path)
df["buy_time"] = pd.to_datetime(df["buy_time"])
df["sell_time"] = | pd.to_datetime(df["sell_time"]) | pandas.to_datetime |
from pandas import Series,DataFrame
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn versions
from sklearn.cluster import KMeans
from sklearn import preprocessing
import os
from sklearn import datasets
import sklearn.metrics as sm
from sklearn.preprocessing import LabelEncoder
#calling in libraries
data= | pd.read_csv(r"C:\Users\mudit\Documents\weather-check.csv") | pandas.read_csv |
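# Hedged sketch (not the original continuation) of the typical next steps the
# imports above suggest: label-encode the categorical survey answers, scale
# them, and fit a KMeans clustering. Assumes the frame holds only categorical
# answers with no missing values worth preserving.
def cluster_weather_survey(data, n_clusters=3):
    encoded = data.astype(str).apply(LabelEncoder().fit_transform)
    scaled = preprocessing.StandardScaler().fit_transform(encoded)
    return KMeans(n_clusters=n_clusters).fit_predict(scaled)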
# Time series subplots
# Import Modules
import os
import pandas as pd
import numpy as np
import datetime as dt
import csv
import matplotlib.pyplot as plt
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# All Posts
# Read CSV
df = pd.read_csv('ClimateSkepticsAllPosts.csv',index_col=0,parse_dates=True)
def get_yearmonth(timestamp):
month = timestamp[5:7]
year = timestamp[:4]
monthyear = str(year) + '/' + str(month)
return monthyear
df['YearMonth'] = df['timestamp'].apply(lambda x: get_yearmonth(x)) # Bin Data in Months
df_grp = df.groupby('YearMonth')
Y_M = []
Num_Posts = []
for i, grp in df_grp:
grplen = len(grp)
Num_Posts.append(grplen) # Get Number of Posts per Month
Y_M.append(i)
# New DataFrame
dateTime = pd.DataFrame()
dateTime['YearMonth'] = Y_M
dateTime['Posts'] = Num_Posts
dateTime.to_csv('ClimateSkeptics_Posts_per_Month.csv')
datetime_list = []
for i in list(range(0,len(dateTime))):
month = int(dateTime['YearMonth'][i][5:7])
year = int(dateTime['YearMonth'][i][0:4])
datetime_list.append(dt.date(year,month,1))
dateTime['Date'] = datetime_list
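# Equivalent, more idiomatic monthly aggregation (a sketch added for clarity,
# not part of the original pipeline): groupby().size() yields the same counts.
posts_per_month = df.groupby('YearMonth').size().rename('Posts').reset_index()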
# All Submissions
# Read CSV
path=os.getcwd()
fullpath=path+'/Outputs/CS_FULL/LDA_Dataframes/topic_timeseries_10.csv'
df = pd.read_csv(fullpath,index_col=0,parse_dates=True)
df['YearMonth'] = df['timestamp'].apply(lambda x: get_yearmonth(x)) # Bin Data in Months
df_grp = df.groupby('YearMonth')
Y_M = []
Num_Posts = []
for i, grp in df_grp:
grplen = len(grp)
Num_Posts.append(grplen) # Get Number of Posts per Month
Y_M.append(i)
# New DataFrame
dateTime2 = pd.DataFrame()
dateTime2['YearMonth'] = Y_M
dateTime2['Posts'] = Num_Posts
dateTime2.to_csv('ClimateSkeptics_Submissions_per_Month.csv')
datetime_list = []
for i in list(range(0,len(dateTime2))):
month = int(dateTime2['YearMonth'][i][5:7])
year = int(dateTime2['YearMonth'][i][0:4])
datetime_list.append(dt.date(year,month,1))
dateTime2['Date'] = datetime_list
# Get Subscribers
subs = pd.read_csv('climateskeptics_subscribers.csv')
subs.columns = ['timestamp','Subscribers']
datetime_list = []
for i in list(range(0,len(subs))):
day = subs['timestamp'][i][:2]
month = subs['timestamp'][i][3:5]
year = subs['timestamp'][i][6:10]
datetime_list.append(dt.date(int(year),int(month),int(day)))
subs['Date'] = datetime_list
# NOW DO SPECIFIC SEARCHES
# CLIMATEGATE
cgate_posts = pd.read_csv('CS_CGate_posts.csv')
cgate_posts = cgate_posts.drop(['title','url','comms_num'],axis=1)
cgate_coms = pd.read_csv('CS_CGate_comments.csv')
cgate_df = | pd.concat([cgate_posts,cgate_coms]) | pandas.concat |
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import expon
from scipy.stats import randint
from sklearn.multioutput import MultiOutputClassifier
import numpy as np
if __name__ == '__main__':
path = 'example_data.csv'
df = pd.read_csv(path)
df = pd.get_dummies(df)
# Create a 3d scatter plot of the data
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(df['x'], df['y'], df['z'])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
# Create a 2d scatter plot of the data
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(df['x'], df['z'])
ax.set_xlabel('X')
ax.set_ylabel('Z')
plt.show()
# We will train the model to predict Z and the category of the data from the X and Y values
# Create empty Dataframes
X = pd.DataFrame()
Y = | pd.DataFrame() | pandas.DataFrame |
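# Hedged sketch (an assumption, not the original continuation): take features
# and target from df and fit a simple polynomial-regression pipeline; the
# column names 'x', 'y', 'z' come from the plotting code above, the variable
# names below are illustrative.
X_features = df[['x', 'y']].copy()
y_target = df['z'].copy()
model = Pipeline([
    ('poly', PolynomialFeatures(degree=2)),
    ('scaler', StandardScaler()),
    ('regressor', LinearRegression()),
])
X_train, X_test, y_train, y_test = train_test_split(X_features, y_target, test_size=0.2)
model.fit(X_train, y_train)
print('Held-out R^2:', model.score(X_test, y_test))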
"""Tests for the sdv.constraints.tabular module."""
import uuid
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
| pd.testing.assert_series_equal(expected_out_a, out['a']) | pandas.testing.assert_series_equal |
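# Hedged sketch of how this check typically continues (an assumption, not the
# original test body): every value stored in the joint 'b#c' column should be
# a parseable UUID, e.g.
#     for value in out['b#c']:
#         uuid.UUID(str(value))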
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
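# Hedged illustration of create_expected_df_for_factor_compute (the dates, sids
# and values below are assumptions): estimates that become known on different
# days are forward-filled through `end_date` under a (at_date, knowledge_date)
# MultiIndex.
def _example_expected_df():
    return create_expected_df_for_factor_compute(
        start_date=pd.Timestamp("2015-01-05"),
        sids=[0, 1],
        tuples=[(0, 1.0, pd.Timestamp("2015-01-05")),
                (1, 2.0, pd.Timestamp("2015-01-07"))],
        end_date=pd.Timestamp("2015-01-09"),
    )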
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
| pd.Timestamp("2015-01-12") | pandas.Timestamp |
from datetime import (
datetime,
timedelta,
)
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
import pandas.util._test_decorators as td
from pandas import (
NA,
Categorical,
CategoricalDtype,
Index,
Interval,
NaT,
Series,
Timedelta,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestAstypeAPI:
def test_arg_for_errors_in_astype(self):
# see GH#14878
ser = Series([1, 2, 3])
msg = (
r"Expected value of kwarg 'errors' to be one of \['raise', "
r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
ser.astype(np.float64, errors=False)
ser.astype(np.int8, errors="raise")
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see GH#7271
ser = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = ser.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = ser.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
ser.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
ser.astype(dt4)
# GH#16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
ser.astype(dt5)
class TestAstype:
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_astype_empty_constructor_equality(self, dtype):
# see GH#15524
if dtype not in (
"S",
"V", # poor support (if any) currently
"M",
"m", # Generic timestamps raise a ValueError. Already tested.
):
init_empty = Series([], dtype=dtype)
with tm.assert_produces_warning(DeprecationWarning):
as_type_empty = Series([]).astype(dtype)
tm.assert_series_equal(init_empty, as_type_empty)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see GH#4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_float_to_period(self):
result = Series([np.nan]).astype("period[D]")
expected = Series([NaT], dtype="period[D]")
tm.assert_series_equal(result, expected)
def test_astype_no_pandas_dtype(self):
# https://github.com/pandas-dev/pandas/pull/24866
ser = Series([1, 2], dtype="int64")
# Don't have PandasDtype in the public API, so we use `.array.dtype`,
# which is a PandasDtype.
result = ser.astype(ser.array.dtype)
tm.assert_series_equal(result, ser)
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_astype_generic_timestamp_no_frequency(self, dtype, request):
# see GH#15524, GH#15987
data = [1]
s = Series(data)
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
request.node.add_marker(mark)
msg = (
fr"The '{dtype.__name__}' dtype has no unit\. "
fr"Please pass in '{dtype.__name__}\[ns\]' instead."
)
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
def test_astype_dt64_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti = date_range("2012-01-01", periods=3)
result = Series(dti).astype(str)
expected = Series(["2012-01-01", "2012-01-02", "2012-01-03"], dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_dt64tz_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern")
result = Series(dti_tz).astype(str)
expected = Series(
[
"2012-01-01 00:00:00-05:00",
"2012-01-02 00:00:00-05:00",
"2012-01-03 00:00:00-05:00",
],
dtype=object,
)
tm.assert_series_equal(result, expected)
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values)
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure changes propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
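# lons_coarse/lats_coarse define a 5-degree global grid used as the major/minor axes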
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
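# NS stands in for ":" when building the tuple indexers below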
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
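# index into a fresh copy, zero the result in place, and check that the
# copy reflects the change, i.e. a view (not a copy) was returned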
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
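# module-level panel fixture (with NaNs added) shared by the test classes below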
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
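# the scalar-constructed Panel should equal a dense (2, 3, 4) array filled with val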
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
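# an OrderedDict should preserve key order when used to build the items axis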
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
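# f standardises each row of the slab: subtract the row mean, divide by the row std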
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
# on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
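# applying over axes [1, 2] reduces each 5x5 slab to a scalar, so every
# result should be a length-5 Series (one value per item)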
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception when both major and major_axis are passed
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
# negative indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assertIs(result.items, self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel.minor_axis)
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assertIs(result.items, self.panel.major_axis)
# this works, but returns a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test mixture of args
result = self.panel.transpose('minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assertRaisesRegexp(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
panel = self.panel.copy()
result = panel.transpose(2, 0, 1, copy=True)
expected = panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
panel.values[0, 1, 1] = np.nan
self.assertTrue(notnull(result.values[1, 0, 1]))
@ignore_sparse_panel_future_warning
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
expected = self.panel.to_frame().dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
unfiltered = self.panel.to_frame(filter_observations=False)
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
self.assertEqual(unfiltered.index.names, ('major', 'minor'))
# unsorted, round trip
df = self.panel.to_frame(filter_observations=False)
unsorted = df.take(np.random.permutation(len(df)))
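# shuffle the rows; to_panel should rebuild the same panel regardless of row order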
pan = unsorted.to_panel()
assert_panel_equal(pan, self.panel)
# preserve original index names
df = DataFrame(np.random.randn(6, 2),
index=[['a', 'a', 'b', 'b', 'c', 'c'],
[0, 1, 0, 1, 0, 1]],
columns=['one', 'two'])
df.index.names = ['foo', 'bar']
df.columns.name = 'baz'
rdf = df.to_panel().to_frame()
self.assertEqual(rdf.index.names, df.index.names)
self.assertEqual(rdf.columns.names, df.columns.names)
def test_to_frame_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
# Previously, this was mutating the underlying index and changing its
# name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'), (
np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
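# each original cell appears once per item ('i1' and 'i2') in the stacked frame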
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
self.assertTrue(isnull(panel[0].ix[1, [0, 1]]).all())
def test_to_panel_duplicates(self):
# #2441
df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
idf = df.set_index(['a', 'b'])
assertRaisesRegexp(ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[['A', 'B']]
expected.items = ['A', 'A']
result = panel.loc['A']
assert_panel_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, ['A', 'B']]
expected.major_axis = ['A', 'A']
result = panel.loc[:, 'A']
assert_panel_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, ['A', 'B']]
expected.minor_axis = ['A', 'A']
result = panel.loc[:, :, 'A']
assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1,
check_names=False)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx), shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx), shifted.minor_xs(idx_lag))
# items
idx = self.panel.items[0]
idx_lag = self.panel.items[1]
shifted = self.panel.shift(1, axis='items')
assert_frame_equal(self.panel[idx], shifted[idx_lag])
# negative numbers, #2164
result = self.panel.shift(-1)
expected = Panel(dict((i, f.shift(-1)[:-1])
for i, f in self.panel.iteritems()))
assert_panel_equal(result, expected)
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodPanel()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_panel_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=bday)
assert_panel_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
panel = _panel
shifted = panel.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(panel, unshifted)
shifted2 = panel.tshift(freq=panel.major_axis.freq)
assert_panel_equal(shifted, shifted2)
inferred_ts = Panel(panel.values, items=panel.items,
major_axis=Index(np.asarray(panel.major_axis)),
minor_axis=panel.minor_axis)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(shifted, panel.tshift(1))
assert_panel_equal(unshifted, inferred_ts)
no_freq = panel.ix[:, [0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_pct_change(self):
df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
df2 = df1 + 1
df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})
# major, 1
result = wp.pct_change() # axis='major'
expected = Panel({'i1': df1.pct_change(),
'i2': df2.pct_change(),
'i3': df3.pct_change()})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=1)
assert_panel_equal(result, expected)
# major, 2
result = wp.pct_change(periods=2)
expected = Panel({'i1': df1.pct_change(2),
'i2': df2.pct_change(2),
'i3': df3.pct_change(2)})
assert_panel_equal(result, expected)
# minor, 1
result = wp.pct_change(axis='minor')
expected = Panel({'i1': df1.pct_change(axis=1),
'i2': df2.pct_change(axis=1),
'i3': df3.pct_change(axis=1)})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=2)
assert_panel_equal(result, expected)
# minor, 2
result = wp.pct_change(periods=2, axis='minor')
expected = Panel({'i1': df1.pct_change(periods=2, axis=1),
'i2': df2.pct_change(periods=2, axis=1),
'i3': df3.pct_change(periods=2, axis=1)})
assert_panel_equal(result, expected)
# items, 1
result = wp.pct_change(axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [1, 0.5, .2],
'c2': [1. / 3, 0.25, 1. / 6]}),
'i3': DataFrame({'c1': [.5, 1. / 3, 1. / 6],
'c2': [.25, .2, 1. / 7]})})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=0)
assert_panel_equal(result, expected)
# items, 2
result = wp.pct_change(periods=2, axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i3': DataFrame({'c1': [2, 1, .4],
'c2': [2. / 3, .5, 1. / 3]})})
assert_panel_equal(result, expected)
def test_round(self):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
[-1566.213, 88.88], [-12, 94.5]],
[[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
[272.212, -99.99], [23, -76.5]]]
evalues = [[[float(np.around(i)) for i in j] for j in k]
for k in values]
p = Panel(values, items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
expected = Panel(evalues, items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = p.round()
self.assert_panel_equal(expected, result)
def test_multiindex_get(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)],
names=['first', 'second'])
wp = Panel(np.random.random((4, 5, 5)),
items=ind,
major_axis=np.arange(5),
minor_axis=np.arange(5))
f1 = wp['a']
f2 = wp.ix['a']
assert_panel_equal(f1, f2)
self.assertTrue((f1.items == [1, 2]).all())
self.assertTrue((f2.items == [1, 2]).all())
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
def test_multiindex_blocks(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
wp = Panel(self.panel._data)
wp.items = ind
f1 = wp['a']
self.assertTrue((f1.items == [1, 2]).all())
f1 = wp[('b', 1)]
self.assertTrue((f1.columns == ['A', 'B', 'C', 'D']).all())
def test_repr_empty(self):
empty = Panel()
repr(empty)
def test_rename(self):
mapper = {'ItemA': 'foo', 'ItemB': 'bar', 'ItemC': 'baz'}
renamed = self.panel.rename_axis(mapper, axis=0)
exp = Index(['foo', 'bar', 'baz'])
self.assertTrue(renamed.items.equals(exp))
renamed = self.panel.rename_axis(str.lower, axis=2)
exp = Index(['a', 'b', 'c', 'd'])
self.assertTrue(renamed.minor_axis.equals(exp))
# don't copy
renamed_nocopy = self.panel.rename_axis(mapper, axis=0, copy=False)
renamed_nocopy['foo'] = 3.
self.assertTrue((self.panel['ItemA'].values == 3).all())
def test_get_attr(self):
assert_frame_equal(self.panel['ItemA'], self.panel.ItemA)
# specific cases from #3440
self.panel['a'] = self.panel['ItemA']
assert_frame_equal(self.panel['a'], self.panel.a)
self.panel['i'] = self.panel['ItemA']
assert_frame_equal(self.panel['i'], self.panel.i)
def test_from_frame_level1_unsorted(self):
tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), ('AAPL', 1),
('MSFT', 1)]
midx = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.rand(5, 4), index=midx)
p = df.to_panel()
assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
def test_to_excel(self):
try:
import xlwt # noqa
import xlrd # noqa
import openpyxl # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for ext in ['xls', 'xlsx']:
path = '__tmp__.' + ext
with ensure_clean(path) as path:
self.panel.to_excel(path)
try:
reader = ExcelFile(path)
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_to_excel_xlsxwriter(self):
try:
import xlrd # noqa
import xlsxwriter # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("Requires xlrd and xlsxwriter. Skipping test.")
path = '__tmp__.xlsx'
with ensure_clean(path) as path:
self.panel.to_excel(path, engine='xlsxwriter')
try:
reader = ExcelFile(path)
except ImportError as e:
raise nose.SkipTest("cannot write excel file: %s" % e)
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_dropna(self):
p = Panel(np.random.randn(4, 5, 6), major_axis=list('abcde'))
p.ix[:, ['b', 'd'], 0] = np.nan
result = p.dropna(axis=1)
exp = p.ix[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
inp = p.copy()
inp.dropna(axis=1, inplace=True)
assert_panel_equal(inp, exp)
result = p.dropna(axis=1, how='all')
assert_panel_equal(result, p)
p.ix[:, ['b', 'd'], :] = np.nan
result = p.dropna(axis=1, how='all')
exp = p.ix[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
p = Panel(np.random.randn(4, 5, 6), items=list('abcd'))
p.ix[['b'], :, 0] = np.nan
result = p.dropna()
exp = p.ix[['a', 'c', 'd']]
assert_panel_equal(result, exp)
result = p.dropna(how='all')
assert_panel_equal(result, p)
p.ix['b'] = np.nan
result = p.dropna(how='all')
exp = p.ix[['a', 'c', 'd']]
assert_panel_equal(result, exp)
def test_drop(self):
df = DataFrame({"A": [1, 2], "B": [3, 4]})
panel = Panel({"One": df, "Two": df})
def check_drop(drop_val, axis_number, aliases, expected):
try:
actual = panel.drop(drop_val, axis=axis_number)
assert_panel_equal(actual, expected)
for alias in aliases:
actual = panel.drop(drop_val, axis=alias)
assert_panel_equal(actual, expected)
except AssertionError:
com.pprint_thing("Failed with axis_number %d and aliases: %s" %
(axis_number, aliases))
raise
# Items
expected = Panel({"One": df})
check_drop('Two', 0, ['items'], expected)
self.assertRaises(ValueError, panel.drop, 'Three')
# errors = 'ignore'
dropped = panel.drop('Three', errors='ignore')
assert_panel_equal(dropped, panel)
dropped = panel.drop(['Two', 'Three'], errors='ignore')
expected = Panel({"One": df})
assert_panel_equal(dropped, expected)
# Major
exp_df = DataFrame({"A": [2], "B": [4]}, index=[1])
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop(0, 1, ['major_axis', 'major'], expected)
exp_df = DataFrame({"A": [1], "B": [3]}, index=[0])
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop([1], 1, ['major_axis', 'major'], expected)
# Minor
exp_df = df[['B']]
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop(["A"], 2, ['minor_axis', 'minor'], expected)
exp_df = df[['A']]
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop("B", 2, ['minor_axis', 'minor'], expected)
def test_update(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel([[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
pan.update(other)
expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[3.6, 2., 3], [1.5, np.nan, 7], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_from_dict(self):
pan = Panel({'one': DataFrame([[1.5, np.nan, 3], [1.5, np.nan, 3],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]]),
'two': DataFrame([[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]])})
other = {'two': DataFrame([[3.6, 2., np.nan], [np.nan, np.nan, 7]])}
pan.update(other)
expected = Panel(
{'two': DataFrame([[3.6, 2., 3], [1.5, np.nan, 7],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]]),
'one': DataFrame([[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]])})
assert_panel_equal(pan, expected)
def test_update_nooverwrite(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel([[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
pan.update(other, overwrite=False)
expected = Panel([[[1.5, np.nan, 3], [1.5, np.nan, 3],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[1.5, 2., 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_filtered(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel([[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
pan.update(other, filter_func=lambda x: x > 2)
expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[1.5, np.nan, 3], [1.5, np.nan, 7],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_raise(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
np.testing.assert_raises(Exception, pan.update, *(pan, ),
**{'raise_conflict': True})
def test_all_any(self):
self.assertTrue((self.panel.all(axis=0).values == nanall(
self.panel, axis=0)).all())
self.assertTrue((self.panel.all(axis=1).values == nanall(
self.panel, axis=1).T).all())
self.assertTrue((self.panel.all(axis=2).values == nanall(
self.panel, axis=2).T).all())
self.assertTrue((self.panel.any(axis=0).values == nanany(
self.panel, axis=0)).all())
self.assertTrue((self.panel.any(axis=1).values == nanany(
self.panel, axis=1).T).all())
self.assertTrue((self.panel.any(axis=2).values == nanany(
self.panel, axis=2).T).all())
def test_all_any_unhandled(self):
self.assertRaises(NotImplementedError, self.panel.all, bool_only=True)
self.assertRaises(NotImplementedError, self.panel.any, bool_only=True)
class TestLongPanel(tm.TestCase):
"""
LongPanel no longer exists, but...
"""
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
panel = tm.makePanel()
tm.add_nans(panel)
self.panel = panel.to_frame()
self.unfiltered_panel = panel.to_frame(filter_observations=False)
def test_ops_differently_indexed(self):
# trying to set non-identically indexed panel
wp = self.panel.to_panel()
wp2 = wp.reindex(major=wp.major_axis[:-1])
lp2 = wp2.to_frame()
result = self.panel + lp2
assert_frame_equal(result.reindex(lp2.index), lp2 * 2)
# careful, mutation
self.panel['foo'] = lp2['ItemA']
assert_series_equal(self.panel['foo'].reindex(lp2.index), lp2['ItemA'],
check_names=False)
def test_ops_scalar(self):
result = self.panel.mul(2)
expected = DataFrame.__mul__(self.panel, 2)
assert_frame_equal(result, expected)
def test_combineFrame(self):
wp = self.panel.to_panel()
result = self.panel.add(wp['ItemA'].stack(), axis=0)
assert_frame_equal(result.to_panel()['ItemA'], wp['ItemA'] * 2)
def test_combinePanel(self):
wp = self.panel.to_panel()
result = self.panel.add(self.panel)
wide_result = result.to_panel()
assert_frame_equal(wp['ItemA'] * 2, wide_result['ItemA'])
# one item
result = self.panel.add(self.panel.filter(['ItemA']))
def test_combine_scalar(self):
result = self.panel.mul(2)
expected = DataFrame(self.panel._data) * 2
assert_frame_equal(result, expected)
def test_combine_series(self):
s = self.panel['ItemA'][:10]
result = self.panel.add(s, axis=0)
expected = DataFrame.add(self.panel, s, axis=0)
assert_frame_equal(result, expected)
s = self.panel.ix[5]
result = self.panel + s
expected = DataFrame.add(self.panel, s, axis=1)
assert_frame_equal(result, expected)
def test_operators(self):
wp = self.panel.to_panel()
result = (self.panel + 1).to_panel()
assert_frame_equal(wp['ItemA'] + 1, result['ItemA'])
def test_arith_flex_panel(self):
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
if not compat.PY3:
aliases = {}
else:
aliases = {'div': 'truediv'}
self.panel = self.panel.to_panel()
for n in [np.random.randint(-50, -1), np.random.randint(1, 50), 0]:
for op in ops:
alias = aliases.get(op, op)
f = getattr(operator, alias)
exp = f(self.panel, n)
result = getattr(self.panel, op)(n)
assert_panel_equal(result, exp, check_panel_type=True)
# rops
r_f = lambda x, y: f(y, x)
exp = r_f(self.panel, n)
result = getattr(self.panel, 'r' + op)(n)
assert_panel_equal(result, exp)
def test_sort(self):
def is_sorted(arr):
            # non-decreasing check (mirrors _monotonic below)
            return not (arr[1:] < arr[:-1]).any()
sorted_minor = self.panel.sortlevel(level=1)
self.assertTrue(is_sorted(sorted_minor.index.labels[1]))
sorted_major = sorted_minor.sortlevel(level=0)
self.assertTrue(is_sorted(sorted_major.index.labels[0]))
def test_to_string(self):
buf = StringIO()
self.panel.to_string(buf)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.index.levels[0]
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(start, end)
        # TODO truncate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
trunced = self.panel.truncate(before=start).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(before=start)
        # TODO truncate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
trunced = self.panel.truncate(after=end).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(after=end)
        # TODO truncate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
# truncate on dates that aren't in there
wp = self.panel.to_panel()
new_index = wp.major_axis[::5]
wp2 = wp.reindex(major=new_index)
lp2 = wp2.to_frame()
lp_trunc = lp2.truncate(wp.major_axis[2], wp.major_axis[-2])
wp_trunc = wp2.truncate(wp.major_axis[2], wp.major_axis[-2])
assert_panel_equal(wp_trunc, lp_trunc.to_panel())
# throw proper exception
self.assertRaises(Exception, lp2.truncate, wp.major_axis[-2],
wp.major_axis[2])
def test_axis_dummies(self):
from pandas.core.reshape import make_axis_dummies
minor_dummies = make_axis_dummies(self.panel, 'minor')
self.assertEqual(len(minor_dummies.columns),
len(self.panel.index.levels[1]))
major_dummies = make_axis_dummies(self.panel, 'major')
self.assertEqual(len(major_dummies.columns),
len(self.panel.index.levels[0]))
mapping = {'A': 'one', 'B': 'one', 'C': 'two', 'D': 'two'}
transformed = make_axis_dummies(self.panel, 'minor',
transform=mapping.get)
self.assertEqual(len(transformed.columns), 2)
self.assert_numpy_array_equal(transformed.columns, ['one', 'two'])
# TODO: test correctness
def test_get_dummies(self):
from pandas.core.reshape import get_dummies, make_axis_dummies
self.panel['Label'] = self.panel.index.labels[1]
minor_dummies = make_axis_dummies(self.panel, 'minor')
dummies = get_dummies(self.panel['Label'])
self.assert_numpy_array_equal(dummies.values, minor_dummies.values)
def test_mean(self):
means = self.panel.mean(level='minor')
# test versus Panel version
wide_means = self.panel.to_panel().mean('major')
assert_frame_equal(means, wide_means)
def test_sum(self):
sums = self.panel.sum(level='minor')
# test versus Panel version
wide_sums = self.panel.to_panel().sum('major')
assert_frame_equal(sums, wide_sums)
def test_count(self):
index = self.panel.index
major_count = self.panel.count(level=0)['ItemA']
labels = index.labels[0]
for i, idx in enumerate(index.levels[0]):
self.assertEqual(major_count[i], (labels == i).sum())
minor_count = self.panel.count(level=1)['ItemA']
labels = index.labels[1]
for i, idx in enumerate(index.levels[1]):
self.assertEqual(minor_count[i], (labels == i).sum())
def test_join(self):
lp1 = self.panel.filter(['ItemA', 'ItemB'])
lp2 = self.panel.filter(['ItemC'])
joined = lp1.join(lp2)
self.assertEqual(len(joined.columns), 3)
self.assertRaises(Exception, lp1.join,
self.panel.filter(['ItemB', 'ItemC']))
def test_pivot(self):
from pandas.core.reshape import _slow_pivot
one, two, three = (np.array([1, 2, 3, 4, 5]),
np.array(['a', 'b', 'c', 'd', 'e']),
np.array([1, 2, 3, 5, 4.]))
df = pivot(one, two, three)
self.assertEqual(df['a'][1], 1)
self.assertEqual(df['b'][2], 2)
self.assertEqual(df['c'][3], 3)
self.assertEqual(df['d'][4], 5)
self.assertEqual(df['e'][5], 4)
assert_frame_equal(df, _slow_pivot(one, two, three))
# weird overlap, TODO: test?
a, b, c = (np.array([1, 2, 3, 4, 4]),
np.array(['a', 'a', 'a', 'a', 'a']),
np.array([1., 2., 3., 4., 5.]))
self.assertRaises(Exception, pivot, a, b, c)
# corner case, empty
df = pivot(np.array([]), np.array([]), np.array([]))
def test_monotonic():
pos = np.array([1, 2, 3, 5])
def _monotonic(arr):
return not (arr[1:] < arr[:-1]).any()
assert _monotonic(pos)
neg = np.array([1, 2, 3, 4, 3])
assert not _monotonic(neg)
neg2 = np.array([5, 1, 2, 3, 4, 5])
assert not _monotonic(neg2)
def test_panel_index():
index = panelm.panel_index([1, 2, 3, 4], [1, 2, 3])
expected = MultiIndex.from_arrays([np.tile(
[1, 2, 3, 4], 3), np.repeat(
[1, 2, 3], 4)])
assert (index.equals(expected))
def test_import_warnings():
# GH8152
panel = Panel(np.random.rand(3, 3, 3))
with | assert_produces_warning() | pandas.util.testing.assert_produces_warning |
#!/usr/bin/env python3
import re
import glob
from numpy import nan
from os.path import isfile
from pandas import DataFrame, read_pickle, concat
from lib.yababay.corpus import Ethnical
ethnical_corpus = Ethnical()
ethnical_df = None
re_filename = re.compile(r'.*\/([^\/]+\.txt)$')
CORPUS_DIR = '../assets/corpus'
text_files = [re.match(re_filename, filename).group(1) for filename in glob.glob(f'{CORPUS_DIR}/*.txt')]
for filename in text_files:
pickle_filename = f'{CORPUS_DIR}/{filename.replace(".txt", "-ethnical.pkl")}'
if isfile(pickle_filename):
print('ok', filename)
df = read_pickle(pickle_filename)
df['filename'] = [filename.replace('.txt', '_count'), filename.replace('.txt', '_sents')]
if ethnical_df is None:
ethnical_df = df
else:
ethnical_df = | concat([ethnical_df, df]) | pandas.concat |
"""Tests for pylola"""
import unittest
import pandas as pd
from pandas.testing import assert_frame_equal
import pylola
class TestRunLOLA(unittest.TestCase):
"""Tests for the run_lola functions."""
@classmethod
def setUp(cls):
cls.query1 = pd.read_csv(
"./tests/test_files/query1.bed", sep="\t", header=None
)[[0, 1, 2]].rename(columns={0: "chrom", 1: "start", 2: "end"})
cls.target1 = | pd.read_csv("./tests/test_files/target1.bed", sep="\t") | pandas.read_csv |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
                    # Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
                    # Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
        # similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
        # gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
self.assertRaises(ValueError, s.drop, 'bc')
self.assertRaises(ValueError, s.drop, ('a', ))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.ix[1:]
assert_series_equal(result, expected)
# bad axis
self.assertRaises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
self.assertTrue(s.index.is_object())
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
self.assertTrue((aa.reindex(diff_a) == fill).all())
if len(diff_b) > 0:
self.assertTrue((ab.reindex(diff_b) == fill).all())
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
self.assertEqual(aa.name, 'ts')
self.assertEqual(ea.name, 'ts')
self.assertEqual(ab.name, 'ts')
self.assertEqual(eb.name, 'ts')
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
_check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
_check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
_check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
self.assertFalse((a[:5] == 5).any())
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
self.assertTrue((a[:5] == 5).all())
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
self.assertFalse((b[:3] == 5).any())
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
self.assertTrue((b[:2] == 5).all())
def test_align_sameindex(self):
a, b = self.ts.align(self.ts, copy=False)
self.assertIs(a.index, self.ts.index)
self.assertIs(b.index, self.ts.index)
# a, b = self.ts.align(self.ts, copy=True)
# self.assertIsNot(a.index, self.ts.index)
# self.assertIsNot(b.index, self.ts.index)
def test_align_multiindex(self):
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(self):
identity = self.series.reindex(self.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
self.assertTrue(np.may_share_memory(self.series.index,
identity.index))
except (AttributeError):
pass
self.assertTrue(identity.index.is_(self.series.index))
self.assertTrue(identity.index.identical(self.series.index))
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
self.assertEqual(val, self.series[idx])
subIndex2 = self.ts.index[10:20]
subTS = self.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
self.assertEqual(val, self.ts[idx])
stuffSeries = self.ts.reindex(subIndex)
self.assertTrue(np.isnan(stuffSeries).all())
# This is extremely important for the Cython code to not screw up
nonContigIndex = self.ts.index[::2]
subNonContig = self.ts.reindex(nonContigIndex)
for idx, val in compat.iteritems(subNonContig):
self.assertEqual(val, self.ts[idx])
        # return a copy with the same index here
result = self.ts.reindex()
self.assertFalse((result is self.ts))
def test_reindex_nan(self):
ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])
i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype('object')
# reindex coerces index.dtype to float, loc/iloc doesn't
assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_corner(self):
# (don't forget to fix this) I think it's fixed
self.empty.reindex(self.ts.index, method='pad') # it works
# corner case: pad empty series
reindexed = self.empty.reindex(self.ts.index, method='pad')
# pass non-Index
reindexed = self.ts.reindex(list(self.ts.index))
assert_series_equal(self.ts, reindexed)
# bad fill method
ts = self.ts[::2]
self.assertRaises(Exception, ts.reindex, self.ts.index, method='foo')
def test_reindex_pad(self):
s = Series(np.arange(10), dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a', 'g', 'c', 'f']
expected = Series([1, 1, 3, 3], index=new_index)
# this changes dtype because the ffill happens after
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
expected = Series([1, 5, 3, 5], index=new_index)
result = s.reindex(new_index, method='ffill')
assert_series_equal(result, expected)
        # inference of new dtype
s = Series([True, False, False, True], index=list('abcd'))
new_index = 'agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True, True, False], index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False, index=lrange(0, 5))
result = s.shift(1).fillna(method='bfill')
        expected = Series(False, index=lrange(0, 5))
import glob # Unix style pathname pattern expansion
import re # Regular expression operations
import numpy as np # fundamental package for scientific computing
from obspy.core import UTCDateTime # framework for processing seismological data
import datetime # Basic date and time types
import scipy.io # ecosystem of open-source software for mathematics, science, and engineering
import pandas as pd # open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools
import matplotlib.pyplot as plt #
from obspy.geodetics.base import gps2dist_azimuth
### Import GeoSEA Modules ###
from .extract_df import *
from .change2dateindex import *
from .read_data import *
from .read_id import *
### Global Variables ###
GMT_DATEFORMAT = '%Y-%m-%dT%H:%M'
def read(starttime=None, endtime=None, pathname=None, writefile=True):
""" Reads data from *csv files.
Note that the *csv files have to be unique for each station!
It needs:
starttime (optional) ... no measurement before this time is used (format
'YYYY-MM-DD hh:mm:ss')
endtime (optional) ... no measurement after this time is used (format
'YYYY-MM-DD hh:mm:ss')
pathname (optional) ... location of input files (default ../RAW/)
writefile (optional) ... if True files containing all read-in parameters
will be created in current directory, if False data will just be
returned (default True)
It returns:
ID ... an 1-dim list with station IDs
st_series ... an 1-dim list with pandas.DataFrame with columns:
temperature ('hrt'), pressure ('prs'), sound speed ('ssp'), temperature
from pressure ('tpr'), inclinometer data ('pitch','roll'), battery ('bat','vlt')
and pages ('pag') with corresponding times of measurement for each beacon
(same order as items in ID)
bsl_series ... an 1-dim list with pandas.DataFrame with baseline
measurements: ID of other station ('range_ID'), traveltime ('range')
and turn around time ('TAT') with corresponding times of measurement
for each beacon (same order as items in ID)
It further writes human readable files for pressure, inclinometer
data, battery, and pages, respectively.
"""
ID = []
if pathname is None:
pathname = '../RAW/'
ID = read_id(pathname)
ifiles = glob.glob(pathname + 'Data_*_*_*.csv')
#-------------------------------------------------------------------------------
# Open and merge all Raw Files
#-------------------------------------------------------------------------------
st_series = []
bsl_series = []
# pre-define column names (needed because of different number of columns
# per row in input files)
my_cols = ["A","B","C","D","E","F","G","H","I","J"]
print('-------------------------------------------------------------------------------\n')
print('GeoSEA Python Module v1.21 20 July 2020\n')
print('GEOMAR Helmholtz Centre for Ocean Research Kiel')
print('-------------------------------------------------------------------------------\n\n')
for j,station in enumerate(ID):
print('\nData Processing for Station: ' + station)
print('-------------------------------------------------------------------------------')
print('Open Files:')
# create empty pandas.DataFrame for storing of data from file
all_data = pd.DataFrame()
for i,data in enumerate(ifiles):
stationname = data.split('_', 3)
if station in stationname:
print(data)
# reads data from csv-file using pandas
# my_cols ... pre-defined column names
# skiprow=13 ... skips first 13 rows
# index_col=0 ... uses first column as index
# final dataframe has columns 'B'-'J' (0-9) and index column
# with ['PAG','BSL',...]
                curfile = pd.read_csv(data, names=my_cols, skiprows=13, index_col=0, low_memory=False)
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.decomposition import pca
sns.set_style("whitegrid")
class Weights:
def __init__(self, tumor_path: pd.DataFrame, sample_dir: str):
self.tumor_path = tumor_path
self.tumor = self._load_tumor()
self.genes = self.tumor.columns[5:]
self.sample_dir = sample_dir
self.df = self._weight_df()
self.perc = self._perc_df()
self.num_samples = len(self.df["sample"].unique())
def _weight_df(self) -> pd.DataFrame:
"""
Creates DataFrame of sample weights from a directory of samples
        Columns: normal_tissue, Median, tissue, sample
Returns:
DataFrame of Weights
"""
# DataFrame: cols=tissue, normal_tissue, weight
weights = []
tissues = self.tumor.tissue
for sample in os.listdir(self.sample_dir):
sample_tissue = tissues.loc[sample]
w = pd.read_csv(
os.path.join(self.sample_dir, sample, "weights.tsv"), sep="\t"
)
w.columns = ["normal_tissue", "Median", "std"]
w["tissue"] = sample_tissue
w["sample"] = sample
weights.append(w.drop("std", axis=1))
        return pd.concat(weights)
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import datetime
import hashlib
import json
import os
import tempfile
from multiprocessing import current_process
from pathlib import Path
from random import randint
from time import sleep
import pandas as pd
from azureml.automl.core.shared import constants
from azureml.automl.core.shared.exceptions import (AutoMLException,
ClientException, ErrorTypes)
from azureml.automl.core.shared.utilities import get_error_code
from azureml.core import Run
from azureml.core.model import Model
from azureml.train.automl import AutoMLConfig
from azureml_user.parallel_run import EntryScript
from train_automl_helper import compose_logs, str2bool
current_step_run = Run.get_context()
# This is used by UI to display the many model settings
many_model_run_properties = {'many_models_run': True}
LOG_NAME = "user_log"
parser = argparse.ArgumentParser("split")
parser.add_argument("--process_count_per_node", default=1, type=int, help="number of processes per node")
parser.add_argument("--retrain_failed_models", default=False, type=str2bool, help="retrain failed models only")
args, _ = parser.parse_known_args()
def read_from_json():
full_path = Path(__file__).absolute().parent
with open(str(full_path) + "/automlconfig.json") as json_file:
return json.load(json_file)
automl_settings = read_from_json()
# ''"{\"task\": \"forecasting\", \"iteration_timeout_minutes\": 10, \"iterations\": 10, \"n_cross_validations\": 3,
# \"primary_metric\": \"accuracy\", \"preprocess\": false, \"verbosity\": 20, \"label_column_name\": \"Quantity\",
# \"debug_log\": \"automl_oj_sales_errors.log\", \"time_column_name\": \"WeekStarting\", \"max_horizon\": 6,
# \"drop_column_names\": [\"logQuantity\"], \"group_column_names\": [\"Store\", \"Brand\"]}"''
timestamp_column = automl_settings.get('time_column_name', None)
grain_column_names = automl_settings.get('grain_column_names', [])
group_column_names = automl_settings.get('group_column_names', [])
max_horizon = automl_settings.get('max_horizon', 0)
target_column = automl_settings.get('label_column_name', None)
print("max_horizon: {}".format(max_horizon))
print("target_column: {}".format(target_column))
print("timestamp_column: {}".format(timestamp_column))
print("group_column_names: {}".format(group_column_names))
print("grain_column_names: {}".format(grain_column_names))
print("retrain_failed_models: {}".format(args.retrain_failed_models))
def init():
entry_script = EntryScript()
logger = entry_script.logger
output_folder = os.path.join(os.environ.get("AZ_BATCHAI_INPUT_AZUREML", ""), "temp/output")
working_dir = os.environ.get("AZ_BATCHAI_OUTPUT_logs", "")
ip_addr = os.environ.get("AZ_BATCHAI_WORKER_IP", "")
log_dir = os.path.join(working_dir, "user", ip_addr, current_process().name)
t_log_dir = Path(log_dir)
t_log_dir.mkdir(parents=True, exist_ok=True)
automl_settings['many_models'] = True
automl_settings['many_models_process_count_per_node'] = args.process_count_per_node
debug_log = automl_settings.get('debug_log', None)
if debug_log is not None:
automl_settings['debug_log'] = os.path.join(log_dir, debug_log)
automl_settings['path'] = tempfile.mkdtemp()
print(automl_settings['debug_log'])
logger.info(f"{__file__}.AutoML debug log:{debug_log}")
logger.info(f"{__file__}.output_folder:{output_folder}")
logger.info("init()")
sleep(randint(1, 120))
def train_model(file_path, data, logger):
file_name = file_path.split('/')[-1][:-4]
print(file_name)
logger.info("in train_model")
print('data')
print(data.head(5))
automl_config = AutoMLConfig(training_data=data,
**automl_settings)
logger.info("submit_child")
local_run = current_step_run.submit_child(automl_config, show_output=False)
local_run.add_properties({
k: str(many_model_run_properties[k])
for k in many_model_run_properties
})
logger.info(local_run)
print(local_run)
local_run.wait_for_completion(show_output=True)
best_child_run, fitted_model = local_run.get_output()
return fitted_model, local_run, best_child_run
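# Sketch of the expected call (presumably issued from run() for each input file; the argument
# names below are the ones already defined in this script, nothing new is introduced):
#   fitted_model, local_run, best_child_run = train_model(file_path, data, logger)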
def run(input_data):
entry_script = EntryScript()
logger = entry_script.logger
os.makedirs('./outputs', exist_ok=True)
resultList = []
model_name = None
current_run = None
error_message = None
error_code = None
error_type = None
tags_dict = None
for file in input_data:
logs = []
date1 = datetime.datetime.now()
logger.info('start (' + file + ') ' + str(datetime.datetime.now()))
file_path = file
file_name_with_extension = os.path.basename(file_path)
file_name, file_extension = os.path.splitext(file_name_with_extension)
try:
if file_extension.lower() == ".parquet":
data = pd.read_parquet(file_path)
else:
                data = pd.read_csv(file_path, parse_dates=[timestamp_column])
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import copy
import pandas as pd
import numpy as np
import numpy.testing as npt
from skbio.util._testing import assert_data_frame_almost_equal
class MetadataMixinTests:
def test_constructor_invalid_type(self):
for md in (0, 'a', ('f', 'o', 'o'), np.array([]), pd.DataFrame()):
with self.assertRaisesRegex(TypeError, 'metadata must be a dict'):
self._metadata_constructor_(metadata=md)
def test_constructor_no_metadata(self):
for md in None, {}:
obj = self._metadata_constructor_(metadata=md)
self.assertEqual(obj.metadata, {})
def test_constructor_with_metadata(self):
obj = self._metadata_constructor_(metadata={'foo': 'bar'})
self.assertEqual(obj.metadata, {'foo': 'bar'})
obj = self._metadata_constructor_(
metadata={'': '', 123: {'a': 'b', 'c': 'd'}})
self.assertEqual(obj.metadata, {'': '', 123: {'a': 'b', 'c': 'd'}})
def test_constructor_makes_shallow_copy_of_metadata(self):
md = {'foo': 'bar', 42: []}
obj = self._metadata_constructor_(metadata=md)
self.assertEqual(obj.metadata, md)
self.assertIsNot(obj.metadata, md)
md['foo'] = 'baz'
self.assertEqual(obj.metadata, {'foo': 'bar', 42: []})
md[42].append(True)
self.assertEqual(obj.metadata, {'foo': 'bar', 42: [True]})
def test_eq(self):
self.assertReallyEqual(
self._metadata_constructor_(metadata={'foo': 42}),
self._metadata_constructor_(metadata={'foo': 42}))
self.assertReallyEqual(
self._metadata_constructor_(metadata={'foo': 42, 123: {}}),
self._metadata_constructor_(metadata={'foo': 42, 123: {}}))
def test_eq_missing_metadata(self):
self.assertReallyEqual(self._metadata_constructor_(),
self._metadata_constructor_())
self.assertReallyEqual(self._metadata_constructor_(),
self._metadata_constructor_(metadata={}))
self.assertReallyEqual(self._metadata_constructor_(metadata={}),
self._metadata_constructor_(metadata={}))
def test_ne(self):
# Both have metadata.
obj1 = self._metadata_constructor_(metadata={'id': 'foo'})
obj2 = self._metadata_constructor_(metadata={'id': 'bar'})
self.assertReallyNotEqual(obj1, obj2)
# One has metadata.
obj1 = self._metadata_constructor_(metadata={'id': 'foo'})
obj2 = self._metadata_constructor_()
self.assertReallyNotEqual(obj1, obj2)
def test_copy_metadata_none(self):
obj = self._metadata_constructor_()
obj_copy = copy.copy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
self.assertEqual(obj.metadata, {})
self.assertEqual(obj_copy.metadata, {})
self.assertIsNot(obj.metadata, obj_copy.metadata)
def test_copy_metadata_empty(self):
obj = self._metadata_constructor_(metadata={})
obj_copy = copy.copy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
self.assertEqual(obj.metadata, {})
self.assertEqual(obj_copy.metadata, {})
self.assertIsNot(obj.metadata, obj_copy.metadata)
def test_copy_with_metadata(self):
obj = self._metadata_constructor_(metadata={'foo': [1]})
obj_copy = copy.copy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
self.assertEqual(obj.metadata, {'foo': [1]})
self.assertEqual(obj_copy.metadata, {'foo': [1]})
self.assertIsNot(obj.metadata, obj_copy.metadata)
self.assertIs(obj.metadata['foo'], obj_copy.metadata['foo'])
obj_copy.metadata['foo'].append(2)
obj_copy.metadata['foo2'] = 42
self.assertEqual(obj_copy.metadata, {'foo': [1, 2], 'foo2': 42})
self.assertEqual(obj.metadata, {'foo': [1, 2]})
def test_deepcopy_metadata_none(self):
obj = self._metadata_constructor_()
obj_copy = copy.deepcopy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
self.assertEqual(obj.metadata, {})
self.assertEqual(obj_copy.metadata, {})
self.assertIsNot(obj.metadata, obj_copy.metadata)
def test_deepcopy_metadata_empty(self):
obj = self._metadata_constructor_(metadata={})
obj_copy = copy.deepcopy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
self.assertEqual(obj.metadata, {})
self.assertEqual(obj_copy.metadata, {})
self.assertIsNot(obj.metadata, obj_copy.metadata)
def test_deepcopy_with_metadata(self):
obj = self._metadata_constructor_(metadata={'foo': [1]})
obj_copy = copy.deepcopy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
self.assertEqual(obj.metadata, {'foo': [1]})
self.assertEqual(obj_copy.metadata, {'foo': [1]})
self.assertIsNot(obj.metadata, obj_copy.metadata)
self.assertIsNot(obj.metadata['foo'], obj_copy.metadata['foo'])
obj_copy.metadata['foo'].append(2)
obj_copy.metadata['foo2'] = 42
self.assertEqual(obj_copy.metadata, {'foo': [1, 2], 'foo2': 42})
self.assertEqual(obj.metadata, {'foo': [1]})
def test_deepcopy_memo_is_respected(self):
# Basic test to ensure deepcopy's memo is passed through to recursive
# deepcopy calls.
obj = self._metadata_constructor_(metadata={'foo': 'bar'})
memo = {}
copy.deepcopy(obj, memo)
self.assertGreater(len(memo), 2)
def test_metadata_getter(self):
obj = self._metadata_constructor_(
metadata={42: 'foo', ('hello', 'world'): 43})
self.assertIsInstance(obj.metadata, dict)
self.assertEqual(obj.metadata, {42: 'foo', ('hello', 'world'): 43})
obj.metadata[42] = 'bar'
self.assertEqual(obj.metadata, {42: 'bar', ('hello', 'world'): 43})
def test_metadata_getter_no_metadata(self):
obj = self._metadata_constructor_()
self.assertIsInstance(obj.metadata, dict)
self.assertEqual(obj.metadata, {})
def test_metadata_setter(self):
obj = self._metadata_constructor_()
self.assertEqual(obj.metadata, {})
obj.metadata = {'hello': 'world'}
self.assertEqual(obj.metadata, {'hello': 'world'})
obj.metadata = {}
self.assertEqual(obj.metadata, {})
def test_metadata_setter_makes_shallow_copy(self):
obj = self._metadata_constructor_()
md = {'foo': 'bar', 42: []}
obj.metadata = md
self.assertEqual(obj.metadata, md)
self.assertIsNot(obj.metadata, md)
md['foo'] = 'baz'
self.assertEqual(obj.metadata, {'foo': 'bar', 42: []})
md[42].append(True)
self.assertEqual(obj.metadata, {'foo': 'bar', 42: [True]})
def test_metadata_setter_invalid_type(self):
obj = self._metadata_constructor_(metadata={123: 456})
for md in (None, 0, 'a', ('f', 'o', 'o'), np.array([]),
pd.DataFrame()):
with self.assertRaisesRegex(TypeError, 'metadata must be a dict'):
obj.metadata = md
self.assertEqual(obj.metadata, {123: 456})
def test_metadata_deleter(self):
obj = self._metadata_constructor_(metadata={'foo': 'bar'})
self.assertEqual(obj.metadata, {'foo': 'bar'})
del obj.metadata
self.assertEqual(obj.metadata, {})
# Delete again.
del obj.metadata
self.assertEqual(obj.metadata, {})
obj = self._metadata_constructor_()
self.assertEqual(obj.metadata, {})
del obj.metadata
self.assertEqual(obj.metadata, {})
def test_has_metadata(self):
obj = self._metadata_constructor_()
self.assertFalse(obj.has_metadata())
self.assertFalse(
self._metadata_constructor_(metadata={}).has_metadata())
self.assertTrue(
self._metadata_constructor_(metadata={'': ''}).has_metadata())
self.assertTrue(
self._metadata_constructor_(
metadata={'foo': 42}).has_metadata())
class PositionalMetadataMixinTests:
def test_constructor_invalid_positional_metadata_type(self):
with self.assertRaisesRegex(TypeError,
'Invalid positional metadata. Must be '
'consumable by `pd.DataFrame` constructor.'
' Original pandas error message: '):
self._positional_metadata_constructor_(0, positional_metadata=2)
def test_constructor_positional_metadata_len_mismatch(self):
# Zero elements.
with self.assertRaisesRegex(ValueError, '\(0\).*\(4\)'):
self._positional_metadata_constructor_(4, positional_metadata=[])
# Not enough elements.
with self.assertRaisesRegex(ValueError, '\(3\).*\(4\)'):
self._positional_metadata_constructor_(
4, positional_metadata=[2, 3, 4])
# Too many elements.
with self.assertRaisesRegex(ValueError, '\(5\).*\(4\)'):
self._positional_metadata_constructor_(
4, positional_metadata=[2, 3, 4, 5, 6])
# Series not enough rows.
with self.assertRaisesRegex(ValueError, '\(3\).*\(4\)'):
self._positional_metadata_constructor_(
4, positional_metadata=pd.Series(range(3)))
# Series too many rows.
with self.assertRaisesRegex(ValueError, '\(5\).*\(4\)'):
self._positional_metadata_constructor_(
4, positional_metadata=pd.Series(range(5)))
# DataFrame not enough rows.
with self.assertRaisesRegex(ValueError, '\(3\).*\(4\)'):
self._positional_metadata_constructor_(
4, positional_metadata=pd.DataFrame({'quality': range(3)}))
# DataFrame too many rows.
with self.assertRaisesRegex(ValueError, '\(5\).*\(4\)'):
self._positional_metadata_constructor_(
4, positional_metadata=pd.DataFrame({'quality': range(5)}))
# Empty DataFrame wrong size.
with self.assertRaisesRegex(ValueError, '\(2\).*\(3\)'):
self._positional_metadata_constructor_(
3, positional_metadata=pd.DataFrame(index=range(2)))
def test_constructor_no_positional_metadata(self):
# Length zero with missing/empty positional metadata.
for empty in None, {}, pd.DataFrame():
obj = self._positional_metadata_constructor_(
0, positional_metadata=empty)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=np.arange(0)))
# Nonzero length with missing positional metadata.
obj = self._positional_metadata_constructor_(
3, positional_metadata=None)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=np.arange(3)))
def test_constructor_with_positional_metadata_len_zero(self):
for data in [], (), np.array([]):
obj = self._positional_metadata_constructor_(
0, positional_metadata={'foo': data})
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': data}, index=np.arange(0)))
def test_constructor_with_positional_metadata_len_one(self):
for data in [2], (2, ), np.array([2]):
obj = self._positional_metadata_constructor_(
1, positional_metadata={'foo': data})
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': data}, index=np.arange(1)))
def test_constructor_with_positional_metadata_len_greater_than_one(self):
for data in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
(0, 42, 42, 1, 0, 8, 100, 0, 0),
np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):
obj = self._positional_metadata_constructor_(
9, positional_metadata={'foo': data})
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': data}, index=np.arange(9)))
def test_constructor_with_positional_metadata_multiple_columns(self):
obj = self._positional_metadata_constructor_(
5, positional_metadata={'foo': np.arange(5),
'bar': np.arange(5)[::-1]})
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': np.arange(5),
'bar': np.arange(5)[::-1]}, index=np.arange(5)))
def test_constructor_with_positional_metadata_custom_index(self):
df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
index=['a', 'b', 'c', 'd', 'e'])
obj = self._positional_metadata_constructor_(
5, positional_metadata=df)
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': np.arange(5),
'bar': np.arange(5)[::-1]}, index=np.arange(5)))
def test_constructor_makes_shallow_copy_of_positional_metadata(self):
df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=['a', 'b', 'c'])
obj = self._positional_metadata_constructor_(
3, positional_metadata=df)
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=np.arange(3)))
self.assertIsNot(obj.positional_metadata, df)
# Original df is not mutated.
orig_df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=['a', 'b', 'c'])
assert_data_frame_almost_equal(df, orig_df)
# Change values of column (using same dtype).
df['foo'] = [42, 42, 42]
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=np.arange(3)))
# Change single value of underlying data.
df.values[0][0] = 10
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=np.arange(3)))
# Mutate list (not a deep copy).
df['bar'][0].append(42)
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},
index=np.arange(3)))
def test_eq_basic(self):
obj1 = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [1, 2, 3]})
obj2 = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [1, 2, 3]})
self.assertReallyEqual(obj1, obj2)
def test_eq_from_different_source(self):
obj1 = self._positional_metadata_constructor_(
3, positional_metadata={'foo': np.array([1, 2, 3])})
obj2 = self._positional_metadata_constructor_(
3, positional_metadata=pd.DataFrame({'foo': [1, 2, 3]},
index=['foo', 'bar', 'baz']))
self.assertReallyEqual(obj1, obj2)
def test_eq_missing_positional_metadata(self):
for empty in None, {}, pd.DataFrame(), pd.DataFrame(index=[]):
obj = self._positional_metadata_constructor_(
0, positional_metadata=empty)
self.assertReallyEqual(
obj,
self._positional_metadata_constructor_(0))
self.assertReallyEqual(
obj,
self._positional_metadata_constructor_(
0, positional_metadata=empty))
for empty in None, pd.DataFrame(index=['a', 'b']):
obj = self._positional_metadata_constructor_(
2, positional_metadata=empty)
self.assertReallyEqual(
obj,
self._positional_metadata_constructor_(2))
self.assertReallyEqual(
obj,
self._positional_metadata_constructor_(
2, positional_metadata=empty))
def test_ne_len_zero(self):
# Both have positional metadata.
obj1 = self._positional_metadata_constructor_(
0, positional_metadata={'foo': []})
obj2 = self._positional_metadata_constructor_(
0, positional_metadata={'foo': [], 'bar': []})
self.assertReallyNotEqual(obj1, obj2)
# One has positional metadata.
obj1 = self._positional_metadata_constructor_(
0, positional_metadata={'foo': []})
obj2 = self._positional_metadata_constructor_(0)
self.assertReallyNotEqual(obj1, obj2)
def test_ne_len_greater_than_zero(self):
# Both have positional metadata.
obj1 = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [1, 2, 3]})
obj2 = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [1, 2, 2]})
self.assertReallyNotEqual(obj1, obj2)
# One has positional metadata.
obj1 = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [1, 2, 3]})
obj2 = self._positional_metadata_constructor_(3)
self.assertReallyNotEqual(obj1, obj2)
def test_ne_len_mismatch(self):
obj1 = self._positional_metadata_constructor_(
3, positional_metadata=pd.DataFrame(index=range(3)))
obj2 = self._positional_metadata_constructor_(
2, positional_metadata=pd.DataFrame(index=range(2)))
self.assertReallyNotEqual(obj1, obj2)
def test_copy_positional_metadata_none(self):
obj = self._positional_metadata_constructor_(3)
obj_copy = copy.copy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=range(3)))
assert_data_frame_almost_equal(obj_copy.positional_metadata,
pd.DataFrame(index=range(3)))
self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)
def test_copy_positional_metadata_empty(self):
obj = self._positional_metadata_constructor_(
3, positional_metadata=pd.DataFrame(index=range(3)))
obj_copy = copy.copy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=range(3)))
assert_data_frame_almost_equal(obj_copy.positional_metadata,
pd.DataFrame(index=range(3)))
self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)
def test_copy_with_positional_metadata(self):
obj = self._positional_metadata_constructor_(
4, positional_metadata={'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]})
obj_copy = copy.copy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]}, index=range(4)))
assert_data_frame_almost_equal(
obj_copy.positional_metadata,
pd.DataFrame({'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]}, index=range(4)))
self.assertIsNot(obj.positional_metadata,
obj_copy.positional_metadata)
self.assertIsNot(obj.positional_metadata.values,
obj_copy.positional_metadata.values)
self.assertIs(obj.positional_metadata.loc[0, 'bar'],
obj_copy.positional_metadata.loc[0, 'bar'])
obj_copy.positional_metadata.loc[0, 'bar'].append(1)
obj_copy.positional_metadata.loc[0, 'baz'] = 43
assert_data_frame_almost_equal(
obj_copy.positional_metadata,
pd.DataFrame({'bar': [[1], [], [], []],
'baz': [43, 42, 42, 42]}))
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'bar': [[1], [], [], []],
'baz': [42, 42, 42, 42]}))
def test_deepcopy_positional_metadata_none(self):
obj = self._positional_metadata_constructor_(3)
obj_copy = copy.deepcopy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=range(3)))
assert_data_frame_almost_equal(obj_copy.positional_metadata,
pd.DataFrame(index=range(3)))
self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)
def test_deepcopy_positional_metadata_empty(self):
obj = self._positional_metadata_constructor_(
3, positional_metadata=pd.DataFrame(index=range(3)))
obj_copy = copy.deepcopy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=range(3)))
assert_data_frame_almost_equal(obj_copy.positional_metadata,
pd.DataFrame(index=range(3)))
self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)
def test_deepcopy_with_positional_metadata(self):
obj = self._positional_metadata_constructor_(
4, positional_metadata={'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]})
obj_copy = copy.deepcopy(obj)
self.assertEqual(obj, obj_copy)
self.assertIsNot(obj, obj_copy)
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]}, index=range(4)))
assert_data_frame_almost_equal(
obj_copy.positional_metadata,
pd.DataFrame({'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]}, index=range(4)))
self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)
self.assertIsNot(obj.positional_metadata.values,
obj_copy.positional_metadata.values)
self.assertIsNot(obj.positional_metadata.loc[0, 'bar'],
obj_copy.positional_metadata.loc[0, 'bar'])
obj_copy.positional_metadata.loc[0, 'bar'].append(1)
obj_copy.positional_metadata.loc[0, 'baz'] = 43
assert_data_frame_almost_equal(
obj_copy.positional_metadata,
pd.DataFrame({'bar': [[1], [], [], []],
'baz': [43, 42, 42, 42]}))
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]}))
def test_deepcopy_memo_is_respected(self):
# Basic test to ensure deepcopy's memo is passed through to recursive
# deepcopy calls.
obj = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [1, 2, 3]})
memo = {}
copy.deepcopy(obj, memo)
self.assertGreater(len(memo), 2)
def test_positional_metadata_getter(self):
obj = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [22, 22, 0]})
self.assertIsInstance(obj.positional_metadata, pd.DataFrame)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0]}))
# Update existing column.
obj.positional_metadata['foo'] = [42, 42, 43]
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame({'foo': [42, 42, 43]}))
# Add new column.
obj.positional_metadata['foo2'] = [True, False, True]
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': [42, 42, 43],
'foo2': [True, False, True]}))
def test_positional_metadata_getter_no_positional_metadata(self):
obj = self._positional_metadata_constructor_(4)
self.assertIsInstance(obj.positional_metadata, pd.DataFrame)
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame(index=np.arange(4)))
def test_positional_metadata_getter_set_column_series(self):
length = 8
obj = self._positional_metadata_constructor_(
length, positional_metadata={'foo': range(length)})
obj.positional_metadata['bar'] = pd.Series(range(length-3))
# pandas.Series will be padded with NaN if too short.
npt.assert_equal(obj.positional_metadata['bar'],
np.array(list(range(length-3)) + [np.nan]*3))
obj.positional_metadata['baz'] = pd.Series(range(length+3))
# pandas.Series will be truncated if too long.
npt.assert_equal(obj.positional_metadata['baz'],
np.array(range(length)))
def test_positional_metadata_getter_set_column_array(self):
length = 8
obj = self._positional_metadata_constructor_(
length, positional_metadata={'foo': range(length)})
# array-like objects will fail if wrong size.
for array_like in (np.array(range(length-1)), range(length-1),
np.array(range(length+1)), range(length+1)):
with self.assertRaisesRegex(ValueError,
"Length of values does not match "
"length of index"):
obj.positional_metadata['bar'] = array_like
def test_positional_metadata_setter_pandas_consumable(self):
obj = self._positional_metadata_constructor_(3)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=range(3)))
obj.positional_metadata = {'foo': [3, 2, 1]}
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame({'foo': [3, 2, 1]}))
obj.positional_metadata = pd.DataFrame(index=np.arange(3))
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=np.arange(3)))
def test_positional_metadata_setter_data_frame(self):
obj = self._positional_metadata_constructor_(3)
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=range(3)))
obj.positional_metadata = pd.DataFrame({'foo': [3, 2, 1]},
index=['a', 'b', 'c'])
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame({'foo': [3, 2, 1]}))
obj.positional_metadata = pd.DataFrame(index=np.arange(3))
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=np.arange(3)))
def test_positional_metadata_setter_none(self):
obj = self._positional_metadata_constructor_(
0, positional_metadata={'foo': []})
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame({'foo': []}))
# `None` behavior differs from constructor.
obj.positional_metadata = None
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame(index=np.arange(0)))
def test_positional_metadata_setter_makes_shallow_copy(self):
obj = self._positional_metadata_constructor_(3)
df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=['a', 'b', 'c'])
obj.positional_metadata = df
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=np.arange(3)))
self.assertIsNot(obj.positional_metadata, df)
# Original df is not mutated.
orig_df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=['a', 'b', 'c'])
assert_data_frame_almost_equal(df, orig_df)
# Change values of column (using same dtype).
df['foo'] = [42, 42, 42]
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=np.arange(3)))
# Change single value of underlying data.
df.values[0][0] = 10
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
index=np.arange(3)))
# Mutate list (not a deep copy).
df['bar'][0].append(42)
assert_data_frame_almost_equal(
obj.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},
index=np.arange(3)))
def test_positional_metadata_setter_invalid_type(self):
obj = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [1, 2, 42]})
with self.assertRaisesRegex(TypeError,
'Invalid positional metadata. Must be '
'consumable by `pd.DataFrame` constructor.'
' Original pandas error message: '):
obj.positional_metadata = 2
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame({'foo': [1, 2, 42]}))
def test_positional_metadata_setter_len_mismatch(self):
obj = self._positional_metadata_constructor_(
3, positional_metadata={'foo': [1, 2, 42]})
# `None` behavior differs from constructor.
with self.assertRaisesRegex(ValueError, '\(0\).*\(3\)'):
obj.positional_metadata = None
assert_data_frame_almost_equal(obj.positional_metadata,
pd.DataFrame({'foo': [1, 2, 42]}))
with self.assertRaisesRegex(ValueError, '\(4\).*\(3\)'):
obj.positional_metadata = [1, 2, 3, 4]
assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [1, 2, 42]}))
# -*- coding: utf-8 -*-
from unittest import TestCase
import pandas as pd
from pandas.util.testing import assert_frame_equal
from datetime import datetime as dt
from alphaware.base import (Factor,
FactorContainer)
from alphaware.preprocess import FactorStandardizer
from alphaware.enums import FactorType
class TestStandardizer(TestCase):
def test_standardizer(self):
index = pd.MultiIndex.from_product([['2014-01-30', '2014-02-28'], ['001', '002', '003', '004']],
names=['trade_date', 'ticker'])
data1 = pd.DataFrame(index=index, data=[1.0, 1.0, 1.2, 200.0, 0.9, 5.0, 5.0, 5.1])
factor_test1 = Factor(data=data1, name='test1')
        data2 = pd.DataFrame(index=index, data=[2.6, 2.5, 2.8, 2.9, 2.7, 1.9, -10.0, 2.1])
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 13 12:31:33 2017
@author: Astrid
"""
import os
import pandas as pd
import numpy as np
from collections import Counter
import re
import multiprocessing
def getFileList(dir_name, ext=''):
file_dir_list = list()
file_list = list()
for file in os.listdir(dir_name):
# If no extension is specified, create list with all files
if not ext:
file_dir_list.append(os.path.join(dir_name, file))
file_list.append(file)
# If extension is specified, create list with only ext files
elif file.endswith(ext):
file_dir_list.append(os.path.join(dir_name, file))
file_list.append(file)
return file_list, file_dir_list
def string2vec(string):
vec = []
for t in string.split():
try:
vec.append(float(t))
except ValueError:
pass
return vec
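# Example (hypothetical input string): string2vec('d = 0.1 0.05 0.02 m') returns
# [0.1, 0.05, 0.02], because only whitespace-separated tokens that parse as floats are kept.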
def readDPJ(filename):
#Read .dpj file line by line
file_obj = open(filename, 'r', encoding='utf8')
file = file_obj.readlines()
file_obj.close()
del file_obj
# Search in file for lines that need to be changes, save those lines in a dataframe
# Create an array with the x-discretisation grid, an array with the y-discretisation grid and an array with the assignments
x_discretisation = list()
y_discretisation = list()
assignments = pd.DataFrame(columns=['line','type','range','name'])
parameters = pd.DataFrame(columns = ['line','parameter'])
l=23 #start looking on 24th line
# INITIALISATION SETTINGS
# Find start year and start time
while l < len(file):
if 'START_YEAR' in file[l]:
parameters = parameters.append({'line': l,'parameter':'start year'},ignore_index=True)
parameters = parameters.append({'line': l+1,'parameter':'start time'},ignore_index=True)
l=l+4;
break
# If the parameter is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: start year and start time not found')
l=l+1
# MATERIAL PARAMETERS
k=l
# Find air layer properties - included only when using an air layer to implement an interior climate dependent on V, n, HIR and exterior climate
while l < len(file):
if 'air room' in file[l].lower():
while file[l].strip() != '[MATERIAL]' and '; **' not in file[l]:
if 'CE' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air thermal capacity'},ignore_index=True)
l=l+1
continue
elif 'THETA_POR' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air theta_por'},ignore_index=True)
l=l+1
continue
elif 'THETA_EFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air theta_eff'},ignore_index=True)
l=l+1
continue
elif 'THETA_80' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air theta_80'},ignore_index=True)
l=l+1
continue
elif 'Theta_l(RH)' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air sorption curve'},ignore_index=True)
l=l+1
continue
l=l+1
l=l+5
break
        # If the parameter is not found at the end of the file, there is no air layer. We must start looking for the next parameter from the same starting line, so we don't skip part of the file.
elif l == len(file)-2:
l=k
break
l=l+1
# WALLS
# Find wall conditions
while l < len(file):
if '[WALL_DATA]' in file[l]:
parameters = parameters.append({'line': l+2,'parameter':'wall orientation'},ignore_index=True)
parameters = parameters.append({'line': l+3,'parameter':'wall inclination'},ignore_index=True)
parameters = parameters.append({'line': l+4,'parameter':'latitude'},ignore_index=True)
l=l+9
break
# If the parameter is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: wall orientation and inclination not found')
l=l+1
# CLIMATE CONDITIONS
while l < len(file):
if '[CLIMATE_CONDITIONS]' in file[l]:
k=l
break
elif l == len(file)-1:
print('Error: climate conditions section not found')
l=l+1
# Find climatic conditions
# Interior temperature
l=k # start at beginning of climate conditions
while l < len(file):
if 'TEMPER' in file[l] and 'inside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'interior temperature'},ignore_index=True)
break
# If the parameter is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: interior temperature not found')
l=l+1
# Exterior temperature
l=k # start at beginning of climate conditions
while l < len(file):
if 'TEMPER' in file[l] and 'outside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'exterior temperature'},ignore_index=True)
break
# If the parameter is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: interior temperature not found')
l=l+1
# Interior relative humidity
l=k # start at beginning of climate conditions
while l < len(file):
if 'RELHUM' in file[l] and 'inside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'interior relative humidity'},ignore_index=True)
break
l=l+1
# Exterior relative humidity
l=k # start at beginning of climate conditions
while l < len(file):
if 'RELHUM' in file[l] and 'outside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'exterior relative humidity'},ignore_index=True)
break
l=l+1
# Interior vapour pressure
l=k # start at beginning of climate conditions
while l < len(file):
if 'VAPPRES' in file[l] and 'inside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'interior vapour pressure'},ignore_index=True)
break
l=l+1
# Rain load - imposed flux on vertical surface
l=k # start at beginning of climate conditions
while l < len(file):
if 'NORRAIN' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'rain vertical surface'},ignore_index=True)
break
l=l+1
# Rain load - flux on horizontal surface
l=k # start at beginning of climate conditions
while l < len(file):
if 'HORRAIN' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'rain horizontal surface'},ignore_index=True)
break
l=l+1
# Wind direction
l=k # start at beginning of climate conditions
while l < len(file):
if 'WINDDIR' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'wind direction'},ignore_index=True)
break
l=l+1
# Wind velocity
l=k # start at beginning of climate conditions
while l < len(file):
if 'WINDVEL' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'wind velocity'},ignore_index=True)
break
l=l+1
# Direct sun radiation
l=k # start at beginning of climate conditions
while l < len(file):
if 'DIRRAD' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'direct radiation'},ignore_index=True)
break
l=l+1
# Diffuse sun radiation
l=k # start at beginning of climate conditions
while l < len(file):
if 'DIFRAD' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'diffuse radiation'},ignore_index=True)
break
l=l+1
# Cloud covering
l=k # start at beginning of climate conditions
while l < len(file):
if 'CLOUDCOV' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'cloud cover'},ignore_index=True)
break
l=l+1
# Sky radiation
l=k # start at beginning of climate conditions
while l < len(file):
if 'SKYEMISS' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'sky radiation'},ignore_index=True)
break
l=l+1
# Sky temperature
l=k # start at beginning of climate conditions
while l < len(file):
if 'SKYTEMP' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'sky temperature'},ignore_index=True)
break
l=l+1
# BOUNDARY CONDITIONS
while l < len(file):
if '[BOUNDARY_CONDITIONS]' in file[l]:
k=l
break
elif l == len(file)-1:
print('Error: boundary conditions section not found')
l=l+1
# Find exterior heat transfer coefficient
l=k; # start at beginning of boundary conditions
while l < len(file):
if 'HEATCOND' in file[l] and 'outside' in file[l+1].lower():
while file[l].strip() != '[BOUND_COND]':
if 'EXCOEFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'exterior heat transfer coefficient'},ignore_index=True)
if 'EXCH_SLOPE' in file[l+1].strip():
l=l+1
parameters = parameters.append({'line': l,'parameter':'exterior heat transfer coefficient slope'},ignore_index=True)
break
l=l+1
break
l=l+1
# Find interior vapour surface resistance coefficient
l=k # start at beginning of boundary conditions
while l < len(file):
if 'VAPDIFF' in file[l] and 'inside' in file[l+1].lower():
while file[l].strip() != '[BOUND_COND]':
if 'EXCOEFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'interior vapour diffusion transfer coefficient'},ignore_index=True)
break
l=l+1
break
l=l+1
# Find exterior vapour surface resistance coefficient
l=k # start at beginning of boundary conditions
while l < len(file):
if 'VAPDIFF' in file[l] and 'outside' in file[l+1].lower():
while file[l].strip() != '[BOUND_COND]':
if 'EXCOEFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'exterior vapour diffusion transfer coefficient'},ignore_index=True)
if 'EXCH_SLOPE' in file[l+1].strip():
l=l+1
parameters = parameters.append({'line': l,'parameter':'exterior vapour diffusion transfer coefficient slope'},ignore_index=True)
break
l=l+1
break
l=l+1
# Find solar absorption
l=k #start at beginning of boundary conditions
while l < len(file):
if 'SURABSOR' in file[l]:
parameters = parameters.append({'line': l,'parameter':'solar absorption'},ignore_index=True)
break
l=l+1
# Find scale factor catch ratio
l=k #start at beginning of boundary conditions
while l < len(file):
if 'EXPCOEFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'scale factor catch ratio'},ignore_index=True)
break
l=l+1
# DISCRETISATION
while l < len(file):
if '[DISCRETISATION]' in file[l]:
k=l
break
elif l == len(file)-1:
print('Error: discretisation section not found')
l=l+1
# Find discretisation
l=k #start at beginning of discretisation
while l < len(file):
if '[DISCRETISATION]' in file[l]:
x_discr_str = file[l+3]
parameters = parameters.append({'line': l+3,'parameter':'x-discretisation'},ignore_index=True)
y_discr_str = file[l+4]
parameters = parameters.append({'line': l+4,'parameter':'y-discretisation'},ignore_index=True)
# remove characters and convert to vector
x_discretisation = string2vec(x_discr_str)
y_discretisation = string2vec(y_discr_str)
break
# If the discretisation is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: discretisation not found')
l=l+1
# %OUTPUTS
while l < len(file):
if '[OUTPUTS]' in file[l]:
k=l
break
elif l == len(file)-1:
print('Error: outputs section not found')
l=l+1
# Find output folder
l=k # start at beginning of outputs
while l < len(file):
if 'OUTPUT_FOLDER' in file[l]:
parameters = parameters.append({'line': l,'parameter':'output folder'},ignore_index=True)
break
#If the output folder is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: output folder not found')
l=l+1
# Find output files
while l < len(file):
if '[FILES]' in file[l]:
l=l+3
while '; **' not in file[l]:
if 'NAME' in file[l]:
output_file = file[l]
parameters = parameters.append({'line': l,'parameter':output_file[33:]},ignore_index=True)
l=l+5
continue
l=l+1
break
# If the output files are not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: output files not found')
l=l+1
# ASSIGNMENTS
while l < len(file):
if '[ASSIGNMENTS]' in file[l]:
k=l
break
        elif l == len(file)-1:
print('Error: assignments section not found')
l=l+1
# Find assignments
l=k # start at beginning of assignments
while l < len(file):
if 'RANGE' in file[l]:
assignments = assignments.append({'line': l, 'type': file[l-1][30:-1].strip(),'range': [int(i) for i in string2vec(file[l])],'name': file[l+1][30:-1].strip()},ignore_index=True)
l=l+4
continue
l=l+1
    # If the assignments are not found at the end of the file, there is a problem in the code
if assignments.empty:
print('Error: assignments not found')
return file, x_discretisation, y_discretisation, assignments, parameters
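# Usage sketch (the project file name is hypothetical):
#   file, x_discr, y_discr, assignments, parameters = readDPJ('wall_project.dpj')
# 'assignments' and 'parameters' then hold the line numbers that later edits can target.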
def readccd(ccdfile, date=False):
# Find header
with open(ccdfile, 'r') as f:
l = 0
for line in f:
if '0:00:00' in line:
header = l
break
l = l+1
# Read ccd
value = np.loadtxt(ccdfile,skiprows=header,usecols=2,dtype='f').tolist()
if date:
day = np.loadtxt(ccdfile,skiprows=header,usecols=0,dtype='i').tolist()
hour = np.loadtxt(ccdfile,skiprows=header,usecols=1,dtype='U').tolist()
return value, day, hour
else:
return value
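# Usage sketch (the file name is hypothetical); with date=True the day and hour columns are returned as well:
#   temper = readccd('Temperature.ccd')
#   temper, day, hour = readccd('Temperature.ccd', date=True)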
def saveccd(path, value):
days = int(len(value)/24)
df = pd.DataFrame()
df['day'] = np.repeat(list(range(days)),24).tolist()
df['hour'] = ['%02d:00:00' % x for x in range(24)]*days
df['value'] = value
climateparam = re.sub('[0-9_]', '', os.path.basename(path)[:-4])
df.to_csv(path, header=[headerccd(climateparam),'',''], sep=' ', index=False, quotechar=' ')
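# Usage sketch (the path is hypothetical): the climate parameter is inferred from the file name
# by stripping digits and underscores, so '12_Temperature.ccd' is written with the 'TEMPER C' header.
#   saveccd('../climate/12_Temperature.ccd', hourly_temperature_values)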
def headerccd(climateparam):
param_header = pd.DataFrame([{'parameter':'CloudCover', 'header': 'CLOUDCOV ---'},
{'parameter':'DiffuseRadiation', 'header': 'DIFRAD W/m2'},
{'parameter':'DirectRadiation', 'header': 'DIRRAD W/m2'},
{'parameter':'ShortWaveRadiation', 'header': 'SHWRAD W/m2'},
{'parameter':'GlobalRadiation', 'header': 'SKYEMISS W/m2'},
{'parameter':'RelativeHumidity', 'header': 'RELHUM %'},
{'parameter':'VapourPressure', 'header': 'VAPPRES Pa'},
{'parameter':'SkyRadiation', 'header': 'SKYEMISS W/m2'},
{'parameter':'Temperature', 'header': 'TEMPER C'},
{'parameter':'VerticalRain', 'header': 'NORRAIN l/m2h'},
{'parameter':'HorizontalRain', 'header': 'HORRAIN l/m2h'},
{'parameter':'WindDirection', 'header': 'WINDDIR Deg'},
{'parameter':'WindVelocity', 'header': 'WINDVEL m/s'},
])
header = param_header.loc[param_header['parameter'] == climateparam,'header'].tolist()[0]
if len(header) == 0:
        print('Error: could not find climate parameter header')
return header
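# Example: headerccd('Temperature') returns 'TEMPER C', the header string written into the *.ccd file.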
def marker(param):
param_mark = pd.DataFrame([{'parameter': 'wall orientation','marker': 'Deg'},
{'parameter': 'wall inclination', 'marker': 'Deg'},
{'parameter': 'interior temperature', 'marker': 'C'},
{'parameter': 'exterior temperature', 'marker': 'C'},
{'parameter': 'exterior heat transfer coefficient', 'marker': 'W/m2K'},
{'parameter': 'exterior heat transfer coefficient slope', 'marker': 'J/m3K'},
{'parameter': 'interior relative humidity', 'marker': '%'},
{'parameter': 'exterior relative humidity', 'marker': '%'},
{'parameter': 'interior vapour pressure', 'marker': 'Pa'},
{'parameter': 'exterior vapour pressure', 'marker': 'Pa'},
{'parameter': 'interior vapour diffusion transfer coefficient', 'marker': 's/m'},
{'parameter': 'exterior vapour diffusion transfer coefficient', 'marker': 's/m'},
{'parameter': 'exterior vapour diffusion transfer coefficient slope', 'marker': 's2/m2'},
{'parameter': 'solar absorption', 'marker': '-'},
{'parameter': 'scale factor catch ratio', 'marker': '-'},
{'parameter': 'output folder', 'marker': ''},
{'parameter': 'x-discretisation', 'marker': 'm'},
{'parameter': 'y-discretisation', 'marker': 'm'},
{'parameter': 'start year', 'marker': ''},
{'parameter': 'start time', 'marker': ''}
])
mark = param_mark.loc[param_mark['parameter'] == param,'marker'].tolist()[0]
if len(mark) == 0:
        print('Error: could not find parameter marker')
return mark
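# Example: marker('wall orientation') returns 'Deg' (looked up in the table above).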
def nameccd(climateparam):
param_name = pd.DataFrame([{'parameter': 'cloud cover','name':'CloudCover'},
{'parameter': 'diffuse radiation','name':'DiffuseRadiation'},
{'parameter': 'direct radiation','name':'DirectRadiation'},
{'parameter': 'sky radiation','name':'SkyRadiation'},
{'parameter': 'interior relative humidity','name':'RelativeHumidity'},
{'parameter': 'exterior relative humidity','name':'RelativeHumidity'},
{'parameter': 'interior vapour pressure','name':'VapourPressure'},
{'parameter': 'sky radiation','name':'SkyRadiation'},
{'parameter': 'interior temperature','name':'Temperature'},
{'parameter': 'exterior temperature','name':'Temperature'},
{'parameter': 'rain vertical surface','name':'VerticalRain'},
{'parameter': 'rain horizontal surface','name':'HorizontalRain'},
{'parameter': 'wind direction','name':'WindDirection'},
{'parameter': 'wind velocity','name':'WindVelocity'},
])
name = param_name.loc[param_name['parameter'] == climateparam,'name'].tolist()[0]
if len(name) == 0:
        print('Error: could not find climate parameter name')
return name
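# Example: nameccd('exterior temperature') returns 'Temperature'.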
def readData(args):
n, files_num, delete = args
output_fn, geometry_fn, elements_fn = dict(), dict(), dict()
geom_x, geom_y = None, None
for file in files_num:
p = re.sub('[0-9_-]', '', file.split('\\')[-1][:-4])
with open(file, 'r', encoding='utf8') as f:
l = 0
for line in f:
# Find geometry line
if 'TABLE GRID' in line:
geom_x = string2vec(f.readline())
geom_y = string2vec(f.readline())
l += 2
# Find output start line
if 'ELEMENTS' in line or 'SIDES 'in line:
elem_f = string2vec(line)
output_f = np.loadtxt(file,skiprows=l+1,usecols=tuple(range(1,len(elem_f)+1)),dtype='f')
break
l +=1
# Combine in dictionary
geometry_fn['geom_x'], geometry_fn['geom_y'], = geom_x, geom_y
elements_fn[p] = elem_f
output_fn[p] = output_f
if delete:
os.remove(file)
return output_fn, geometry_fn, elements_fn
def readOutput(path, exclude=None, delete=False):
# Get list of all files that need to be read
files_all = getFileList(path, ext='.out')[1]
files = list()
if exclude:
for f in files_all:
if not any(e in f for e in exclude):
files.append(f)
else:
files = files_all
if not files:
return [], [], []
else:
# Extract output parameters from list
param = list(Counter([re.sub('[0-9_-]', '', x.split('\\')[-1][:-4]) for x in files]).keys())
# Extract numbers from list
num = list(Counter([re.sub('[a-zA-Z]', '', x[:-4]).split('_')[-1] for x in files]).keys())
do = [[int(x.split('-')[0]) for x in num], [int(x.split('-')[1]) for x in num]]
tuples = list(zip(*do))
# Read files
num_cores = multiprocessing.cpu_count()-1
pool = multiprocessing.Pool(num_cores)
args = [(n, [x for x in files if re.sub('[a-zA-Z]', '', x[:-4]).split('_')[-1] == n], delete) for n in num]
results = pool.map(readData, args)
pool.close()
pool.join()
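        # Each worker returns an (output_fn, geometry_fn, elements_fn) tuple for one
        # file-number group; the per-parameter arrays in output_fn are then stacked into a
        # frame with one column per output parameter, indexed by the parsed number pairs.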
output = pd.DataFrame([x[0] for x in results], columns=param, index= | pd.MultiIndex.from_tuples(tuples) | pandas.MultiIndex.from_tuples |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index= | Index(["a", "b"], name="id") | pandas.Index |
import math
import logging
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
import nltk
from nltk.tokenize import sent_tokenize
from spacy.cli.download import download as spacy_download
try:
import en_core_web_sm
except ImportError:
logging.warning(">Spacy en_core_web_sm not found. Downloading and installing.")
spacy_download("en_core_web_sm")
import en_core_web_sm
from collections import Counter, defaultdict, OrderedDict
import time
import os
from enum import Enum, auto
class ParseAndModel:
"""
Treats data input chain.
Based on this data, computes matrices for reviews and features.
Usage:
pm = ParseAndModel(feature_list=["sound", "battery", ["screen", "display"]],
filename='../tests/data/parse_and_model/iPod.final')
print(pm.model_results)
"""
class InputType(Enum):
"""
Enum holding the types of input files.
"""
annotated = auto()
docperline = auto()
def __init__(self,
feature_list: list = None,
filename: str = None,
input_type: Enum = InputType.annotated,
nlines: int = None,
remove_stopwords: bool = True,
start_line: int = 0,
lemmatize_words: bool = True,
log_base: int = None,
include_title_lines: bool = True):
"""
:param feature_list: a list of strings and lists of strings. Individual strings will be given separate ids, lists
of strings will be treated as synonyms and given the same feature id.
ex. ["sound", "battery", ["screen", "display"]]
:param filename: Filename for the data set
:param input_type: An enum of type InputType, specifying the type of input data so the correct read function can be chosen
options are "annotated" - which expects data in Santu's original format and "onedocperline" - which expects
all data to be in a single file with one document per line
:param nlines: Maximum number of lines from the file to read or None to read all lines
:param remove_stopwords: Set to true if stop words should be removed from document sections before models are
created
:param start_line: Optional parameter, specific line number to start at, mostly for testing purposes
:param lemmatize_words: Set to true if lemmatization should be performed on document sections before models are
created
:param log_base: Optional parameter to specify log base, defaults to ln if not set
:param include_title_lines: Set to true to include lines as marked in title lines in the output, false otherwise
only valid for annotated data input
"""
# Test nltk dependencies
        # nltk.data.find raises LookupError (rather than returning None) when the resource is missing
        try:
            nltk.data.find('tokenizers/punkt')
            logging.info(" >Ok: NLTK punkt present.")
        except LookupError:
            logging.warning(" >NLTK punkt not present: downloading nltk punkt.")
            nltk.download('punkt')
# Run feature list formatter and save output (or notify user this is being skipped)
self.feature_list = feature_list
self.formatted_feature_list = None
if self.feature_list is None:
logging.warning(" >No feature list specified, skipping feature list formatting")
else:
self.formatted_feature_list = self.format_feature_list()
# Run read annotated data (or notify user this is being skipped)
if filename is None:
logging.warning(" >No filename specified, skipping parse step")
else:
if input_type == ParseAndModel.InputType.annotated:
logging.info("Reading data from annotated file")
self.parsed_text = self.read_annotated_data(filename=filename, nlines=nlines, start_line=start_line,
include_title_lines=include_title_lines)
elif input_type == ParseAndModel.InputType.docperline:
logging.info("Reading data from un-annotated file. Assuming one document per line.")
self.parsed_text = self.read_file_data(filename=filename, nlines=nlines, start_line=start_line)
else:
raise Exception("Invalid input type. Options are 'annotated' and 'oneDocPerLine'")
# Build the explicit models and store the output
if self.formatted_feature_list is None:
logging.warning(" >No formatted feature list present, can't build explicit models")
elif self.parsed_text is None:
logging.warning(" >No parsed text present, can't build explicit models")
else:
self.model_results = self.build_explicit_models(remove_stopwords=remove_stopwords,
lemmatize_words=lemmatize_words,
log_base=log_base
)
# self.parsed_text2 = ParseAndModel.read_file_data(filename=filename, nlines=nlines, start_line=start_line)
def format_feature_list(self) -> pd.DataFrame:
"""
This function takes a list of strings and/or lists of strings and converts them to a DataFrame with ids. Terms in
nested lists will be treated as synonyms and given the same feature id
ex. feature_list = format_feature_list(feature_list = ["sound", "battery", ["screen", "display"]])
:return: DataFrame with integer ids for each feature, synonyms are grouped together
| feature (str) | feature_id (int) | feature_term_id (int)
feature: string representation of the feature
feature_id: integer id for the feature, will be the same for synonyms if input in nested list
feature_term_id: integer id for the feature, will be unique for each string, including synonyms
"""
feature_list = self.feature_list
feature_index = 0
feature_term_index = 0
formatted_feature_list = []
# loop through list of features
for feature in feature_list:
if isinstance(feature, str):
formatted_feature_list.append(
{"feature_term_id": feature_term_index, "feature_id": feature_index, "feature": feature})
feature_term_index += 1
elif isinstance(feature, list):
for synonym in feature:
if isinstance(synonym, str):
formatted_feature_list.append(
{"feature_term_id": feature_term_index, "feature_id": feature_index, "feature": synonym})
feature_term_index += 1
else:
raise ValueError(str(feature) + '>' + str(synonym) + ' is not a string or a list of strings')
else:
raise ValueError(str(feature) + ' is not a string or a list of strings')
feature_index += 1
feature_df = pd.DataFrame(formatted_feature_list)
# Save formatted feature list to object
# TODO: [nfr] remove this from here, return feature_df and make assignment in __init__
return feature_df
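    # Worked example of the mapping produced by format_feature_list() for
    # feature_list = ["sound", "battery", ["screen", "display"]] (derived from the loop above):
    #   feature_term_id  feature_id  feature
    #   0                0           sound
    #   1                1           battery
    #   2                2           screen
    #   3                2           display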
    # TODO: add tests, alternate file formats
def read_annotated_data(self, filename: str, nlines: int = None, start_line: int = 0,
include_title_lines: bool = True) -> dict:
"""
Reads in Santu's annotated files and records the explicit features and implicit features annotated in the file
ex. annotated_data = read_annotated_data(filename='demo_files/iPod.final', nlines=200)
ex. annotated_data = read_annotated_data(filename='demo_files/iPod.final', nlines=2)
:param filename: Filename for the annotated data set
:param nlines: Maximum number of lines from the file to read or None to read all lines
:param start_line: Optional parameter, specific line number to start at, mostly for testing purposes
:param include_title_lines: Set to true to include lines as marked in title lines in the output, false otherwise
:return: a dictionary with the following data
section_list: DataFrame with the following form
| doc_id (int) | section_id (int) | section_text (str) | title (bool) |
doc_id: integer id for the document
section_id: integer id for the section
section_text: cleaned (lowercase, trimmed) section text
title: True if the line is a title, False otherwise
feature_section_mapping: DataFrame
| doc_id (int) | feature (str) | is_explicit (bool) | section_id (int) |
doc_id: integer id for the document
feature: the string form of the feature in the annotation
is_explicit: False if the feature was marked in the annotation as an implicit mention, True otherwise
section_id: integer id for the section
feature_list: dictionary with each feature and the number of sections it appears in
key: feature name
value: number of sections in which the feature appears
"""
doc_id = -1
section_id = 0
section_list = []
feature_section_mapping = []
feature_list = defaultdict(int)
line_number = 0
line_count = 0
with open(filename, 'r') as input_file:
for line in input_file:
# Skip line if before specified start
if line_number < start_line:
# Increment line number
line_number += 1
continue
else:
# Increment line number
line_number += 1
# Section is from new doc, increment doc id
if '[t]' in line:
doc_id += 1
is_title = True
line_text = line.split('[t]')[1].strip().lower()
# Section is from new doc, increment doc id
elif line.startswith('*'):
doc_id += 1
is_title = True
line_text = line.split('*')[1].strip().lower()
# Section not from new doc, just get cleaned text
else:
is_title = False
line_text = line.split('##')[1].strip().lower()
# If we still haven't seen a title increment the document id anyway
if doc_id == -1:
doc_id += 1
# Look for feature annotations attached to the line
feature_string = line.split('##')[0].split(',')
logging.debug(feature_string)
if not is_title and feature_string[0] != '':
# Loop through all the features found in the annotation
for feature in feature_string:
logging.debug(feature)
# Check if the feature in the annotation is marked as an implicit mention
if '[u]' in feature:
explicit_feature = False
logging.debug('implicit')
else:
explicit_feature = True
# Get the actual text of the feature
feature_text = feature.split('[@]')[0]
# Add the feature and section id to the data set
feature_section_mapping.append(
{"doc_id": doc_id, "section_id": section_id, "feature": feature_text,
"is_explicit": explicit_feature})
# Increment the feature in the unique feature list
feature_list[feature_text] += 1
# Check if title lines should be included
if not include_title_lines and is_title:
# Check if max number of lines has been reached yet
line_count += 1
if nlines is not None:
if line_count >= nlines:
break
continue
# Add section line to data set
section_list.append(
{"doc_id": doc_id, "section_id": section_id, "section_text": line_text, "title": is_title})
# Increment section id
section_id += 1
line_count += 1
logging.debug(line)
# Check if max number of lines has been reached yet
if nlines is not None:
if line_count >= nlines:
break
# Bundle and save data set
# TODO: [nfr] remove this from here, return dictionary and make assignment in __init__
return dict(section_list=pd.DataFrame(section_list), feature_mapping= | pd.DataFrame(feature_section_mapping) | pandas.DataFrame |
"""Implements the csv unifier"""
from abc import ABC, abstractmethod
import pandas as pd
import sys
from datetime import datetime
import os
class AbstractStatementTransformer(ABC):
"""Implements the statement class"""
_COLUMNS = ["datetime", "transaction_type", "amount", "from", "to"]
def __init__(self, source_csv_file_path):
try:
self._source_df = | pd.read_csv(source_csv_file_path) | pandas.read_csv |
import os
import pandas as pd
import numpy as np
from pandas.api.types import CategoricalDtype
from imblearn.over_sampling import SMOTE
DATA_PATH = '../cell-profiler/measurements'
def load_data(filename, data_path=DATA_PATH):
"""
Read a csv file.
"""
csv_path = os.path.join(data_path, filename)
return | pd.read_csv(csv_path) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import matplotlib.pyplot as plt
import xlrd
# In[3]:
import os
# In[4]:
csv = []
# In[5]:
path = './Data'
for root, dirs, files in os.walk(path):
csv.append(files)
# In[6]:
csv
# In[7]:
# Agreement_csv = ['bittorrent.csv',
# 'dns.csv',
# 'ftp.csv',
# 'httphttps.csv',
# 'pop3.csv',
# 'smtp.csv',
# 'ssh.csv',
# 'telnet.csv']
Agreement_csv = [
'dns.csv',
'ssh.csv',
'telnet.csv']
path = "./Data"
# In[8]:
def strip(data):
columns = data.columns
new_columns = []
for i in range(len(columns)):
new_columns.append(columns[i].strip())
return new_columns
# In[9]:
data = | pd.DataFrame() | pandas.DataFrame |
"""Multivariate Aggregator module."""
__version__ = '2.0.0'
from typing import Dict, List
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from .core.algorithms import lstm_model
from adtk.transformer import PcaReconstructionError
from adtk.data import validate_series
from sklearn.preprocessing import MinMaxScaler
import scipy.stats as stats
from statsmodels.tsa.vector_ar.var_model import VAR
from tensorflow import keras
import joblib
import numpy as np
import pandas as pd
import os
import shutil
from fastapi.staticfiles import StaticFiles
app = FastAPI(
title='Multivariate Aggregator module.',
docs_url='/documentation',
redoc_url='/redoc',
description='Multivariate aggregator based on multivariate time series data.',
version=__version__
)
class ModelPath(BaseModel):
    '''File paths (relative to the data/ directory) for the persisted model and scaler'''
model: str
scaler: str
class MultivariateTimeSeriesData(BaseModel):
    '''Multivariate time series data: a mapping from variable name to its list of values'''
data: Dict[str, List[float]]
class TrainMVTS(BaseModel):
    '''Data and hyperparameters for training an LSTM on multivariate time series data'''
train_data: MultivariateTimeSeriesData
paths: ModelPath
activation: str = 'relu'
optimizer: str = 'adam'
loss: str = 'mae'
nb_epochs: int = 300
batch_size: int = 64
    validation_split: float = 0.15
    patience: int = 20
initial_embeding_dim: int = 128
class AggregatedMVTS(BaseModel):
test_data: MultivariateTimeSeriesData
paths: ModelPath
class BestVAR(BaseModel):
train_data: MultivariateTimeSeriesData
low_order: int = 1
high_order: int = 50
class TrainVAR(BaseModel):
train_data: MultivariateTimeSeriesData
paths: ModelPath
order: int = 1
class TestVAR(BaseModel):
test_data: MultivariateTimeSeriesData
paths: ModelPath
order: int = 1
class AggregatedPCA(BaseModel):
"""Parameters for PCA anomaly detection."""
test_data: MultivariateTimeSeriesData
principal_component: int = 1
class AggregatedOut(BaseModel):
'''Aggregated Score'''
out: List[float]
@app.post('/multivariate-lstm-train')
async def train_multivariate_lstm(mvts_data: TrainMVTS):
    """Train the LSTM used for multivariate aggregation and persist the model and scaler"""
train_x = pd.DataFrame.from_dict(mvts_data.train_data.data)
# normalise
scaler = MinMaxScaler()
scaler = scaler.fit(train_x)
train_x = scaler.transform(train_x)
# reshape data
train_x = train_x.reshape(train_x.shape[0], 1, train_x.shape[1])
model = lstm_model(train_x,
mvts_data.initial_embeding_dim,
mvts_data.loss
)
try:
path_to_model = os.path.join('data', mvts_data.paths.model)
model.save(path_to_model)
path_to_scaler = os.path.join('data', mvts_data.paths.scaler)
with open(path_to_scaler, 'wb') as fo:
joblib.dump(scaler, fo)
return {"dump_status": "model is saved successfully"}
except Exception as inst:
return {"dump_status": str(inst)}
@app.post('/aggregate-multivariate-lstm-score', response_model=AggregatedOut)
async def aggregate_multivariate_lstm(mvts_data: AggregatedMVTS):
"""Apply LSTM reconstruction error to aggregate the Multivariate data"""
# load model
path_to_model = os.path.join('data', mvts_data.paths.model)
model = keras.models.load_model(path_to_model)
# get scaler
path_to_scaler = os.path.join('data', mvts_data.paths.scaler)
scaler = joblib.load(path_to_scaler)
# get data
test_x = pd.DataFrame.from_dict(mvts_data.test_data.data)
# normalise
test_x = scaler.transform(test_x)
# reshape data
test_x = test_x.reshape(test_x.shape[0], 1, test_x.shape[1])
# predict
test_x_pred = model.predict(test_x)
# get score
test_score = list(np.mean(np.abs(test_x - test_x_pred), axis=2)[:, 0])
return AggregatedOut(out=test_score)
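def _example_score_request(host: str = "http://localhost:8000") -> dict:
    """Hedged client-side sketch, not called anywhere: the host, port and file names are
    assumptions rather than part of this module. Train via /multivariate-lstm-train first so
    the model and scaler referenced in `paths` exist under data/."""
    import requests
    payload = {
        "test_data": {"data": {"sensor_a": [0.1, 0.2, 0.3], "sensor_b": [1.0, 0.9, 1.1]}},
        "paths": {"model": "lstm_model", "scaler": "scaler.joblib"},
    }
    return requests.post(f"{host}/aggregate-multivariate-lstm-score", json=payload).json()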
@app.post('/best-multivariate-var-order')
async def best_multivariate_var_order(mvts_data: BestVAR):
"""Apply VAR to find best lag order"""
# get data
train_data = pd.DataFrame.from_dict(mvts_data.train_data.data)
# add datetime index to data
train_data.index = pd.to_datetime(train_data.index, unit='ms')
AIC = {}
best_aic, best_order = np.inf, 0
for i in range(mvts_data.low_order, mvts_data.high_order):
model = VAR(endog=train_data)
var_result = model.fit(maxlags=i)
AIC[i] = var_result.aic
if AIC[i] < best_aic:
best_aic = AIC[i]
best_order = i
return {"best_order": best_order}
@app.post('/train-multivariate-var')
async def train_multivariate_var(mvts_data: TrainVAR):
"""Train VAR and return var_result"""
# get data
train_data = | pd.DataFrame.from_dict(mvts_data.train_data.data) | pandas.DataFrame.from_dict |
# coding: utf-8
import pandas as pd
import numpy as np
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
from bokeh.models import HoverTool, PanTool, WheelZoomTool, BoxSelectTool, TapTool, OpenURL
from bokeh.models import GMapPlot, GMapOptions, Circle, DataRange1d, Range1d
from bokeh.io import curdoc
from bokeh.layouts import row, column, widgetbox, gridplot
from bokeh.models.widgets import Select, Slider, TextInput, DataTable, TableColumn, Div, Select
import itertools
import os
import logging
logger = logging.getLogger(__name__)
# data file locations
MAP_DATA_FILE = 'proteomz_stns.csv'
PROTEOMZ_DATA_FILE = 'ExampleDataset.csv'
TAXA_KEYS_DATA_FILE = 'Taxa_keys.csv'
# plot tools
TOOLS = "box_zoom, pan, xwheel_zoom, reset"
# global visual parameters
TITLE_TEXT_SIZE = '20px'
# introduction text
INTRODUCTION_HTML = """<b>Ocean Proteomics data from <i>Falkor ProteOMZ Expedition</i> in January-February of 2016.
The prototype interactive display can explore millions of protein values from over a hundred 300 liter samples
collected in the Central Pacific Oxygen Minimum Zone to depths greater than 1 kilometer. Use the sliders and
menus to enable scientific discovery within this novel dataset. <u>*NOTE: This is an example dataset containing <i>shuffled protein
annotations.</i> Public release of this dataset coming soon. </u> """
INTRODUCTION_WIDTH = 380
INTRODUCTION_HEIGHT = 130
# map visual parameters
MAP_WIDTH = 400
MAP_HEIGHT = 750
MAP_TITLE = 'ProteOMZ EXPEDITION FALKOR 2015'
MAP_LAT = 7.29
MAP_LON = -145.73
MAP_ZOOM = 4
# For map to work, uncomment this line and put your own google API key (https://developers.google.com/maps/documentation/javascript/get-api-key)
# MAP_API_KEY =
MAP_TYPE = 'hybrid'
DESELECTED_STATION_COLOR = 'white'
SELECTED_STATION_COLOR = 'red'
# profile visual parameters
PROFILE_TITLE = 'The Vertical Distribution of Microbial Proteins'
PROFILE_X_LABEL = 'Relative Abundance (Spectral Counts)'
PROFILE_Y_LABEL = 'Depth in the Ocean (meters)'
PROFILE_LINE_COLOR = 'red'
MAX_PROFILES = 1200
PROFILE_WIDTH = 600
PROFILE_HEIGHT = 1100
# histogram visual parameters
HISTOGRAM_TITLE = 'All Spectra/IDs'
HISTOGRAM_X_LABEL = 'Sum of Proteins/Spectra'
HISTOGRAM_WIDTH = 400
HISTOGRAM_HEIGHT = 1100
# bar chart visual parameters
TAXA_BAR_TITLE = 'The Diversity of Microbial Proteins'
TAXA_BAR_WIDTH = 600
TAXA_BAR_HEIGHT = 350
TAXA_BAR_COLORS = ["#e6ab02", "#1f78b4", "#b2182b", "#7570b3", "#e7298a", "#66a61e",
"#d95f02", "#666666"] #, "#1b9e77"]
#table settings
TAXON_TABLE_WIDTH=600
TAXON_TABLE_HEIGHT=750
# initial selections
ALL = 'ALL'
INIT_TAXA_GROUP = ALL
INIT_EC_GROUP = ALL
INIT_PCTILE = 95
INIT_NUT = 'N+N'
INIT_PROT = 'P1'
ST_SELECT_TITLE = 'Station'
NUT_SELECT_TITLE = 'Select Hydrographic Parameter for Correlation'
TN_SELECT_TITLE = 'Select Microbial Taxon'
EC_SELECT_TITLE = 'Major Enzyme Classes'
PERCENTILE_SLIDER_TITLE = 'Percentile (Note: be patient below 90%)'
EC_GROUPS = ['Oxidoreductases','Transferases', 'Hydrolases', 'Lyases', 'Isomerases', 'Ligases']
# computing axis ranges
def compute_profile_axis_ranges(z, station_counts):
# compute plot axis ranges for profile plot
max_z, min_z = z.max(), z.min()
min_c, max_c = 0, station_counts.max().max()
return (max_z, min_z), (min_c, max_c)
def compute_histogram_axis_ranges(histogram_datasource):
# compute plot axis ranges for histogram
min_h = 0
max_h = max(histogram_datasource.data['prot_cts']) * 1.5
return (min_h, max_h)
# main container
class Visualization(object):
def __init__(self):
"""read data and construct plot elements in their initial state"""
self.read_data()
z, station_counts, hydrography_counts, all_counts, selected_nut = self.select_initial_data(self.stations[0])
self.construct_datasources(z, station_counts, hydrography_counts, all_counts, selected_nut)
# create plots and widgets
self.make_plots(z, station_counts, hydrography_counts, selected_nut)
self.make_widgets()
def read_data(self):
"""read data and transform into dataframes"""
self._read_map_data()
self._read_proteomz_with_metadata()
def _read_map_data(self):
# second column data source for map stn/lat/long only, single point per stn
self.stn_coor = pd.read_csv(MAP_DATA_FILE, index_col=None)
def _read_proteomz_with_metadata(self):
"""read the large spreadsheet CSV, extract sections, and reorganize into
meaningful dataframes"""
df = pd.read_csv(PROTEOMZ_DATA_FILE, low_memory=False)
# extract metadata section of spreadsheet containing station and depth information
self.cruise_metadata = df[df.columns[:11]][:103]
# stations are in that column of cruise_metadata
self.stations = self.cruise_metadata.Station.unique().astype(int)
# extract counts section of spreadsheet
all_counts = df[df.columns[21:]][:103].transpose()
self.all_counts = all_counts.dropna().astype(float)
#extract hydrographic data
hydrography = df[df.columns[4:17]][:103].transpose()
self.hydrography = hydrography.dropna().astype(float)
# extract metadata section of spreadsheet containing prot id information
data = df[103:]
data.index=data.pop('ID')
for col in data.columns[:10]:
data.pop(col)
prot_metadata = data.transpose()
### For taxonomy information we read a different file
taxa_df = | pd.read_csv(TAXA_KEYS_DATA_FILE) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Key classification
using multiclass Support Vector Machine (SVM)
reference:
Date: Jun 05, 2017
@author: <NAME>
@Library: scikit-learn
"""
import os, glob, random
import numpy as np
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.metrics import confusion_matrix, precision_score
from sklearn.pipeline import Pipeline
from sklearn.externals import joblib
from sklearn.cross_validation import KFold
import time
import codecs
import matplotlib.pyplot as plt
import itertools
NEW_LINE = '\r\n'
TRAIN_SIZE = 0.8
def build_data_frame(data_dir):
dirs = next(os.walk(data_dir))[1]
class_names = []
total_amount = []
train_amount = []
test_amount = []
train_data = | DataFrame({'value': [], 'class': []}) | pandas.DataFrame |
from datetime import (
datetime,
time,
)
import numpy as np
import pytest
from pandas._libs.tslibs import timezones
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
class TestBetweenTime:
@td.skip_if_has_locale
def test_between_time_formats(self, frame_or_series):
# GH#11818
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
if frame_or_series is Series:
ts = ts[0]
strings = [
("2:00", "2:30"),
("0200", "0230"),
("2:00am", "2:30am"),
("0200am", "0230am"),
("2:00:00", "2:30:00"),
("020000", "023000"),
("2:00:00am", "2:30:00am"),
("020000am", "023000am"),
]
expected_length = 28
for time_string in strings:
assert len(ts.between_time(*time_string)) == expected_length
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_localized_between_time(self, tzstr, frame_or_series):
tz = timezones.maybe_get_tz(tzstr)
rng = date_range("4/16/2012", "5/1/2012", freq="H")
ts = Series(np.random.randn(len(rng)), index=rng)
if frame_or_series is DataFrame:
ts = ts.to_frame()
ts_local = ts.tz_localize(tzstr)
t1, t2 = time(10, 0), time(11, 0)
result = ts_local.between_time(t1, t2)
expected = ts.between_time(t1, t2).tz_localize(tzstr)
tm.assert_equal(result, expected)
assert timezones.tz_compare(result.index.tz, tz)
def test_between_time_types(self, frame_or_series):
# GH11818
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
obj = DataFrame({"A": 0}, index=rng)
if frame_or_series is Series:
obj = obj["A"]
msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time"
with pytest.raises(ValueError, match=msg):
obj.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
def test_between_time(self, close_open_fixture, frame_or_series):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
if frame_or_series is not DataFrame:
ts = ts[0]
stime = time(0, 0)
etime = time(1, 0)
inc_start, inc_end = close_open_fixture
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time("00:00", "01:00")
expected = ts.between_time(stime, etime)
tm.assert_equal(result, expected)
# across midnight
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
if frame_or_series is not DataFrame:
ts = ts[0]
stime = time(22, 0)
etime = time(9, 0)
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self, frame_or_series):
# GH#20725
obj = DataFrame([[1, 2, 3], [4, 5, 6]])
if frame_or_series is not DataFrame:
obj = obj[0]
msg = "Index must be DatetimeIndex"
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
obj.between_time(start_time="00:00", end_time="12:00")
def test_between_time_axis(self, frame_or_series):
# GH#8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = Series(np.random.randn(len(rng)), index=rng)
if frame_or_series is DataFrame:
ts = ts.to_frame()
stime, etime = ("08:00:00", "09:00:00")
expected_length = 7
assert len(ts.between_time(stime, etime)) == expected_length
assert len(ts.between_time(stime, etime, axis=0)) == expected_length
msg = f"No axis named {ts.ndim} for object type {type(ts).__name__}"
with pytest.raises(ValueError, match=msg):
ts.between_time(stime, etime, axis=ts.ndim)
def test_between_time_axis_aliases(self, axis):
# GH#8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
stime, etime = ("08:00:00", "09:00:00")
exp_len = 7
if axis in ["index", 0]:
ts.index = rng
assert len(ts.between_time(stime, etime)) == exp_len
assert len(ts.between_time(stime, etime, axis=0)) == exp_len
if axis in ["columns", 1]:
ts.columns = rng
selected = ts.between_time(stime, etime, axis=1).columns
assert len(selected) == exp_len
def test_between_time_axis_raises(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
mask = np.arange(0, len(rng))
rand_data = np.random.randn(len(rng), len(rng))
ts = | DataFrame(rand_data, index=rng, columns=rng) | pandas.DataFrame |
import dash
import dash_html_components as html
from character import Character
from dash.dependencies import Input, Output, State
import plotly.express as px
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import layout
from data import Data
from user import User
import numpy as np
from datetime import timedelta, datetime
import plotly.graph_objects as go
from DataBase import databaseDF
from src.models.load_data import AdvisedPortfolios, Singleton, PriceDB
from skimage import io
from pandas import to_numeric, to_datetime
import pandas as pd
import copy
from pypfopt.discrete_allocation import DiscreteAllocation
sheet = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=sheet,
suppress_callback_exceptions=True)
server = app.server
user = None
def show_content():
style = layout.style
user = User()
# global _user
app.layout = html.Div(layout.main_login, id='main-layout')
check = False
    # Test data setup for the third tab
# risk_profile = 4
# current_date = '2021-06-01'
# userid = 'A001'
# username = '안정추구형소규모'
db = databaseDF()
# advised_pf = AdvisedPortfolios.instance().data
# print('type(advised_pf)'.format(type(advised_pf)))
# print('Here is your advised_pf:')
# print(advised_pf.tail(3))
# db.insert_advised_pf(advised_pf)
@app.callback(
Output('main-layout', 'children'),
[Input('login-button', 'n_clicks'),
Input('sign-up-button', 'n_clicks')],
State('user-id-main', 'value')
)
def show_layout(login, signup, user_id):
if user.name:
user.name = ""
user.userid = ""
user.data = Data()
user.date = ""
print('#1. in show_layout, login: {}, signup: {}, user_id: {}'.format(login, signup, user_id))
if 0 < login:
temp = copy.deepcopy(layout.tab)
temp.children[0].children = temp.children[0].children[1:]
temp.children[0].value = 'analysis'
user.name = copy.deepcopy(user_id)
print('#3. in show_layout, login: {}, signup: {}, user.name: {}'.format(login, signup, user.name))
layout.main_login.children[2].n_clicks = 0
check = False
return temp
if 0 < signup:
layout.main_login.children[5].n_clicks = 0
check = True
layout.tab.children[0].value = 'signup'
return layout.tab
print('login and signup >= 0!! Despite of this, let me set user.name to be user_id {}.'.format(user.name))
return layout.main_login
@app.callback(
Output(layout.output_id, 'children'),
Input(layout.input_id, 'value')
)
def show_page(tab_input):
# global _user
if tab_input == 'signup':
return html.Div(layout.signup)
if tab_input == 'analysis':
if not check:
                # When the user has logged in,
                # set the user name and advisory base date shown on the RA advisory tab.
layout.analysis[0].children[1].children = ''
layout.analysis[0].children[3].children = '8/1/2021 4:00:00 PM'
user.date = '8/1/2021 4:00:00 PM'
# layout.analysis[0].children[3].children = user.getStartDate(user.name)
# layout.analysis[0].children[1].children = user.name
# layout.analysis[0].children[3].children = '6/2/2021 4:00:00 PM'
# 처음 로그인할 때 이게 user.name이 none이었음.
# 다른 브라우저로 다른 아이디로 로그인할 때는 이 값이 사용자가 입력한 값을 가짐.
# 세 번째 로그인도 잘 작동.
# 네 번째 로그인도 잘 작동.
# 관찰결과: user.name에 한 세션 이전의 사용자명이 저장되어 있음.
# print('This is user.name: {}'.format(user.name))
return html.Div(layout.analysis)
if tab_input == 'info':
if not check:
                # When the user has logged in
layout.info[0].children[1].children = copy.deepcopy(user.name)
return html.Div(layout.info)
@app.callback(
Output('output-div', 'children'),
Input('submit-val', 'n_clicks'),
State('self-understand-degree', 'value'),
State('age-check', 'value'),
State('invest-terms', 'value'),
State('finance-ratio', 'value'),
State('annual-income', 'value'),
State('character-risk', 'value'),
State('invest-purpose', 'value'),
State('invest-experience', 'value'),
State('invest-scale', 'value'),
State('datetime', 'value'),
State('name', 'value')
)
def page1_result(n_clicks, input_1, input_2, input_3, input_4,
input_5, input_6, input_7, input_8, input_9, input_10,
input_11):
def get_fig(source, width, height):
# Create figure
fig = go.Figure()
# Constants
img_width = width
img_height = height
scale_factor = 0.5
# Add invisible scatter trace.
# This trace is added to help the autoresize logic work.
fig.add_trace(
go.Scatter(
x=[0, img_width * scale_factor],
y=[0, img_height * scale_factor],
mode="markers",
marker_opacity=0
)
)
# Configure axes
fig.update_xaxes(
visible=False,
range=[0, img_width * scale_factor]
)
fig.update_yaxes(
visible=False,
range=[0, img_height * scale_factor],
# the scaleanchor attribute ensures that the aspect ratio stays constant
scaleanchor="x"
)
# Add image
fig.add_layout_image(
dict(
x=0,
sizex=img_width * scale_factor,
y=img_height * scale_factor,
sizey=img_height * scale_factor,
xref="x",
yref="y",
opacity=1.0,
layer="below",
sizing="stretch",
source=source)
)
# Configure other layout
fig.update_layout(
width=img_width * scale_factor,
height=img_height * scale_factor,
margin={"l": 0, "r": 0, "t": 0, "b": 0},
)
return fig
if 0 < n_clicks:
tags_id = [input_1, input_2, input_3, input_4, input_5, input_6, input_7, input_8, input_9,
input_10, input_11]
user.name = input_11
user.date = input_10
print(user.name, user.date)
character = Character(tags_id)
# print('tags_id: {}'.format(tags_id))
output = html.Div([
html.Div(id='character-result')
], id='output-div')
if character.empty_check():
# fig_rpt = go.Figure(go.Image(dx=1008, dy=2592, z=io.imread('./reports/figures/report-4_2021-02-26.png')))
# fig_rpt2 = go.Figure(go.Image(dx=1000, dy=600, z=io.imread('./reports/figures/ef_area-4_2021-02-26.png')))
# fig_rpt3 = go.Figure(go.Image(dx=640, dy=480, z=io.imread('./reports/figures/ef-4_2021-02-26.png')))
# fig_rpt = go.Figure().add_layout_image(source='./reports/figures/report-4_2021-02-26.png')
# fig_rpt2 = go.Figure().add_layout_image(source='./reports/figures/ef_area-4_2021-02-26.png')
# fig_rpt3 = go.Figure().add_layout_image(source='./reports/figures/ef-4_2021-02-26.png')
answer = []
for_selected = layout.signup[3]
for id in tags_id:
check = False
for i in range(1, len(for_selected.children), 3):
for j in range(len(for_selected.children[i].options)):
if for_selected.children[i].options[j]['value'] == id:
answer.append(j+1)
check = True
break
if check:
break
risk_avg, df, by_assetclass, score, current_date, risk_profile = character.predict(
answer, first_trade=True)
rpt_url = 'https://raw.githubusercontent.com/my2582/kisra_storage/main/report-{}_{}.png'.format(risk_profile, current_date)
rpt2_url = 'https://raw.githubusercontent.com/my2582/kisra_storage/main/ef_area-{}_{}.png'.format(risk_profile, current_date)
rpt3_url = 'https://raw.githubusercontent.com/my2582/kisra_storage/main/ef-{}_{}.png'.format(risk_profile, current_date)
# print('URLs:', rpt_url, rpt2_url, rpt3_url)
fig_rpt = get_fig(source=rpt_url, width=1008, height=2592)
fig_rpt2 = get_fig(source=rpt2_url, width=1000, height=600)
fig_rpt3 = get_fig(source=rpt3_url, width=640, height=480)
result = '당신의 점수는 {0}이며 {1}형 투자자입니다. 당신에게 맞는 포트폴리오를 확인해 보세요'.format(
score, risk_avg)
            # Pie chart (by security)
pie = px.pie(df, names=df.loc[:, 'itemname'], values=df.loc[:, 'weights'],
title="추천 포트폴리오", color_discrete_sequence=px.colors.qualitative.Set3)
# print('-=-=-=-df.columns-=-=-=-=-')
# print(df.columns)
            # Bar chart (by asset class)
bar_chart = px.bar(by_assetclass, y='weights', x='asset_class', title='자산군별 비중',
labels={'asset_class': '자산군', 'weights': '비중'},
orientation='v', color="asset_class", color_continuous_scale='darkmint',
template='plotly_dark')
output.children[0].children = result
if len(output.children) < 3:
fig = dcc.Graph(id='pie-chart')
fig.figure = pie
fig.figure.layout.paper_bgcolor = style['pie_chart_style']['backgroundColor']
fig_bar = dcc.Graph(id='bar-chart')
fig_bar.figure=bar_chart
fig_bar.figure.layout.paper_bgcolor = style['pie_chart_style']['backgroundColor']
output.children.append(fig)
output.children.append(fig_bar)
# print('------------fig---------------')
# fig_show = html.Img(class_='picture-show', src="./reports/figures/report-4_2021-02-26.png")
# href = html.A('Download readMe.pdf', download='./reports/figures/report-4_2021-02-26.png', href='/readMe.pdf')
# output.children.append(href)
output.style = style['pie_chart_style']
# fig_rpt['layout'].update(width=1008, height=2592, autosize=False)
# fig_rpt2['layout'].update(width=1000, height=600, autosize=False)
# fig_rpt3['layout'].update(width=640, height=480, autosize=False)
output.children.append(dcc.Graph(id="fig-image", figure=fig_rpt, config={'doubleClick': 'reset'}))
output.children.append(dcc.Graph(id="fig2-image", figure=fig_rpt2, config={'doubleClick': 'reset'}))
output.children.append(dcc.Graph(id="fig3-image", figure=fig_rpt3, config={'doubleClick': 'reset'}))
return output
warning = '비어있는 항목이 있습니다! 전부 체크해 주세요'
if 2 < len(output.children):
output.children = output.children[:-1]
output.children[0].children = warning
output.style = style['pie_chart_style']
return output
def page2_result(content, date, ret, vol, df_comp):
if type(content) == str:
return dcc.ConfirmDialog(
id='confirm',
message=content
)
table_title1 = [html.Thead(html.Tr([html.H4("리밸런싱 전/후 비교")]))]
table_title2 = [html.Thead(html.Tr([html.H4("자산별 구성 및 운용성과")]))]
table_title3 = [html.Thead(html.Tr([html.H4("리밸런싱 과거 내역")]))]
table_header_comp = [
html.Thead(html.Tr([html.Th(col) for col in list(df_comp.columns)]))
]
print('table_header_comp is : {}'.format(table_header_comp))
print('in page2_result, df_comp is', df_comp)
rows = df_comp.values.tolist()
# print(rows)
comp_row = list()
for row in rows:
temp = [html.Td(record) for record in row]
comp_row.extend([html.Tr(temp)])
print('in page2_result, comp_row is', comp_row)
table_header = [
html.Thead(html.Tr([html.Th("시점"), html.Th("Cash"), html.Th(
"Equity"), html.Th("Fixed Income"), html.Th("Alternative"), html.Th("Total"), html.Th("누적수익률(%)"), html.Th("변동성(%)")]))
]
# print('content.date: {}'.format(content.date))
# print('date: {}'.format(date))
latest_content = content.loc[content.date==date, :]
latest_content.value = to_numeric(latest_content.value)
print('content.columns: {}'.format(content.columns))
print('content.shape: {}'.format(content.shape))
print('content: {}'.format(content))
# print('----------------------------')
# print('latest_content.shape: {}'.format(latest_content.shape))
# print('latest_content.columns: {}'.format(latest_content.columns))
# print('latest_content: {}'.format(latest_content))
# print('latest_content.date: {}, date: {}'.format(latest_content.date, date))
# print('latest_content[latest_content[asset_class] == Cash][value]: {}'.format(latest_content.loc[latest_content.asset_class == 'Cash', 'value']))
summary = latest_content.loc[:, ['asset_class', 'value']].groupby('asset_class').sum().reset_index()
total = summary.value.sum()
total = '{:,}'.format(int(total))
latest_content.value = latest_content.value.astype(int).apply(lambda x : "{:,}".format(x))
summary.value = summary.value.astype(int).apply(lambda x : "{:,}".format(x))
row1 = html.Tr([html.Td("현재"), html.Td(summary.loc[summary.asset_class == 'Cash', 'value']),
html.Td(summary.loc[summary.asset_class == 'Equity', 'value']),
html.Td(summary.loc[summary.asset_class == 'Fixed Income', 'value']),
html.Td(summary.loc[summary.asset_class == 'Alternative', 'value']),
html.Td(total),
html.Td('{:.1f}'.format(float(ret)*100)),
html.Td('{:.1f}'.format(float(vol)*100))
])
# print('----page2_result에서 상세내역 찍기 시작---')
# result = user.closeData(select, name=user.name, date=user.date, choice=False)
# print('content 첫줄 보면..')
# print(content.iloc[:1, :3])
        # Keep only the history (detail) records from dates on which a rebalancing occurred
result = content.loc[content.original == 'Rebal', :]
print('content.shape: {}, result.shape: {}'.format(content.shape, result.shape))
        # Rename/reorder the detailed balance columns shown on the RA advisory tab
result = result.loc[:, ['date', 'name', 'itemname', 'price', 'quantity', 'value', 'wt', 'original']]
result.date = to_datetime(result.date).dt.strftime('%Y-%m-%d')
result.loc[:, ['price', 'quantity', 'value']] = result.loc[:, ['price', 'quantity', 'value']].astype(float).astype(int).applymap(lambda x : "{:,}".format(x))
result.loc[:, ['wt']] = (result.loc[:, ['wt']].astype(float)*100).applymap(lambda x : "{:.1f}".format(x))
result = result.rename(columns={
'date':'날짜',
'name':'이름',
'itemname': '종목명',
'price': '종가',
'quantity': '보유수량',
'value': '평가금액',
'wt': '비중(%)',
'original': '납입금여부'
})
table_header_detail = [
html.Thead(html.Tr([html.Th(col) for col in list(result.columns)]))
]
rows = result.values.tolist()
# print(rows)
table_row = list()
for row in rows:
temp = [html.Td(data) for data in row]
table_row.extend([html.Tr(temp)])
print('table_header_detail is {}'.format(table_header_detail))
print('in page2_result, table_row is', table_row)
return html.Div([dbc.Table(table_title1, bordered=False, style = {'margin-top' : '18px',
'margin-bottom' : '10px',
'text-align' : 'left',
'paddingLeft': 12}),
dbc.Table(table_header_comp + [html.Tbody(comp_row)], bordered=True, style = {'margin-top' : '18px',
'margin-bottom' : '10px',
'text-align' : 'left',
'paddingLeft': 12}),
dbc.Table(table_title2, bordered=False, style = {'margin-top' : '18px',
'margin-bottom' : '10px',
'text-align' : 'left',
'paddingLeft': 12}),
dbc.Table(table_header + [html.Tbody([row1])], bordered=True, style = {'margin-top' : '18px',
'margin-bottom' : '10px',
'text-align' : 'left',
'paddingLeft': 12}),
dbc.Table(table_title3, bordered=False, style = {'margin-top' : '18px',
'margin-bottom' : '10px',
'text-align' : 'left',
'paddingLeft': 12}),
dbc.Table(table_header_detail + [html.Tbody(table_row)], bordered=True, style = {'margin-top' : '18px',
'margin-bottom' : '10px',
'text-align' : 'left',
'paddingLeft': 12})])
# return html.Div([dbc.Table(table_title1, bordered=False),
# dbc.Table(table_header_comp + [html.Tbody(comp_row)], bordered=True),
# dbc.Table(table_title3, bordered=False),
# dbc.Table(table_header_detail + [html.Tbody(table_row)], bordered=True)])
# return html.Div([dbc.Table(table_title1, bordered=False),
# dbc.Table(table_header_comp + [html.Tbody(comp_row)], bordered=True),
# dbc.Table(table_title3, bordered=False),
# dbc.Table(table_header_detail + [html.Tbody(table_row)], bordered=True)])
def changePeriod(select):
for idx, sel in enumerate(select):
if select[idx] < 12:
select[idx] = (12-select[idx])*30
continue
if select[idx] < 14:
select[idx] = (14-select[idx])*7
continue
select[idx] = 17-select[idx]
return select
def page3Layout(result, from_date, allowable):
chart, table = result
print('chart: {}, chart.keys(): {}'.format(chart, chart.keys()))
print('table: {}'.format(table))
pie = px.pie(
chart, names=chart['asset_class'].tolist(), values=chart['wt'].tolist())
fig = dcc.Graph(id='pie-chart-page3')
fig.figure = pie
table_header = [
html.Thead(html.Tr([html.Th("종목명"), html.Th(
"평가액"), html.Th("비중(%)"), html.Th("자산군")]))
]
informations = table.loc[:, ['itemname', 'value', 'wt', 'asset_class']]
# informations.loc[:, 'wt'] = informations.loc[:, 'wt']*100
total_value = informations.value.str.replace(',','').astype(float).sum()
total_value = '{:,}'.format(round(total_value))
informations.wt = informations.wt.str.replace(',','').astype(float)
total_wt = informations.wt.sum()
total_wt = '{:.1f}'.format(float(total_wt))
informations.wt = informations.wt.apply(lambda x: '{:.1f}'.format(x))
sumOfInfo = [html.Td('계'), html.Td(total_value), html.Td(total_wt), html.Td('')]
informations = informations.values.tolist()
table_row = list()
for row in informations:
temp = [html.Td(data) for data in row]
table_row.extend([html.Tr(temp)])
table_row.extend([html.Tr(sumOfInfo)])
table_result = html.Div(
dbc.Table(table_header + [html.Tbody(table_row)], bordered=True))
x_axis = [from_date]
now = from_date
while now < allowable:
now += timedelta(days=30)
x_axis.append(now)
y_axis = np.random.randn(2, len(x_axis)).tolist()
y_axis[0].sort()
y_axis[1].sort()
# fig_2 = dcc.Graph(id='line-chart')
# fig_line = go.Figure()
# fig_line.add_trace(go.Scatter(
# x=x_axis, y=y_axis[0], mode='lines+markers', name='before'))
# fig_line.add_trace(go.Scatter(
# x=x_axis, y=y_axis[1], mode='lines+markers', name='after'))
# fig_2.figure = fig_line
return html.Div([fig,
table_result])
def rebalance(rebal_date, price_d, detail, new_port):
'''
Rebalance a portfolio.
Parameters:
rebal_date: str
rebalancing date
detail: DataFrame
current balance
price_d: DataFrame
price data on rebal_date
new_port: DataFrame
A new portfolio. Your current portfolio in `detail` will be rebalanced toward `new_port`.
'''
trading_amt = detail.value.sum()
wt = new_port[['itemcode', 'weights']].set_index('itemcode').to_dict()['weights']
pr = new_port[['itemcode', 'price']].set_index('itemcode').squeeze()
da = DiscreteAllocation(weights=wt, latest_prices=pr, total_portfolio_value=trading_amt)
allocation, remaining_cash = da.greedy_portfolio()
print("리밸런싱 결과:")
print("{}: 새 포트폴리오(종목코드:수량)-{}".format(rebal_date,allocation))
print(" - 매매 후 잔액: {:.2f} KRW".format(remaining_cash))
        # Build the post-trade records
df_qty = pd.DataFrame.from_dict(allocation, orient='index', columns=['quantity'])
next_detail = new_port.merge(df_qty, left_on='itemcode', right_index=True, how='inner')
next_detail['cost_price'] = next_detail.price.copy()
next_detail['cost_value'] = next_detail.cost_price*next_detail.quantity
next_detail['value'] = next_detail.cost_value.copy()
        # Sweep the cash left over after trading into the cash position
df_cash = {
'itemcode': 'C000001',
'quantity': remaining_cash,
'cost_price': 1,
'price':1,
'cost_value': remaining_cash,
'value': remaining_cash,
'itemname': '현금',
'asset_class': 'Cash'
}
df_cash = pd.DataFrame.from_dict(df_cash, orient='index').T
next_detail = pd.concat((next_detail[['itemcode', 'quantity', 'cost_price', 'price', 'cost_value', 'value',
'itemname', 'asset_class']], df_cash), axis=0)
next_detail['wt'] = next_detail.value/next_detail.value.sum()
next_date = datetime.strptime(rebal_date, '%Y-%m-%d')
#next_date = str(next_date.month)+'/'+str(next_date.day)+'/'+str(next_date.year)+' 03:30:00 PM'
next_detail['date'] = next_date
next_detail.reset_index(drop=True, inplace=True)
next_detail['group_by'] = ''
next_detail = pd.merge(next_detail,
price_d.loc[price_d.date==rebal_date, ['date', 'itemcode']],
left_on=['date', 'itemcode'],
right_on=['date', 'itemcode'], how='left')
next_detail['username'] = username
next_detail['userid'] = userid
next_detail['original'] = 'Rebal'
next_detail = next_detail.rename(columns={'weights':'wt'})
next_detail = next_detail[['itemcode', 'quantity', 'cost_price', 'price', 'cost_value', 'value',
'itemname', 'asset_class', 'date', 'userid', 'username', 'group_by',
'original', 'wt']]
return next_detail
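    # Hedged usage sketch for rebalance() above (frame names and shapes are assumptions):
    #   next_detail = rebalance(rebal_date='2021-08-02',
    #                           price_d=price_db,          # daily prices with ['date', 'itemcode', ...]
    #                           detail=current_detail,     # current balance rows with a 'value' column
    #                           new_port=advised_pf_today) # ['itemcode', 'weights', 'price', 'itemname', 'asset_class']
    # The current market value is redistributed toward new_port's weights via DiscreteAllocation,
    # and whatever cannot be bought in whole units is kept as the cash row. Note that rebalance()
    # and get_next_portfolio() also reference username, userid, current_date and risk_profile,
    # which appear to be defined only in the commented-out test block near the top of show_content().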
def get_next_portfolio(first_trade=False, new_units=None, prices=None, remaining_cash=None, detail=None):
price_db = PriceDB.instance().data
advised_pf = AdvisedPortfolios.instance().data
        # The simulation period runs from the day after current_date through the last day for which an advised portfolio exists.
dates = advised_pf.loc[(advised_pf.risk_profile == risk_profile) & (
advised_pf.date > current_date), 'date'].min()
rebal_dates = dates
dt = rebal_dates # dt랑 rebal_dates, dates 다 똑같음.
print('리밸런싱 일자: ', rebal_dates)
        # Collect the first day's advised portfolio and the per-asset-class breakdown needed for the return value
new_port = advised_pf.loc[(advised_pf.date == rebal_dates) & (
advised_pf.risk_profile == risk_profile), :]
first_advised_port = new_port.loc[:, ['weights', 'itemname', 'itemcode']].groupby(
['itemname', 'itemcode']).sum().reset_index()
by_assetclass = new_port.loc[:, ['weights', 'asset_class']].groupby(
'asset_class').sum().sort_values('weights', ascending=False).reset_index()
# next_detail = copy.deepcopy(detail)
next_detail = detail
all_the_nexts = | pd.DataFrame(columns=next_detail.columns) | pandas.DataFrame |
# text_association.py - calculates the similarity between the text and the influencers
import pandas as pd
from .text_cleaner import *
import re
from collections import Counter
import numpy as np
import pickle
from scipy.special import softmax
import tensorflow as tf
class TextProcessor(object):
def __init__(self):
# Load the required files
with open("text_processing/finetuned_s90_10_word_trait_array.pickle", "rb") as f:
self.word_df = pickle.load(f)
# Generate word map from AGDS
self.word_map = self.word_df.columns.tolist()
# Read archetype list and clean it up
self.arch_df = pd.read_csv("text_processing/archetypes_pl_new.csv", header=0, index_col=0)
self.arch_df = self.arch_df.fillna(2)
self.arch_df = self.arch_df[~self.arch_df.index.duplicated(keep='first')]
# Generate trait list
self.trait_list = self.arch_df.columns.tolist()
# Load LSTM model
self.test_model = tf.keras.models.load_model("text_processing/nn_model")
def extract_hashtags(self, post_text):
HASH_RE = re.compile(r"\#\w+")
out_list = re.findall(HASH_RE, post_text)
return out_list
def get_trait_dot_product(self, post_text: str) -> list:
# Filter out the text
filtered_post = remove_stopwords(clean_up_text(post_text))
filtered_post += self.extract_hashtags(post_text)
# Create a vector for dot product vector
post_vector = [0] * len(self.word_map)
# Calculate word occurrences
word_ctr = Counter(filtered_post)
for word, freq in word_ctr.items():
if word in self.word_map:
post_vector[self.word_map.index(word)] = freq
# Calculate dot product for a given text
word_dot = self.word_df.dot(post_vector)
out_vec = pd.Series()
for trait in self.trait_list:
out_vec = out_vec.append(pd.Series([np.argmax(softmax(word_dot.loc[trait]))], index=[trait]))
return out_vec
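    # Pipeline summary for get_trait_dot_product(): the post is cleaned, hashtags are appended,
    # word frequencies are placed into a vector aligned with self.word_map, the AGDS word-trait
    # matrix is applied via DataFrame.dot, and each trait's score vector is collapsed to a single
    # predicted level with argmax(softmax(...)).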
# Trait accuracy - round the results
def natural_round(x: float) -> int:
out = int(x // 1)
return out + 1 if (x - out) >= 0.5 else out
def accuracy_per_trait(input_vector: pd.Series, annotated_vector: pd.Series) -> np.array:
out_array = np.array([0] * 37, dtype=np.int)
for i in range(len(out_array)):
if input_vector[i] == annotated_vector[i]:
out_array[i] = 1
return out_array
# Method for calculating the similarity
def calculate_similarity(self, post_text: str) -> (pd.Series, pd.Series):
# Calculate word-trait dot product
post_result = self.get_trait_dot_product(post_text)
# Generate new dataframe - one row per influencer
inf_df = | pd.Series(index=self.arch_df.index) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 13 11:22:34 2022
@author: mariaolaru
"""
import os
import pandas as pd
import numpy as np
import statsmodels.api as sm
import scipy.stats as stat
import xarray as xr
from matplotlib import pyplot as plt
import math
from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler, RobustScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import torch
from torch.utils.data import TensorDataset, DataLoader
import torch.optim as optim
from datetime import date
import datetime
def get_files(data_dir):
files = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]
if '.DS_Store' in files:
i = files.index('.DS_Store')
del files[i]
return files
def make_dir(fp, dirname):
path = os.path.join(fp, dirname)
if not os.path.isdir(path):
os.mkdir(path)
return path
def preproc_data(df, label, file=None):
if label == 'pkg':
df_out = df[df['Off_Wrist'] == 0]
col_keep = ['Date_Time', 'BK', 'DK', 'Tremor_Score']
df_out = df_out.loc[:, col_keep]
df_out = df_out.rename(columns={'Date_Time': 'pkg_dt',
'BK': 'pkg_bk',
'DK': 'pkg_dk',
'Tremor_Score': 'pkg_tremor'})
df_out['pkg_bk'] = df_out['pkg_bk']*-1
elif label == 'apple':
df_out = df.rename(columns={'time': 'timestamp'})
if 'tremor' in file:
df_out = df_out.rename(columns={'probability': 'apple_tremor'})
if 'dyskinesia' in file:
df_out = df_out.rename(columns={'probability': 'apple_dk'})
return df_out
def merge_targets(dfp, df_out):
if df_out.empty:
return dfp
df_merged = df_out.merge(dfp, how='outer')
return df_merged
def print_loop(i, num, message, file):
print('(' + str(i+1) + '/' + str(num) + ') ' + message + ': ', file)
def preproc_files(data_dir):
files = get_files(data_dir)
nfiles = len(files)
df_pkg = pd.DataFrame([])
df_apple = pd.DataFrame([])
for i in range(nfiles):
file = files[i]
print_loop(i, nfiles, 'preprocessing file', file)
file_fp = os.path.join(data_dir, file)
df = pd.read_csv(file_fp)
if 'BK' in df.columns: #pkg data
dfp = preproc_data(df, 'pkg')
df_pkg = merge_targets(dfp, df_pkg)
pkg_dir = make_dir(data_dir, 'orig_pkg')
os.replace(file_fp, os.path.join(pkg_dir, file))
if 'time' in df.columns: #apple data
dfp = preproc_data(df, 'apple', file)
df_apple = merge_targets(dfp, df_apple)
apple_dir = make_dir(data_dir, 'orig_apple')
os.replace(file_fp, os.path.join(apple_dir, file))
if not df_pkg.empty:
out_pkg_file = 'pkg_2min_scores.csv'
df_pkg.to_csv(os.path.join(data_dir, out_pkg_file), index=False)
if not df_apple.empty:
out_apple_file = 'apple_1min_scores.csv'
df_apple.to_csv(os.path.join(data_dir, out_apple_file), index=False)
def pivot_df(df):
#assumes values of pivot table are in column #1 and columns are column #0 & #2
dfp = df.pivot_table(index = df.index,
values = [df.columns[1]],
columns = [df.columns[0], df.columns[2]])
dfp.columns = ['_'.join(map(str, col)) for col in dfp.columns]
return dfp
def average_2min_scores(df_psd):
#find indices of timestamp on even minutes
s = pd.Series(df_psd.index.minute % 2 == 1)
odd_i = s[s].index.values
odd_prev_i = odd_i-1
diff = (df_psd.index[odd_i] - df_psd.index[odd_prev_i]).astype('timedelta64[m]')
s = pd.Series(diff == 1)
s_i = s[s].index.values
keep_i = odd_i[s_i]
if keep_i[0] == 0:
keep_i = np.delete(keep_i, 0)
ts_2min = df_psd.index[keep_i]
colnames = df_psd.columns
colnames_2min = [sub.replace('min1', 'min2') for sub in colnames]
df_psd_avg1 = df_psd.iloc[keep_i, :].reset_index(drop=True).to_numpy()
df_psd_avg2 = df_psd.iloc[keep_i-1, :].reset_index(drop=True).to_numpy()
df_avg = np.mean([df_psd_avg1, df_psd_avg2], axis=0)
df_psd_2min = pd.DataFrame(data = df_avg,
columns = colnames_2min,
index = ts_2min)
return df_psd_2min
def add_2min_scores(df_psd):
colnames = df_psd.columns
addl = 'min1_'
addl_colnames = [addl + s for s in colnames]
df_psd.columns = addl_colnames
df_psd_2min = average_2min_scores(df_psd)
df_merged = df_psd.merge(df_psd_2min, how = 'outer', left_index = True, right_index = True)
return df_merged
def merge_df(df_psd, df_target, get_2min_scores=False):
if len(df_psd.index.unique()) < len(df_psd.index): #assumes this is long-form power spectra data
df_psd = pivot_df(df_psd)
if (get_2min_scores==True):
df_psd = add_2min_scores(df_psd)
if df_target.empty:
return df_psd
df_out = df_target.merge(df_psd, left_index=True, right_index=True, sort = True)
return df_out
def add_timestamps(df, file):
colname = 'timestamp'
unit = 'ms'
ts_pkg = 'pkg_dt'
ts_out = 'timestamp_dt'
if ts_pkg in df.columns:
colname = ts_pkg
unit = 'ns'
elif colname in df.columns:
        if len(str(int(df['timestamp'].iloc[0]))) == 10: # 10-digit epoch implies seconds
unit = 's'
elif colname not in df.columns:
raise ValueError('This file does not contain a timestamp column header: '
+ file)
df[ts_out] = pd.to_datetime(df[colname], unit = unit)
if colname == ts_pkg:
df[ts_out] = df[ts_out] + pd.DateOffset(minutes=1)
df = df.drop(ts_pkg, axis = 1)
else:
df[ts_out] = df[ts_out] - pd.Timedelta(7, unit = 'hours') #Assumes local time is America/Los_Angeles
df = df.set_index([ts_out])
return df
def check_overlap(colnames, feature_key, target_list):
merge_data = False
if colnames[1] in target_list:
merge_data = True
if feature_key in colnames:
merge_data = True
return merge_data
def merge_dfs(data_dir, feature_key, targets):
files = get_files(data_dir)
nfiles = len(files)
df_out = pd.DataFrame([])
for i in range(nfiles):
file = files[i]
file_fp = os.path.join(data_dir, file)
df = pd.read_csv(file_fp)
df = add_timestamps(df, file)
merge_data = check_overlap(df.columns, feature_key, targets)
if merge_data:
print_loop(i, nfiles, 'merging file', file)
df_out = merge_df(df, df_out, get_2min_scores=True)
df_out = order_features(df_out, feature_key) #Assumes feature_key has spectra that is not ordered
#df_out = df_out.dropna()
if feature_key == 'spectra':
feature_i = [j for j, s in enumerate(df_out.columns) if feature_key in s]
df_out.iloc[:, feature_i] = np.log10(df_out.iloc[:, feature_i])
return df_out
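# Illustrative end-to-end sketch of how the helpers above are meant to be chained.
# The directory path and target names are placeholders (assumptions), not values
# taken from the original analysis.
def example_merge_pipeline(data_dir='/path/to/subject_data'):
    target_list = ['pkg_bk', 'pkg_dk', 'pkg_tremor', 'apple_dk', 'apple_tremor']
    preproc_files(data_dir)  # standardizes raw PKG/Apple exports into single CSVs
    df_merged = merge_dfs(data_dir, 'spectra', target_list)  # log10 power + wearable targets
    return df_merged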
def scale_data(df, scaler_type):
scaler = get_scaler(scaler_type)
df_scale = pd.DataFrame(data = scaler.fit_transform(df),
columns = df.columns,
index = df.index)
return df_scale
def get_scaler(scaler):
scalers = {
"minmax": MinMaxScaler,
"standard": StandardScaler,
"maxabs": MaxAbsScaler,
"robust": RobustScaler,
}
return scalers.get(scaler.lower())()
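# Quick usage sketch for scale_data/get_scaler on made-up columns; any of
# 'minmax', 'standard', 'maxabs', or 'robust' selects the matching sklearn scaler.
def example_scaling():
    df_demo = pd.DataFrame({'pkg_bk': [1.0, 2.0, 3.0], 'pkg_dk': [10.0, 20.0, 30.0]})
    return scale_data(df_demo, 'standard')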
def get_targets(colnames, target_list):
targets = list(set(target_list) & set(colnames))
return targets
def get_frequencies(colnames):
frequencies = np.array([])
for i in range(len(colnames)):
if '+' in colnames[i]: #assumes frequency column headers include contact channel w/ '+'
x = colnames[i].split('_')[-2]
x = float(x)
frequencies = np.append(frequencies, x)
return np.unique(frequencies)
def sort_contacts(contacts):
num_contacts = len(contacts)
contacts_sorted = np.empty(num_contacts, dtype='<U32')
contact_start = np.zeros(num_contacts)
for i in range(num_contacts):
contact_end_i = contacts[i].find('-')
contact_start[i] = int(contacts[i][1:contact_end_i])
contacts_sorted_i = np.argsort(contact_start)
for i in range(num_contacts):
contacts_sorted[i] = contacts[contacts_sorted_i[i]]
return contacts_sorted
def get_contacts(colnames):
contacts = np.array([])
for i in range(len(colnames)):
if '+' in colnames[i]: #assumes channel column headers include '+'
x = '+' + colnames[i].split('+')[1]
contacts = np.append(contacts, x)
contacts = sort_contacts(np.unique(contacts))
return contacts
def order_features(df, feature_key):
feature_i = [j for j, s in enumerate(df.columns) if feature_key in s]
target_i = list(set(np.arange(0, df.shape[1])).difference(feature_i))
colnames = df.columns[feature_i]
contacts = get_contacts(colnames)
df_ordered = pd.DataFrame([])
for contact in contacts:
contact_i = [i for i, s in enumerate(df.columns) if contact in s]
df_ordered = pd.concat([df_ordered, df.iloc[:, contact_i]], axis = 1)
df_out = pd.merge(df_ordered, df.iloc[:, target_i], left_index = True, right_index = True)
return df_out
def get_psd(df, feature_key):
feature_i = [j for j, s in enumerate(df.columns) if feature_key in s]
df_in = df.iloc[:, feature_i]
contacts = get_contacts(df_in.columns)
frequencies = get_frequencies(df_in.columns)
return [df_in, contacts, frequencies]
def reshape_data(df, feature_key, targets, psd_only = False):
[df_psd, contacts, frequencies] = get_psd(df, feature_key)
df_psd.index.name = 'measure'
description = 'spectral overlaid increments'
da = c2dto4d(df_psd, contacts, frequencies, description)
if (psd_only == True):
return da
cols_keep = list(set(targets) & set(df.columns))
data_keep = df.loc[:, cols_keep]
da_redund = xr.DataArray(
dims = ['time_interval', 'contact', 'measure', 'feature'],
coords = dict(
time_interval = da['time_interval'].values,
contact = da['contact'].values,
measure = data_keep.index,
feature = cols_keep))
for i in range(len(da_redund['contact'].values)):
contact = da_redund['contact'].values[i]
da_redund.loc[dict(contact = contact)] = data_keep.values
da = xr.concat([da, da_redund], dim='feature')
return da
def get_single_meta_data(index, interval):
df_variable = pd.DataFrame(columns = ['start_date',
'stop_date',
'hours',
'minutes'])
start_date = np.array([0, 0, 0])
start_date[0] = pd.DatetimeIndex(index).year[0]
start_date[1] = pd.DatetimeIndex(index).month[0]
start_date[2] = pd.DatetimeIndex(index).day[0]
stop_date = np.array([0,0,0])
stop_date[0] = pd.DatetimeIndex(index).year[len(index)-1]
stop_date[1] = pd.DatetimeIndex(index).month[len(index)-1]
stop_date[2] = pd.DatetimeIndex(index).day[len(index)-1]
df_variable.loc[0, 'start_date'] = str(start_date[0]) + '-' + str(start_date[1]) + '-' + str(start_date[2])
df_variable.loc[0, 'stop_date'] = str(stop_date[0]) + '-' + str(stop_date[1]) + '-' + str(stop_date[2])
h = math.floor(len(index)/interval/60)
m = math.floor(((len(index)/interval/60)-h)*60)
df_variable.loc[0, 'hours'] = h
df_variable.loc[0, 'minutes'] = m
return df_variable
def get_meta_data(data_dir, feature_key):
files = get_files(data_dir)
nfiles = len(files)
df_psd = pd.DataFrame([])
df_pkg = pd.DataFrame([])
df_apple = pd.DataFrame([])
df_times = pd.DataFrame(columns = ['start_date',
'stop_date',
'hours',
'minutes'],
index = ['psd', 'pkg', 'apple',
'psd-pkg', 'psd-apple', 'pkg-apple',
'psd-pkg-apple'])
for i in range(nfiles):
file = files[i]
print_loop(i, nfiles, 'processing data', file)
file_fp = os.path.join(data_dir, file)
df = pd.read_csv(file_fp)
if 'pkg' in file:
df_pkg = add_timestamps(df, file)
interval = 2
df_times.loc['pkg', :] = get_single_meta_data(df_pkg.index, interval).values
elif 'apple' in file:
df_apple = add_timestamps(df, file)
df_apple = df_apple.drop(['timestamp'], axis = 1)
interval = 1
df_times.loc['apple', :] = get_single_meta_data(df_apple.index, interval).values
elif 'psd' in file:
df_psd = add_timestamps(df, file)
df_psd = df_psd.drop(['timestamp'], axis = 1)
interval = 1
df_times.loc['psd', :] = get_single_meta_data(df_psd.index.unique(), interval).values
if (not df_pkg.empty) & (not df_psd.empty):
df_pkg_psd = merge_df(df_psd, df_pkg)
if not df_pkg_psd.empty:
interval = 2
df_times.loc['psd-pkg', :] = get_single_meta_data(df_pkg_psd.index, interval).values
if (not df_apple.empty) & (not df_psd.empty):
df_apple_psd = merge_df(df_psd, df_apple)
if not df_apple_psd.empty:
interval = 1
df_times.loc['psd-apple', :] = get_single_meta_data(df_apple_psd.index, interval).values
if (not df_pkg.empty) & (not df_apple.empty):
df_pkg_apple = merge_df(df_pkg, df_apple)
if not df_pkg_apple.empty:
interval = 1
df_times.loc['pkg-apple', :] = get_single_meta_data(df_pkg_apple.index, interval).values
if (not df_pkg.empty) & (not df_apple.empty) & (not df_psd.empty):
df_psd_pkg_apple = merge_df(df_psd, df_pkg_apple)
if not df_psd_pkg_apple.empty:
interval = 2
df_times.loc['psd-pkg-apple', :] = get_single_meta_data(df_psd_pkg_apple.index, interval).values
out_dir = make_dir(data_dir, 'tables')
filename = 'meta_timetable'
df_times.to_csv(os.path.join(out_dir, filename+'.csv'))
plot_table(df_times, data_dir, filename)
return df_times
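# Sketch: summarize how much recording time overlaps between the neural (psd),
# PKG, and Apple streams, then trim the target list to the streams that overlap.
# The path and target names are placeholders; writes tables/meta_timetable.csv.
def example_meta_overlap(data_dir='/path/to/subject_data'):
    df_times = get_meta_data(data_dir, 'spectra')
    return get_psd_overlaps(df_times, ['pkg_bk', 'pkg_dk', 'pkg_tremor',
                                       'apple_dk', 'apple_tremor'])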
def plot_table(df, data_dir, filename):
fig, ax = plt.subplots()
# hide axes
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
ax.table(cellText=df.values, colLabels=df.columns, loc='center', rowLabels=df.index)
fig.tight_layout()
plt.show()
out_dir = make_dir(data_dir, 'tables')
fig.savefig(os.path.join(out_dir, filename+'.pdf'))
def get_psd_overlaps(df_times, target_list):
if df_times.loc['psd-apple', :].isnull().any():
indx_rm = [i for i, s in enumerate(target_list) if 'apple' in s]
target_list = np.delete(target_list, indx_rm)
if df_times.loc['psd-pkg', :].isnull().any():
indx_rm = [i for i, s in enumerate(target_list) if 'pkg' in s]
target_list = np.delete(target_list, indx_rm)
return target_list
def compute_pca_2d(df, pca, pc_labels, domain):
pcs = pca.fit_transform(df.values).T
columns = df.index
if domain == 'frequency':
pcs = pca.fit_transform(df.T.values).T
columns = df.columns
df_pcs = pd.DataFrame(data = pcs,
columns = columns,
index = pc_labels)
return df_pcs
def compute_pca(da_psd, ncomponents, domain):
pc_nums = np.linspace(1, ncomponents, ncomponents).astype(int).astype(str).tolist()
pc_labels = ['pc' + sub for sub in pc_nums]
pca = PCA(n_components=ncomponents)
if domain == 'frequency':
da_psd = da_psd.sel(time_interval='min1')
da = xr.DataArray(
dims = ['contact', 'pc', 'feature'],
coords=dict(
contact=da_psd['contact'].values,
pc=pc_labels,
feature=da_psd['feature'].values,
),
attrs=dict(description='PCs in ' + domain + ' domain'),
)
contacts = da['contact'].values
df_pc_ratio = pd.DataFrame([], columns = contacts, index = pc_labels)
for i in range(len(contacts)):
contact = contacts[i]
df = pd.DataFrame(da_psd.sel(contact = contact).values,
columns = da_psd.feature.values,
index = da_psd.measure.values)
df = df.dropna()
df_pcs = compute_pca_2d(df, pca, pc_labels, domain)
da.loc[dict(contact=contact)] = df_pcs
df_pc_ratio.iloc[:, i] = pca.explained_variance_ratio_
elif domain == 'time':
df_pcs = compute_pca_2d(da_psd, pca, pc_labels, domain)
da = df_pcs
df_pc_ratio = pca.explained_variance_ratio_
return [da, df_pc_ratio]
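# Sketch of the intended PCA call: reshape the merged table into an xarray cube,
# then extract per-contact components in the frequency domain. `df_merged` is
# assumed to come from merge_dfs above; the target names are placeholders.
def example_pca(df_merged):
    targets = ['pkg_bk', 'apple_tremor']
    da_psd = reshape_data(df_merged, 'spectra', targets, psd_only=True)
    da_pcs, pc_ratio = compute_pca(da_psd, ncomponents=3, domain='frequency')
    return da_pcs, pc_ratio  # components per contact + explained variance ratios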
def compute_spectra_stats(df, feature_key):
[df_psd, contacts, frequencies] = get_psd(df, feature_key)
feature_1min = [j for j, col in enumerate(df_psd.columns) if 'min1' in col]
df_psd_1min = df_psd.iloc[:, feature_1min]
#get statistics
stats = pd.DataFrame([], columns = df_psd_1min.columns,
index = ['mean', 'sem', 'ci_lower', 'ci_upper'])
confidence_level = 0.95
degrees_freedom = df_psd_1min.shape[0] - 1
stats.loc['mean'] = df_psd_1min.mean(axis=0)
stats.loc['sem'] = stat.sem(df_psd_1min, axis=0)
[stats.loc['ci_lower'], stats.loc['ci_upper']] = stat.t.interval(confidence_level,
degrees_freedom,
stats.loc['mean'].astype(float),
stats.loc['sem'].astype(float))
#convert into 3d array
description = 'Summary statistics for spectra'
da = c2dto3d(stats, contacts, frequencies, description)
return da
def train_val_test_split(X, y, test_ratio, shuffle=True):
val_ratio = test_ratio / (1 - test_ratio)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_ratio, random_state = 42, shuffle=shuffle)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=val_ratio, random_state = 42, shuffle=shuffle)
return X_train, X_val, X_test, y_train, y_val, y_test
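# Illustrative train/val/test split on the merged table; the 'spectra' column
# prefix and target name are assumptions about df_merged's layout. shuffle=False
# keeps the chronological order of the recordings.
def example_split(df_merged, target='pkg_bk'):
    df_clean = df_merged.dropna()
    X = df_clean[[c for c in df_clean.columns if 'spectra' in c]]
    y = df_clean[target]
    return train_val_test_split(X, y, test_ratio=0.2, shuffle=False)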
def compute_correlation(df, feature_key, target):
if 'apple' in target:
addl = 'min1_'
if 'pkg' in target:
addl = 'min2_'
feature_key_addl = addl + feature_key
feature_i = [j for j, s in enumerate(df.columns) if feature_key_addl in s]
df = df.dropna()
X = df.iloc[:, feature_i]
y = df.loc[:, target]
df_corrs = pd.DataFrame(index = ['r', 'r_pval'], columns = X.columns)
for i in range(X.shape[1]):
df_corrs.iloc[:, i] = stat.pearsonr(X.iloc[:, i], y)
return df_corrs
def c2dto3d(df, contacts, frequencies, description):
dim = len(contacts)
df_3d = np.array(np.hsplit(df, dim))
da = xr.DataArray(data = df_3d,
dims = ['contact', 'measure', 'feature'],
coords = dict(
contact=contacts,
measure=df.index.values,
feature=frequencies
),
attrs=dict(description=description),
)
return da
def c2dto4d(df, contacts, frequencies, description):
dim = len(contacts)
df_list = []
time_intervals = ['min1', 'min2']
for i in range(len(time_intervals)):
time_interval = time_intervals[i]
feature_i = [j for j, s in enumerate(df.columns) if time_interval in s]
dft = df.iloc[:, feature_i]
df_3d = np.array(np.hsplit(dft, dim))
df_list.append(df_3d)
#debug sanity check
#for i in range(df.shape[0]):
# plt.plot(df_3d[0,i, :], color= 'b', alpha = 0.01)
df_4d = np.stack(df_list)
da = xr.DataArray(data = df_4d,
dims = ['time_interval', 'contact', 'measure', 'feature'],
coords = dict(
time_interval = time_intervals,
contact=contacts,
measure=df.index.values,
feature=frequencies
),
attrs=dict(description=description),
)
return da
def compute_correlations(df, feature_key, target_list):
contacts = get_contacts(df.columns)
frequencies = get_frequencies(df.columns)
targets = list(set(target_list) & set(df.columns))
da = xr.DataArray(
dims = ['target', 'contact', 'measure', 'feature'],
coords=dict(
target=targets,
contact=contacts,
measure=['r', 'r_pval'],
feature=frequencies
),
attrs=dict(description='Pearson r test'),
)
for i in range(len(targets)):
target = targets[i]
print_loop(i, len(targets), 'correlating', target)
df_corrs = compute_correlation(df, feature_key, target)
description = 'Pearsonr for ' + target
df_corrs_3d = c2dto3d(df_corrs, contacts, frequencies, description)
da.loc[dict(target=target)] = df_corrs_3d
return da
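# Sketch: correlate each spectral bin with every available wearable score and
# save line plots of Pearson r per contact. The output path and target names
# are placeholders.
def example_correlations(df_merged, out_gp='/path/to/subject_data'):
    target_list = ['pkg_bk', 'pkg_dk', 'pkg_tremor', 'apple_dk', 'apple_tremor']
    da_corr = compute_correlations(df_merged, 'spectra', target_list)
    plot_spectra(da_corr, 'corr', 'r', out_gp, 'spectra')
    return da_corr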
def plot_psds(da, measure, out_gp, filename, feature_key):
if measure == 'overlaid':
suptitle_text = 'spectral overlay'
elif measure == 'ci':
suptitle_text = 'spectral summary'
dirname = os.path.basename(out_gp)
contacts = da['contact'].values
num_plots = len(contacts)
ncols = math.ceil(num_plots/2)
fig, axs = plt.subplots(nrows = 2, ncols = ncols, figsize=(5*ncols, 15))
plt.rcParams.update({'font.size': 16})
plt.setp(axs[-1, :], xlabel = 'Frequencies (Hz)')
plt.setp(axs[:, 0], ylabel = 'log10(Power)')
fig.suptitle(dirname + ' ' + suptitle_text)
colors = ['#3976AF', '#F08536', '#519D3E', '#C63A32', '#8D6BB8', '#84584E', '#D57FBE', '#BDBC45', '#56BBCC']
ylim = [-10, -2]
if feature_key == 'fooof_flat':
ylim = [-2, 2]
for i in range(len(contacts)):
contact = contacts[i]
ax = fig.get_axes()[i]
ax.set_ylim(ylim)
ax.set_xlim([0, 100])
ax.axvspan(4, 8, color = 'grey', alpha = 0.1)
ax.axvspan(13, 30, color = 'grey', alpha = 0.1)
ax.axvspan(60, 90, color = 'grey', alpha = 0.1)
ax.set_title(contact)
if (measure == 'overlaid'):
df = pd.DataFrame(da.sel(contact = contact).values,
columns = da.feature.values,
index = da.measure.values)
alpha = 0.1
if df.shape[0] > 3000:
alpha = 0.01
for j in range(df.shape[0]):
df_singrow = df.iloc[j, :]
ax.plot(df_singrow.index,
df_singrow.values,
color = colors[0],
alpha = alpha)
elif (measure == 'ci'):
df = pd.DataFrame(da.sel(contact = contact).values,
columns = da.feature, index = da.measure)
ax.plot(df.columns, df.loc['mean'].astype(float), color = colors[0])
ax.fill_between(df.columns,
df.loc['ci_lower'].astype(float),
df.loc['ci_upper'].astype(float),
alpha = 0.5,
color = colors[0])
make_dir(out_gp, 'plots')
out_dir= make_dir(os.path.join(out_gp, 'plots'), feature_key)
fig.savefig(os.path.join(out_dir, filename))
def plot_pca(da, measure, out_gp, filename, feature_key):
dirname = os.path.basename(out_gp)
contacts = da['contact'].values
pcs = da['pc'].values
num_plots = len(contacts)
ncols = math.ceil(num_plots/2)
fig, axs = plt.subplots(nrows = 2, ncols = ncols, figsize=(5*ncols, 15))
plt.rcParams.update({'font.size': 16})
plt.setp(axs[-1, :], xlabel = 'Frequencies (Hz)')
plt.setp(axs[:, 0], ylabel = 'component values')
fig.suptitle(dirname + ' ' + 'top PCs ' + measure + ' domain')
colors = ['#3976AF', '#F08536', '#519D3E', '#C63A32', '#8D6BB8', '#84584E', '#D57FBE', '#BDBC45', '#56BBCC']
for i in range(len(contacts)):
contact = contacts[i]
ax = fig.get_axes()[i]
ax.set_xlim([0, 100])
ax.axhline(y=0, color = 'grey')
ax.axvspan(4, 8, color = 'grey', alpha = 0.1)
ax.axvspan(13, 30, color = 'grey', alpha = 0.1)
ax.axvspan(60, 90, color = 'grey', alpha = 0.1)
ax.set_title(contact)
if (measure == 'frequency'):
df = pd.DataFrame(da.sel(contact = contact).values,
index = da.pc.values,
columns = da.feature.values)
for j in range(df.shape[0]):
pc = pcs[j]
df_sngl = df.iloc[j, :]
ax.plot(df_sngl.index,
df_sngl.values,
color = colors[j],
label = pc)
elif (measure == 'time'):
            print('in progress: time-domain PCA plotting not implemented yet')
if i == 0:
fig.legend(ncol = 2, loc = 'upper right', prop={"size":10}, title = 'components')
make_dir(out_gp, 'plots')
out_dir= make_dir(os.path.join(out_gp, 'plots'), feature_key)
fig.savefig(os.path.join(out_dir, filename))
def plot_corrs(da, measure, out_gp, filename, feature_key):
#dims = da.coords.dims
dirname = os.path.basename(out_gp)
target_vals = da['target'].values
num_plots = len(target_vals)
ncols = math.ceil(num_plots/2)
nrows = 2
fig, axs = plt.subplots(nrows = nrows, ncols = ncols, figsize=(5*ncols, 15))
plt.rcParams.update({'font.size': 16})
xlabel = 'Frequencies (Hz)'
ylabel = 'Pearson ' + measure
if num_plots <= nrows:
plt.setp(axs[-1], xlabel = xlabel)
plt.setp(axs[:], ylabel = ylabel)
else:
plt.setp(axs[-1, :], xlabel = xlabel)
plt.setp(axs[:, 0], ylabel = ylabel)
fig.suptitle(dirname)
colors = ['#3976AF', '#F08536', '#519D3E', '#C63A32', '#8D6BB8', '#84584E', '#D57FBE', '#BDBC45', '#56BBCC']
for i in range(len(target_vals)):
target = target_vals[i]
ax = fig.get_axes()[i]
ax.set_ylim([-1, 1])
ax.set_xlim([0, 100])
ax.axhline(y=0, color = 'grey')
ax.axvspan(4, 8, color = 'grey', alpha = 0.1)
ax.axvspan(13, 30, color = 'grey', alpha = 0.1)
ax.axvspan(60, 90, color = 'grey', alpha = 0.1)
ax.set_title(target)
df = pd.DataFrame(da.sel(target = target,
measure = measure).values.T,
columns = da.contact, index = da.feature)
for j in range(len(df.columns)):
channel = df.columns[j]
ax.plot(df.index, df[channel], label = channel, color = colors[j])
if i == 0:
fig.legend(ncol = 2, loc = 'upper right', prop={"size":10}, title = 'channels')
fig.tight_layout()
make_dir(out_gp, 'plots')
out_dir= make_dir(os.path.join(out_gp, 'plots'), feature_key)
fig.savefig(os.path.join(out_dir, filename))
return df
def plot_spectra(da, da_type, measure, out_gp, feature_key):
if 'time_interval' in da.dims:
da = da.sel(time_interval = 'min1')
dirname = os.path.basename(out_gp)
if (da_type == 'corr'):
filename = dirname + '_corr_' + measure + '_linegraphs.pdf'
plot_corrs(da, measure, out_gp, filename, feature_key)
elif (da_type == 'psd'):
filename = dirname + '_spectra_' + measure + '_linegraphs.pdf'
plot_psds(da, measure, out_gp, filename, feature_key)
elif (da_type == 'pca'):
filename = dirname + '_pca_' + measure + '_linegraphs.pdf'
plot_pca(da, measure, out_gp, filename, feature_key)
def plot_timeseries(dfl_top_ts, out_gp, feature_key, time='datetime'):
dirname = os.path.basename(out_gp)
target_vals = list(dfl_top_ts.keys())
nplots = len(target_vals)
xlabel = 'Time (datetime)'
if time == 'samples':
xlabel = 'Time (samples)'
fig, axs = plt.subplots(nrows = nplots, ncols = 1, figsize=(15, 5*nplots))
plt.rcParams.update({'font.size': 16})
plt.setp(axs[-1], xlabel = xlabel)
fig.text(0.007, 0.5, 'scores (normalized)', ha="center", va="center", rotation=90)
fig.suptitle(dirname + ' timeseries')
colors = ['#3976AF', '#F08536', '#519D3E', '#C63A32', '#8D6BB8', '#84584E', '#D57FBE', '#BDBC45', '#56BBCC']
locs = ['upper right', 'center right', 'lower right']
for i in range(len(target_vals)):
target = target_vals[i]
ax = fig.get_axes()[i]
ax.set_ylim([0, 1])
ax.set_title(target)
df = | pd.DataFrame(dfl_top_ts[target]) | pandas.DataFrame |
"""Functions to interactively cut the data into buckets and plot the results"""
__version__ = '0.1.0' # Ensure this is kept in-sync with VERSION in the SETUP.PY
############
# Contents #
############
# - Setup
# - Assign buckets
# - Group and aggregate
# - Set coordinates
# - Pipeline functions
# - Plotting
# - Running interactively
#########
# Setup #
#########
# Import built-in modules
import functools
import inspect
# Import external modules
import numpy as np
import pandas as pd
import bokeh
import bokeh.palettes
##################
# Assign buckets #
##################
def divide_n(df, bucket_var, n_bins=10):
"""
Assign each row of `df` to a bucket by dividing the range of the
`bucket_var` column into `n_bins` number of equal width intervals.
df: DataFrame
bucket_var: Name of the column of df to use for dividing.
n_bins: positive integer number of buckets.
Returns: df with the additional `bucket` column
The `bucket` column is Categorical data type consisting of Intervals
that partition the interval from just below min(bucket_var) to
max(bucket_var).
"""
df_w_buckets = df.assign(
bucket=lambda df: pd.cut(df[bucket_var], bins=n_bins)
)
return(df_w_buckets)
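# Worked example on made-up data: ten evenly spaced premiums split into five
# equal-width buckets, which leaves two rows per interval.
def _example_divide_n():
    df = pd.DataFrame({'premium': np.linspace(100.0, 1000.0, 10)})
    return divide_n(df, 'premium', n_bins=5)['bucket'].value_counts(sort=False)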
def custom_width(df, bucket_var, width, boundary=0, first_break=None, last_break=None):
"""
    Assign each row of `df` to a bucket by dividing the `bucket_var` column
    into fixed-width intervals of the given `width`, aligned to `boundary`.
df: DataFrame
bucket_var: Name of the column of df to use for dividing.
width: Positive width of the buckets
boundary: Edge of one of the buckets, if the data extended that far
first_break: All values below this (if any) are grouped into one bucket
last_break: All values above this (if any) are grouped into one bucket
Returns: df with the additional `bucket` column
The `bucket` column is Categorical data type consisting of Intervals
that partition the interval from just below min(bucket_var) to
max(bucket_var).
"""
var_min, var_max = df[bucket_var].min(), df[bucket_var].max()
extended_min = var_min - 0.001 * np.min([(var_max - var_min), width])
# Set bucket edges
start = np.floor((extended_min - boundary) / width) * width + boundary
stop = np.ceil((var_max - boundary) / width) * width + boundary
num = int((stop - start) / width) + 1
breaks_all = np.array([
extended_min,
*np.linspace(start, stop, num)[1:-1],
var_max,
])
# Clip lower and upper buckets
breaks_clipped = breaks_all
if first_break is not None or last_break is not None:
breaks_clipped = np.unique(np.array([
breaks_all.min(),
*np.clip(breaks_all, first_break, last_break),
breaks_all.max(),
]))
    df_w_buckets = df.assign(
        bucket=lambda df: pd.cut(df[bucket_var], bins=breaks_clipped)
    )
    return(df_w_buckets)
"""Climate normals daily data"""
import re
import numpy as np
import pandas as pd
def norm_get_dly():
"""Get all daily climate normals data"""
prcp = norm_get_dly_prcp()
snow = norm_get_dly_snow()
tavg = norm_get_dly_tavg()
tmax = norm_get_dly_tmax()
tmin = norm_get_dly_tmin()
by = ["id", "month", "day"]
df = pd.merge(prcp, snow, how="outer", on=by)
df = pd.merge(df, tavg, how="outer", on=by)
df = | pd.merge(df, tmax, how="outer", on=by) | pandas.merge |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specif the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
| tm.assert_frame_equal(data, data2) | pandas.util.testing.assert_frame_equal |
import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
# from sklearn.preprocessing import StandardScaler
from utils.andet import kde, sr
from utils.tools import StandardScaler, padding
from utils.timefeatures import time_features
import warnings
warnings.filterwarnings('ignore')
class Dataset_ETT_hour(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
border1s = [0, 12*30*24 - self.seq_len, 12*30*24+4*30*24 - self.seq_len]
border2s = [12*30*24, 12*30*24+4*30*24, 12*30*24+8*30*24]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len- self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
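# Minimal usage sketch (path and window sizes are placeholders): wrap the hourly
# ETT dataset in a DataLoader and pull one batch of encoder/decoder arrays.
def _example_etth_loader(root_path='./data/ETT/'):
    dataset = Dataset_ETT_hour(root_path=root_path, flag='train',
                               size=[96, 48, 24], features='M', target='OT')
    loader = DataLoader(dataset, batch_size=32, shuffle=True, drop_last=True)
    seq_x, seq_y, seq_x_mark, seq_y_mark = next(iter(loader))
    return seq_x.shape, seq_y.shape  # (32, 96, n_vars), (32, 48+24, n_vars)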
class Dataset_ETT_minute(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTm1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='t', cols=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
border1s = [0, 12*30*24*4 - self.seq_len, 12*30*24*4+4*30*24*4 - self.seq_len]
border2s = [12*30*24*4, 12*30*24*4+4*30*24*4, 12*30*24*4+8*30*24*4]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
class Dataset_Custom(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.cols=cols
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
'''
df_raw.columns: ['date', ...(other features), target feature]
'''
# cols = list(df_raw.columns);
if self.cols:
cols=self.cols.copy()
cols.remove(self.target)
else:
cols = list(df_raw.columns); cols.remove(self.target); cols.remove('date')
df_raw = df_raw[['date']+cols+[self.target]]
num_train = int(len(df_raw)*0.7)
num_test = int(len(df_raw)*0.2)
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train-self.seq_len, len(df_raw)-num_test-self.seq_len]
border2s = [num_train, num_train+num_vali, len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len- self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
class Dataset_Pred(Dataset):
def __init__(self, root_path, flag='pred', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='15min', cols=None):
# size [seq_len, label_len, pred_len]
# info
        if size is None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['pred']
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.cols=cols
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
'''
df_raw.columns: ['date', ...(other features), target feature]
'''
if self.cols:
cols=self.cols.copy()
cols.remove(self.target)
else:
            cols = list(df_raw.columns)
            cols.remove(self.target)
            cols.remove('date')
df_raw = df_raw[['date']+cols+[self.target]]
border1 = len(df_raw)-self.seq_len
border2 = len(df_raw)
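        # For prediction, only the trailing seq_len rows are kept: they serve as the
        # encoder context for forecasting pred_len steps past the end of the file.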
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
self.scaler.fit(df_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
tmp_stamp = df_raw[['date']][border1:border2]
tmp_stamp['date'] = | pd.to_datetime(tmp_stamp.date) | pandas.to_datetime |
import os
import re
import functools
from itertools import chain
import attr
import logbook
from pathlib import Path
import pandas as pd
from ete3 import Tree
from common import config
from common.rename import *
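# `snakemake` is not imported here: Snakemake injects this object into the
# namespace when the file is run through a rule's `script:` directive, and
# `snakemake.config` exposes the workflow configuration used below.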
genus = snakemake.config["genus"]
species = snakemake.config["species"]
taxid = snakemake.config["taxid"]
section = snakemake.config["section"]
group = snakemake.config["group"]
threads = snakemake.config["threads"]
root = Path(snakemake.config["root"])
outdir = root / "human_readable" / section / group / genus / species
section_dir = outdir / section
group_dir = section_dir / group
fastas = outdir.rglob("GCA*fna.gz")
summary = | pd.read_csv(root / "summary.tsv", sep="\t", index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
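    # Mixin of reduction/statistics tests meant to hold for both dense and
    # long/sparse panel layouts; the concrete test class supplies `self.panel`.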
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
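        # Run the named reduction along every axis and compare it against
        # `alternative` applied frame-wise, exercising both skipna=True and
        # skipna=False where the method supports it.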
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
        # ensure the change propagates to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
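    # Mixin covering item/axis indexing behaviour (getitem/setitem, xs, fancy
    # .ix access); `self.panel` comes from the concrete test class.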
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
        # assigning a long-format (to_frame) result as an item should raise
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
        # GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
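        # Zero out the object returned by .ix and re-read the same selection;
        # if the mutation is visible there, the indexer returned a view, not a copy.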
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
        # strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
        # only NaN-holding dtypes are allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
        # corner case: mismatched frame lengths should blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
        keys = list(set(np.random.randint(0, 5000, 100)))[:50]  # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
        # on Windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
        # raise exception when both major and major_axis are passed
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
        # negative indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assertIs(result.items, self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel.minor_axis)
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assertIs(result.items, self.panel.major_axis)
        # this works, but returns a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
        # test mixture of args
result = self.panel.transpose('minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assertRaisesRegexp(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
panel = self.panel.copy()
result = panel.transpose(2, 0, 1, copy=True)
expected = panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
panel.values[0, 1, 1] = np.nan
self.assertTrue(notnull(result.values[1, 0, 1]))
@ignore_sparse_panel_future_warning
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
expected = self.panel.to_frame().dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
unfiltered = self.panel.to_frame(filter_observations=False)
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
self.assertEqual(unfiltered.index.names, ('major', 'minor'))
# unsorted, round trip
df = self.panel.to_frame(filter_observations=False)
unsorted = df.take(np.random.permutation(len(df)))
pan = unsorted.to_panel()
assert_panel_equal(pan, self.panel)
# preserve original index names
df = DataFrame(np.random.randn(6, 2),
index=[['a', 'a', 'b', 'b', 'c', 'c'],
[0, 1, 0, 1, 0, 1]],
columns=['one', 'two'])
df.index.names = ['foo', 'bar']
df.columns.name = 'baz'
rdf = df.to_panel().to_frame()
self.assertEqual(rdf.index.names, df.index.names)
self.assertEqual(rdf.columns.names, df.columns.names)
def test_to_frame_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
# Previously, this was mutating the underlying index and changing its
# name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'), (
np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
self.assertTrue(isnull(panel[0].ix[1, [0, 1]]).all())
def test_to_panel_duplicates(self):
# #2441
df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
idf = df.set_index(['a', 'b'])
assertRaisesRegexp(ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[['A', 'B']]
expected.items = ['A', 'A']
result = panel.loc['A']
assert_panel_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, ['A', 'B']]
expected.major_axis = ['A', 'A']
result = panel.loc[:, 'A']
assert_panel_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, ['A', 'B']]
expected.minor_axis = ['A', 'A']
result = panel.loc[:, :, 'A']
assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1,
check_names=False)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx), shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx), shifted.minor_xs(idx_lag))
# items
idx = self.panel.items[0]
idx_lag = self.panel.items[1]
shifted = self.panel.shift(1, axis='items')
assert_frame_equal(self.panel[idx], shifted[idx_lag])
# negative numbers, #2164
result = self.panel.shift(-1)
expected = Panel(dict((i, f.shift(-1)[:-1])
for i, f in self.panel.iteritems()))
assert_panel_equal(result, expected)
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodPanel()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_panel_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=bday)
assert_panel_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
panel = _panel
shifted = panel.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(panel, unshifted)
shifted2 = panel.tshift(freq=panel.major_axis.freq)
assert_panel_equal(shifted, shifted2)
inferred_ts = Panel(panel.values, items=panel.items,
major_axis=Index(np.asarray(panel.major_axis)),
minor_axis=panel.minor_axis)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(shifted, panel.tshift(1))
assert_panel_equal(unshifted, inferred_ts)
no_freq = panel.ix[:, [0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_pct_change(self):
df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
df2 = df1 + 1
df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})
# major, 1
result = wp.pct_change() # axis='major'
expected = Panel({'i1': df1.pct_change(),
'i2': df2.pct_change(),
'i3': df3.pct_change()})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=1)
assert_panel_equal(result, expected)
# major, 2
result = wp.pct_change(periods=2)
expected = Panel({'i1': df1.pct_change(2),
'i2': df2.pct_change(2),
'i3': df3.pct_change(2)})
assert_panel_equal(result, expected)
# minor, 1
result = wp.pct_change(axis='minor')
expected = Panel({'i1': df1.pct_change(axis=1),
'i2': df2.pct_change(axis=1),
'i3': df3.pct_change(axis=1)})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=2)
assert_panel_equal(result, expected)
# minor, 2
result = wp.pct_change(periods=2, axis='minor')
expected = Panel({'i1': df1.pct_change(periods=2, axis=1),
'i2': df2.pct_change(periods=2, axis=1),
'i3': df3.pct_change(periods=2, axis=1)})
assert_panel_equal(result, expected)
# items, 1
result = wp.pct_change(axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [1, 0.5, .2],
'c2': [1. / 3, 0.25, 1. / 6]}),
'i3': DataFrame({'c1': [.5, 1. / 3, 1. / 6],
'c2': [.25, .2, 1. / 7]})})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=0)
assert_panel_equal(result, expected)
# items, 2
result = wp.pct_change(periods=2, axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i3': DataFrame({'c1': [2, 1, .4],
'c2': [2. / 3, .5, 1. / 3]})})
assert_panel_equal(result, expected)
def test_round(self):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
[-1566.213, 88.88], [-12, 94.5]],
[[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
[272.212, -99.99], [23, -76.5]]]
evalues = [[[float(np.around(i)) for i in j] for j in k]
for k in values]
p = Panel(values, items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
expected = Panel(evalues, items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = p.round()
self.assert_panel_equal(expected, result)
def test_multiindex_get(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)],
names=['first', 'second'])
wp = Panel(np.random.random((4, 5, 5)),
items=ind,
major_axis=np.arange(5),
minor_axis=np.arange(5))
f1 = wp['a']
f2 = wp.ix['a']
assert_panel_equal(f1, f2)
self.assertTrue((f1.items == [1, 2]).all())
self.assertTrue((f2.items == [1, 2]).all())
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
def test_multiindex_blocks(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
wp = Panel(self.panel._data)
wp.items = ind
f1 = wp['a']
self.assertTrue((f1.items == [1, 2]).all())
f1 = wp[('b', 1)]
self.assertTrue((f1.columns == ['A', 'B', 'C', 'D']).all())
def test_repr_empty(self):
empty = Panel()
repr(empty)
def test_rename(self):
mapper = {'ItemA': 'foo', 'ItemB': 'bar', 'ItemC': 'baz'}
renamed = self.panel.rename_axis(mapper, axis=0)
exp = Index(['foo', 'bar', 'baz'])
self.assertTrue(renamed.items.equals(exp))
renamed = self.panel.rename_axis(str.lower, axis=2)
exp = Index(['a', 'b', 'c', 'd'])
self.assertTrue(renamed.minor_axis.equals(exp))
# don't copy
renamed_nocopy = self.panel.rename_axis(mapper, axis=0, copy=False)
renamed_nocopy['foo'] = 3.
self.assertTrue((self.panel['ItemA'].values == 3).all())
def test_get_attr(self):
assert_frame_equal(self.panel['ItemA'], self.panel.ItemA)
# specific cases from #3440
self.panel['a'] = self.panel['ItemA']
assert_frame_equal(self.panel['a'], self.panel.a)
self.panel['i'] = self.panel['ItemA']
assert_frame_equal(self.panel['i'], self.panel.i)
def test_from_frame_level1_unsorted(self):
tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), ('AAPL', 1),
('MSFT', 1)]
midx = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.rand(5, 4), index=midx)
p = df.to_panel()
assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
def test_to_excel(self):
try:
import xlwt # noqa
import xlrd # noqa
import openpyxl # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for ext in ['xls', 'xlsx']:
path = '__tmp__.' + ext
with ensure_clean(path) as path:
self.panel.to_excel(path)
try:
reader = ExcelFile(path)
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_to_excel_xlsxwriter(self):
try:
import xlrd # noqa
import xlsxwriter # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("Requires xlrd and xlsxwriter. Skipping test.")
path = '__tmp__.xlsx'
with ensure_clean(path) as path:
self.panel.to_excel(path, engine='xlsxwriter')
try:
reader = ExcelFile(path)
except ImportError as e:
raise nose.SkipTest("cannot write excel file: %s" % e)
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_dropna(self):
p = Panel(np.random.randn(4, 5, 6), major_axis=list('abcde'))
p.ix[:, ['b', 'd'], 0] = np.nan
result = p.dropna(axis=1)
exp = p.ix[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
inp = p.copy()
inp.dropna(axis=1, inplace=True)
assert_panel_equal(inp, exp)
result = p.dropna(axis=1, how='all')
assert_panel_equal(result, p)
p.ix[:, ['b', 'd'], :] = np.nan
result = p.dropna(axis=1, how='all')
exp = p.ix[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
p = Panel(np.random.randn(4, 5, 6), items=list('abcd'))
p.ix[['b'], :, 0] = np.nan
result = p.dropna()
exp = p.ix[['a', 'c', 'd']]
assert_panel_equal(result, exp)
result = p.dropna(how='all')
assert_panel_equal(result, p)
p.ix['b'] = np.nan
result = p.dropna(how='all')
exp = p.ix[['a', 'c', 'd']]
assert_panel_equal(result, exp)
def test_drop(self):
df = DataFrame({"A": [1, 2], "B": [3, 4]})
panel = Panel({"One": df, "Two": df})
def check_drop(drop_val, axis_number, aliases, expected):
try:
actual = panel.drop(drop_val, axis=axis_number)
assert_panel_equal(actual, expected)
for alias in aliases:
actual = panel.drop(drop_val, axis=alias)
assert_panel_equal(actual, expected)
except AssertionError:
com.pprint_thing("Failed with axis_number %d and aliases: %s" %
(axis_number, aliases))
raise
# Items
expected = Panel({"One": df})
check_drop('Two', 0, ['items'], expected)
self.assertRaises(ValueError, panel.drop, 'Three')
# errors = 'ignore'
dropped = panel.drop('Three', errors='ignore')
assert_panel_equal(dropped, panel)
dropped = panel.drop(['Two', 'Three'], errors='ignore')
expected = Panel({"One": df})
assert_panel_equal(dropped, expected)
# Major
exp_df = DataFrame({"A": [2], "B": [4]}, index=[1])
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop(0, 1, ['major_axis', 'major'], expected)
exp_df = DataFrame({"A": [1], "B": [3]}, index=[0])
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop([1], 1, ['major_axis', 'major'], expected)
# Minor
exp_df = df[['B']]
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop(["A"], 2, ['minor_axis', 'minor'], expected)
exp_df = df[['A']]
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop("B", 2, ['minor_axis', 'minor'], expected)
def test_update(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel([[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
pan.update(other)
expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[3.6, 2., 3], [1.5, np.nan, 7], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_from_dict(self):
pan = Panel({'one': DataFrame([[1.5, np.nan, 3], [1.5, np.nan, 3],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]]),
'two': DataFrame([[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]])})
other = {'two': DataFrame([[3.6, 2., np.nan], [np.nan, np.nan, 7]])}
pan.update(other)
expected = Panel(
{'two': DataFrame([[3.6, 2., 3], [1.5, np.nan, 7],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]]),
'one': DataFrame([[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]])})
assert_panel_equal(pan, expected)
def test_update_nooverwrite(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel([[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
pan.update(other, overwrite=False)
expected = Panel([[[1.5, np.nan, 3], [1.5, np.nan, 3],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[1.5, 2., 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_filtered(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel([[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
pan.update(other, filter_func=lambda x: x > 2)
expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[1.5, np.nan, 3], [1.5, np.nan, 7],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_raise(self):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
np.testing.assert_raises(Exception, pan.update, *(pan, ),
**{'raise_conflict': True})
def test_all_any(self):
self.assertTrue((self.panel.all(axis=0).values == nanall(
self.panel, axis=0)).all())
self.assertTrue((self.panel.all(axis=1).values == nanall(
self.panel, axis=1).T).all())
self.assertTrue((self.panel.all(axis=2).values == nanall(
self.panel, axis=2).T).all())
self.assertTrue((self.panel.any(axis=0).values == nanany(
self.panel, axis=0)).all())
self.assertTrue((self.panel.any(axis=1).values == nanany(
self.panel, axis=1).T).all())
self.assertTrue((self.panel.any(axis=2).values == nanany(
self.panel, axis=2).T).all())
def test_all_any_unhandled(self):
self.assertRaises(NotImplementedError, self.panel.all, bool_only=True)
self.assertRaises(NotImplementedError, self.panel.any, bool_only=True)
class TestLongPanel(tm.TestCase):
"""
LongPanel no longer exists, but...
"""
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
panel = tm.makePanel()
tm.add_nans(panel)
self.panel = panel.to_frame()
self.unfiltered_panel = panel.to_frame(filter_observations=False)
def test_ops_differently_indexed(self):
# trying to set non-identically indexed panel
wp = self.panel.to_panel()
wp2 = wp.reindex(major=wp.major_axis[:-1])
lp2 = wp2.to_frame()
result = self.panel + lp2
assert_frame_equal(result.reindex(lp2.index), lp2 * 2)
# careful, mutation
self.panel['foo'] = lp2['ItemA']
assert_series_equal(self.panel['foo'].reindex(lp2.index), lp2['ItemA'],
check_names=False)
def test_ops_scalar(self):
result = self.panel.mul(2)
expected = DataFrame.__mul__(self.panel, 2)
assert_frame_equal(result, expected)
def test_combineFrame(self):
wp = self.panel.to_panel()
result = self.panel.add(wp['ItemA'].stack(), axis=0)
assert_frame_equal(result.to_panel()['ItemA'], wp['ItemA'] * 2)
def test_combinePanel(self):
wp = self.panel.to_panel()
result = self.panel.add(self.panel)
wide_result = result.to_panel()
assert_frame_equal(wp['ItemA'] * 2, wide_result['ItemA'])
# one item
result = self.panel.add(self.panel.filter(['ItemA']))
def test_combine_scalar(self):
result = self.panel.mul(2)
expected = DataFrame(self.panel._data) * 2
assert_frame_equal(result, expected)
def test_combine_series(self):
s = self.panel['ItemA'][:10]
result = self.panel.add(s, axis=0)
expected = DataFrame.add(self.panel, s, axis=0)
assert_frame_equal(result, expected)
s = self.panel.ix[5]
result = self.panel + s
expected = DataFrame.add(self.panel, s, axis=1)
assert_frame_equal(result, expected)
def test_operators(self):
wp = self.panel.to_panel()
result = (self.panel + 1).to_panel()
assert_frame_equal(wp['ItemA'] + 1, result['ItemA'])
def test_arith_flex_panel(self):
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
if not compat.PY3:
aliases = {}
else:
aliases = {'div': 'truediv'}
self.panel = self.panel.to_panel()
for n in [np.random.randint(-50, -1), np.random.randint(1, 50), 0]:
for op in ops:
alias = aliases.get(op, op)
f = getattr(operator, alias)
exp = f(self.panel, n)
result = getattr(self.panel, op)(n)
assert_panel_equal(result, exp, check_panel_type=True)
# rops
r_f = lambda x, y: f(y, x)
exp = r_f(self.panel, n)
result = getattr(self.panel, 'r' + op)(n)
assert_panel_equal(result, exp)
def test_sort(self):
def is_sorted(arr):
return (arr[1:] > arr[:-1]).any()
sorted_minor = self.panel.sortlevel(level=1)
self.assertTrue(is_sorted(sorted_minor.index.labels[1]))
sorted_major = sorted_minor.sortlevel(level=0)
self.assertTrue(is_sorted(sorted_major.index.labels[0]))
def test_to_string(self):
buf = StringIO()
self.panel.to_string(buf)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.index.levels[0]
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(start, end)
# TODO truncate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
trunced = self.panel.truncate(before=start).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(before=start)
# TODO truncate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
trunced = self.panel.truncate(after=end).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(after=end)
# TODO truncate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
# truncate on dates that aren't in there
wp = self.panel.to_panel()
new_index = wp.major_axis[::5]
wp2 = wp.reindex(major=new_index)
lp2 = wp2.to_frame()
lp_trunc = lp2.truncate(wp.major_axis[2], wp.major_axis[-2])
wp_trunc = wp2.truncate(wp.major_axis[2], wp.major_axis[-2])
assert_panel_equal(wp_trunc, lp_trunc.to_panel())
# throw proper exception
self.assertRaises(Exception, lp2.truncate, wp.major_axis[-2],
wp.major_axis[2])
def test_axis_dummies(self):
from pandas.core.reshape import make_axis_dummies
minor_dummies = make_axis_dummies(self.panel, 'minor')
self.assertEqual(len(minor_dummies.columns),
len(self.panel.index.levels[1]))
major_dummies = make_axis_dummies(self.panel, 'major')
self.assertEqual(len(major_dummies.columns),
len(self.panel.index.levels[0]))
mapping = {'A': 'one', 'B': 'one', 'C': 'two', 'D': 'two'}
transformed = make_axis_dummies(self.panel, 'minor',
transform=mapping.get)
self.assertEqual(len(transformed.columns), 2)
self.assert_numpy_array_equal(transformed.columns, ['one', 'two'])
# TODO: test correctness
def test_get_dummies(self):
from pandas.core.reshape import get_dummies, make_axis_dummies
self.panel['Label'] = self.panel.index.labels[1]
minor_dummies = | make_axis_dummies(self.panel, 'minor') | pandas.core.reshape.make_axis_dummies |
import pickle
import random
import string
import warnings
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pytest
from scipy import stats
import linearmodels
from linearmodels.shared.exceptions import missing_warning
from linearmodels.shared.hypotheses import (
InapplicableTestStatistic,
InvalidTestStatistic,
WaldTestStatistic,
)
from linearmodels.shared.io import add_star, format_wide
from linearmodels.shared.linalg import has_constant, inv_sqrth
from linearmodels.shared.utility import AttrDict, ensure_unique_column, panel_to_frame
MISSING_PANEL = "Panel" not in dir(pd)
def test_missing_warning():
missing = np.zeros(500, dtype=bool)
with warnings.catch_warnings(record=True) as w:
missing_warning(missing)
assert len(w) == 0
missing[0] = True
with warnings.catch_warnings(record=True) as w:
missing_warning(missing)
assert len(w) == 1
original = linearmodels.WARN_ON_MISSING
linearmodels.WARN_ON_MISSING = False
with warnings.catch_warnings(record=True) as w:
missing_warning(missing)
assert len(w) == 0
linearmodels.WARN_ON_MISSING = original
def test_hasconstant():
x = np.random.randn(100, 3)
hc, loc = has_constant(x)
assert bool(hc) is False
assert loc is None
x[:, 0] = 1
hc, loc = has_constant(x)
assert hc is True
assert loc == 0
x[:, 0] = 2
hc, loc = has_constant(x)
assert hc is True
assert loc == 0
x[::2, 0] = 0
x[:, 1] = 1
x[1::2, 1] = 0
hc, loc = has_constant(x)
assert hc is True
def test_wald_statistic():
ts = WaldTestStatistic(1.0, "_NULL_", 1, name="_NAME_")
assert str(hex(id(ts))) in ts.__repr__()
assert "_NULL_" in str(ts)
assert ts.stat == 1.0
assert ts.df == 1
assert ts.df_denom is None
assert ts.dist_name == "chi2(1)"
assert isinstance(ts.critical_values, dict)
assert_allclose(1 - stats.chi2.cdf(1.0, 1), ts.pval)
ts = WaldTestStatistic(1.0, "_NULL_", 1, 1000, name="_NAME_")
assert ts.df == 1
assert ts.df_denom == 1000
assert ts.dist_name == "F(1,1000)"
assert_allclose(1 - stats.f.cdf(1.0, 1, 1000), ts.pval)
def test_invalid_test_statistic():
ts = InvalidTestStatistic("_REASON_", name="_NAME_")
assert str(hex(id(ts))) in ts.__repr__()
assert "_REASON_" in str(ts)
assert np.isnan(ts.pval)
assert ts.critical_values is None
def test_inapplicable_test_statistic():
ts = InapplicableTestStatistic(reason="_REASON_", name="_NAME_")
assert str(hex(id(ts))) in ts.__repr__()
assert "_REASON_" in str(ts)
assert np.isnan(ts.pval)
assert ts.critical_values is None
ts = InapplicableTestStatistic()
assert "not applicable" in str(ts)
def test_inv_sqrth():
x = np.random.randn(1000, 10)
xpx = x.T @ x
invsq = inv_sqrth(xpx)
prod = invsq @ xpx @ invsq - np.eye(10)
assert_allclose(1 + prod, np.ones((10, 10)))
def test_ensure_unique_column():
df = pd.DataFrame({"a": [0, 1, 0], "b": [1.0, 0.0, 1.0]})
out = ensure_unique_column("a", df)
assert out == "_a_"
out = ensure_unique_column("c", df)
assert out == "c"
out = ensure_unique_column("a", df, "=")
assert out == "=a="
df["_a_"] = -1
out = ensure_unique_column("a", df)
assert out == "__a__"
def test_attr_dict():
ad = AttrDict()
ad["one"] = "one"
ad[1] = 1
ad[("a", 2)] = ("a", 2)
assert list(ad.keys()) == ["one", 1, ("a", 2)]
assert len(ad) == 3
plk = pickle.dumps(ad)
pad = pickle.loads(plk)
assert list(pad.keys()) == ["one", 1, ("a", 2)]
assert len(pad) == 3
ad2 = ad.copy()
assert list(ad2.keys()) == list(ad.keys())
assert ad.get("one", None) == "one"
assert ad.get("two", False) is False
k, v = ad.popitem()
assert k == "one"
assert v == "one"
items = ad.items()
assert (1, 1) in items
assert (("a", 2), ("a", 2)) in items
assert len(items) == 2
values = ad.values()
assert 1 in values
assert ("a", 2) in values
assert len(values) == 2
ad2 = AttrDict()
ad2[1] = 3
ad2["one"] = "one"
ad2["a"] = "a"
ad.update(ad2)
assert ad[1] == 3
assert "a" in ad
ad.__str__()
with pytest.raises(AttributeError):
ad.__private_dict__ = None
with pytest.raises(AttributeError):
ad.some_other_key
with pytest.raises(KeyError):
ad["__private_dict__"] = None
del ad[1]
assert 1 not in ad.keys()
ad.new_value = "new_value"
assert "new_value" in ad.keys()
assert ad.new_value == ad["new_value"]
for key in ad.keys():
if isinstance(key, str):
assert key in dir(ad)
new_value = ad.pop("new_value")
assert new_value == "new_value"
del ad.one
assert "one" not in ad.keys()
ad.clear()
assert list(ad.keys()) == []
def test_format_wide():
k = 26
inputs = [chr(65 + i) * (20 + i) for i in range(k)]
out = format_wide(inputs, 80)
assert max([len(v) for v in out]) <= 80
out = format_wide(["a"], 80)
assert out == [["a"]]
def test_panel_to_midf():
x = np.random.standard_normal((3, 7, 100))
df = panel_to_frame(x, list(range(3)), list(range(7)), list(range(100)))
mi = pd.MultiIndex.from_product([list(range(7)), list(range(100))])
expected = pd.DataFrame(index=mi, columns=[0, 1, 2])
for i in range(3):
expected[i] = x[i].ravel()
expected.index.names = ["major", "minor"]
pd.testing.assert_frame_equal(df, expected)
expected2 = expected.copy()
expected2 = expected2.sort_index(level=[1, 0])
expected2.index = expected2.index.swaplevel(0, 1)
expected2.index.names = ["major", "minor"]
df2 = panel_to_frame(x, list(range(3)), list(range(7)), list(range(100)), True)
pd.testing.assert_frame_equal(df2, expected2)
entities = list(
map(
"".join,
[
[random.choice(string.ascii_lowercase) for __ in range(10)]
for _ in range(100)
],
)
)
times = pd.date_range("1999-12-31", freq="A-DEC", periods=7)
var_names = ["x.{0}".format(i) for i in range(1, 4)]
df3 = panel_to_frame(x, var_names, times, entities, True)
mi = pd.MultiIndex.from_product([times, entities])
expected3 = pd.DataFrame(index=mi, columns=var_names)
for i in range(1, 4):
expected3["x.{0}".format(i)] = x[i - 1].ravel()
expected3.index = expected3.index.swaplevel(0, 1)
mi = | pd.MultiIndex.from_product([entities, times]) | pandas.MultiIndex.from_product |
from flask import Flask, render_template, request
import pickle
import joblib
import pandas as pd
from geopy.geocoders import Nominatim
# app instantiation
APP = Flask(__name__)
@APP.route('/')
def Home_page():
'''Landing page for the Airbnb price prediction project'''
return render_template('landing.html', title='Home')
@APP.route('/prediction', methods= ["POST"])
def prediction():
# Winter, Spring, Summer, Fall
time_of_year = request.form['time_of_year'] #
# Latitude and Longitude
geolocator = Nominatim(user_agent="airbnb")
address = request.form['addy']
location = geolocator.geocode(address)
lat = location.latitude
lon = location.longitude
# Room Type, Superhost, Instant Bookable, Description Length
room_type = request.form['room_type']
super_host = True if request.form['super_host']=='1' else False
instant_bookable = True if request.form['instant_bookable']=='1' else False
description_len = len(request.form['description'])
# Accomodates, Bedrooms, Beds, Baths, Shared Baths, ppl_per_bed
accommodates = int(request.form['accomodates']) #
n_bedrooms = int(request.form['n_bedrooms']) #
n_beds = int(request.form['n_beds']) #
n_baths = int(request.form['n_baths']) #
shared_baths = True if request.form['shared_baths']=='1' else False
ppl_per_bed = accommodates/n_beds if n_beds!=0 else accommodates/1
n_amenities = int(request.form['n_amenities']) #
# Host experience, total reviews, total satisfaction, reviews per month
host_since = int(request.form['host_since'])
host_experience_yrs = round(2021 - int(host_since))
total_reviews = int(request.form['total_reviews'])
total_satisfaction = float(request.form['total_satisfaction']) # If we average the survey values then how are we going to ask
reviews_per_month = total_reviews/(host_experience_yrs*12)
# Min and Max nights
min_nights = int(request.form['min_nights'])
max_nights = int(request.form['max_nights'])
# Dataframe
column_names = ['lat', 'lon', 'room_type', 'superhost', 'instant_bookable',
'description_len', 'n_amenities', 'accommodates', 'n_bedrooms',
'n_beds', 'n_baths', 'shared_baths', 'min_nights', 'max_nights',
'reviews_per_month', 'total_reviews', 'total_satisfaction',
'host_experience_yrs', 'ppl_per_bed']
info = [[lat, lon, room_type, super_host, instant_bookable,
description_len, n_amenities, accommodates, n_bedrooms,
n_beds, n_baths, shared_baths, min_nights, max_nights,
reviews_per_month, total_reviews, total_satisfaction,
host_experience_yrs, ppl_per_bed]]
listing = | pd.DataFrame(info,columns=column_names) | pandas.DataFrame |
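# A possible way to finish this route, sketched as comments only: the model file
# name and template below are assumptions, not part of the original source.
#   model = joblib.load('model.joblib')
#   estimate = model.predict(listing)[0]
#   return render_template('prediction.html', title='Prediction', estimate=estimate)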
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = pd.period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
with pytest.raises(TypeError):
tdi - rng
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_pi_sub_isub_offset(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range("2009", "2019", freq="A")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
rng = pd.period_range("2014-01", "2016-12", freq="M")
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range("2013-08", "2016-07", freq="M")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_offset_n_gt1(self, box_transpose_fail):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
box, transpose = box_transpose_fail
per = pd.Period("2016-01", freq="2M")
pi = pd.PeriodIndex([per])
expected = pd.PeriodIndex(["2016-03"], freq="2M")
pi = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = pi + per.freq
tm.assert_equal(result, expected)
result = per.freq + pi
tm.assert_equal(result, expected)
def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
# GH#23215
# PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
pi = pd.PeriodIndex(["2016-01"], freq="2M")
expected = pd.PeriodIndex(["2016-04"], freq="2M")
# FIXME: with transposing these tests fail
pi = tm.box_expected(pi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = pi + to_offset("3M")
tm.assert_equal(result, expected)
result = to_offset("3M") + pi
tm.assert_equal(result, expected)
# ---------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_pi_add_intarray(self, int_holder, op):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = op(pi, other)
expected = pd.PeriodIndex([pd.Period("2016Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_pi_sub_intarray(self, int_holder):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = pi - other
expected = pd.PeriodIndex([pd.Period("2014Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - pi
# ---------------------------------------------------------------
# Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
# TODO: Some of these are misnomers because of non-Tick DateOffsets
def test_pi_add_timedeltalike_minute_gt1(self, three_days):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# minute frequency with n != 1. A more general case is tested below
# in test_pi_add_timedeltalike_tick_gt1, but here we write out the
# expected result more explicitly.
other = three_days
rng = pd.period_range("2014-05-01", periods=3, freq="2D")
expected = pd.PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
@pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"])
def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# tick-like frequency with n != 1
other = three_days
rng = pd.period_range("2014-05-01", periods=6, freq=freqstr)
expected = pd.period_range(rng[0] + other, periods=6, freq=freqstr)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr)
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
def test_pi_add_iadd_timedeltalike_daily(self, three_days):
# Tick
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-05-04", "2014-05-18", freq="D")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_timedeltalike_daily(self, three_days):
# Tick-like 3 Days
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-04-28", "2014-05-12", freq="D")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_daily(self, not_daily):
other = not_daily
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=D\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="H")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_add_timedeltalike_mismatched_freq_hourly(self, not_hourly):
other = not_hourly
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=H\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="H")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_add_iadd_timedeltalike_annual(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range("2019", "2029", freq="A")
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014", "2024", freq="A")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_M(self):
rng = pd.period_range("2014-01", "2016-12", freq="M")
expected = pd.period_range("2014-06", "2017-05", freq="M")
result = rng + pd.offsets.MonthEnd(5)
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014-01", "2016-12", freq="M")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_parr_add_sub_td64_nat(self, box_transpose_fail):
# GH#23320 special handling for timedelta64("NaT")
box, transpose = box_transpose_fail
pi = pd.period_range("1994-04-01", periods=9, freq="19D")
other = np.timedelta64("NaT")
expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
other - obj
@pytest.mark.parametrize(
"other",
[
np.array(["NaT"] * 9, dtype="m8[ns]"),
TimedeltaArray._from_sequence(["NaT"] * 9),
],
)
def test_parr_add_sub_tdt64_nat_array(self, box_df_fail, other):
# FIXME: DataFrame fails because when operating column-wise
# timedelta64 entries become NaT and are treated like datetimes
box = box_df_fail
pi = pd.period_range("1994-04-01", periods=9, freq="19D")
expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
other - obj
# ---------------------------------------------------------------
# Unsorted
def test_parr_add_sub_index(self):
# Check that PeriodArray defers to Index on arithmetic ops
pi = pd.period_range("2000-12-31", periods=3)
parr = pi.array
result = parr - pi
expected = pi - pi
tm.assert_index_equal(result, expected)
class TestPeriodSeriesArithmetic:
def test_ops_series_timedelta(self):
# GH#13043
ser = pd.Series(
[pd.Period("2015-01-01", freq="D"), pd.Period("2015-01-02", freq="D")],
name="xxx",
)
assert ser.dtype == "Period[D]"
expected = pd.Series(
[pd.Period("2015-01-02", freq="D"), pd.Period("2015-01-03", freq="D")],
name="xxx",
)
result = ser + pd.Timedelta("1 days")
tm.assert_series_equal(result, expected)
result = pd.Timedelta("1 days") + ser
tm.assert_series_equal(result, expected)
result = ser + pd.tseries.offsets.Day()
tm.assert_series_equal(result, expected)
result = pd.tseries.offsets.Day() + ser
tm.assert_series_equal(result, expected)
def test_ops_series_period(self):
# GH#13043
ser = pd.Series(
[pd.Period("2015-01-01", freq="D"), pd.Period("2015-01-02", freq="D")],
name="xxx",
)
assert ser.dtype == "Period[D]"
per = | pd.Period("2015-01-10", freq="D") | pandas.Period |
######### Bernoulli, SVM, Logistic Regression, XGBoost ##########
# utilities
import re
import numpy as np
import pandas as pd
# plotting
import seaborn as sns
from wordcloud import WordCloud
import matplotlib.pyplot as plt
# nltk
import nltk
from nltk.stem import PorterStemmer
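nltk.download('punkt')  # nltk.word_tokenize used below needs the punkt tokenizer models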
# sklearn
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import BernoulliNB
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, classification_report
import xgboost as xgb
# Importing the dataset
DATASET_COLUMNS=['target','ids','date','flag','user','text']
DATASET_ENCODING = "ISO-8859-1"
df = pd.read_csv(r'Dataset_2.csv', encoding=DATASET_ENCODING, names=DATASET_COLUMNS)
########### Data Preprocessing ############
#select text and target for analysis
data=df[['text','target']]
#Replacing the values to ease understanding (mapping the positive label 4 to 1)
data['target'] = data['target'].replace(4,1)
#Separating positive and negative tweets
data_pos = data[data['target'] == 1]
data_neg = data[data['target'] == 0]
#taking one fourth data so we can run on our machine easily
data_pos = data_pos.iloc[:int(200000)]
data_neg = data_neg.iloc[:int(200000)]
#Combining positive and negative tweets
dataset = pd.concat([data_pos, data_neg])
#Converting the tweet text to lower case
dataset['text']=dataset['text'].str.lower()
#Defining set containing all stopwords in English.
stopwordlist = ['a', 'about', 'above', 'after', 'again', 'ain', 'all', 'am', 'an',
'and','any','are', 'as', 'at', 'be', 'because', 'been', 'before',
'being', 'below', 'between','both', 'by', 'can', 'd', 'did', 'do',
'does', 'doing', 'down', 'during', 'each','few', 'for', 'from',
'further', 'had', 'has', 'have', 'having', 'he', 'her', 'here',
'hers', 'herself', 'him', 'himself', 'his', 'how', 'i', 'if', 'in',
'into','is', 'it', 'its', 'itself', 'just', 'll', 'm', 'ma',
'me', 'more', 'most','my', 'myself', 'now', 'o', 'of', 'on', 'once',
'only', 'or', 'other', 'our', 'ours','ourselves', 'out', 'own', 're','s', 'same', 'she', "shes", 'should', "shouldve",'so', 'some', 'such',
't', 'than', 'that', "thatll", 'the', 'their', 'theirs', 'them',
'themselves', 'then', 'there', 'these', 'they', 'this', 'those',
'through', 'to', 'too','under', 'until', 'up', 've', 'very', 'was',
'we', 'were', 'what', 'when', 'where','which','while', 'who', 'whom',
'why', 'will', 'with', 'won', 'y', 'you', "youd","youll", "youre",
"youve", 'your', 'yours', 'yourself', 'yourselves']
#Cleaning and removing the above stop words list from the tweet text
STOPWORDS = set(stopwordlist)
def cleaning_stopwords(text):
return " ".join([word for word in str(text).split() if word not in STOPWORDS])
dataset['text'] = dataset['text'].apply(lambda text: cleaning_stopwords(text))
#Cleaning and removing punctuations
import string
english_punctuations = string.punctuation
punctuations_list = english_punctuations
def cleaning_punctuations(text):
translator = str.maketrans('', '', punctuations_list)
return text.translate(translator)
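# Quick illustration (editor's addition, not in the original notebook):
# str.maketrans with a third argument deletes those characters, so punctuation
# is stripped while letters and spaces are kept.
assert cleaning_punctuations("it's great!!!") == "its great"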
dataset['text']= dataset['text'].apply(lambda x: cleaning_punctuations(x))
#Cleaning and removing repeating characters
def cleaning_repeating_char(text):
    return re.sub(r'(.)\1+', r'\1', text)
dataset['text'] = dataset['text'].apply(lambda x: cleaning_repeating_char(x))
#Cleaning and removing URL’s
def cleaning_URLs(data):
    return re.sub(r'((www\.[^\s]+)|(https?://[^\s]+))', ' ', data)
dataset['text'] = dataset['text'].apply(lambda x: cleaning_URLs(x))
#Cleaning and removing numbers
def cleaning_numbers(data):
return re.sub('[0-9]+', '', data)
dataset['text'] = dataset['text'].apply(lambda x: cleaning_numbers(x))
st=PorterStemmer()
#Stemming tweets
def stemming_sentence(sentence):
words = nltk.word_tokenize(sentence)
res_words = []
for word in words:
res_words.append(st.stem(word))
return " ".join(res_words)
dataset['text'] = dataset['text'].apply(lambda x: stemming_sentence(x))
#Separating input feature and label
X=dataset.text
y=dataset.target
# Separating the 95% data for training data and 5% for testing data
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.05, random_state =0)
#TF-IDF Vectorizer
vectoriser = TfidfVectorizer(ngram_range=(1,2), max_features=500000)
vectoriser.fit(X_train)
print(X_train)
#Transform the data using TF-IDF Vectorizer
X_train = vectoriser.transform(X_train)
X_test = vectoriser.transform(X_test)
#Model Bernoulli Naive Bayes
BNBmodel = BernoulliNB()
BNBmodel.fit(X_train, y_train)
y_pred1 = BNBmodel.predict(X_test)
print(classification_report(y_test, y_pred1))
#Model SVM(Support Vector Machine)
SVCmodel = LinearSVC()
SVCmodel.fit(X_train, y_train)
y_pred2 = SVCmodel.predict(X_test)
print(classification_report(y_test, y_pred2))
# Model Logistic Regression
LRmodel = LogisticRegression(C = 2, max_iter = 1000, n_jobs=-1)
LRmodel.fit(X_train, y_train)
y_pred3 = LRmodel.predict(X_test)
print(classification_report(y_test, y_pred3))
#Model XGBoost
xg = xgb.XGBClassifier(use_label_encoder=False, eval_metric='error')
xg.fit(X_train, y_train)
y_pred4 = xg.predict(X_test)
print(classification_report(y_test, y_pred4))
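# Editor's illustrative addition (not in the original script): the seaborn and
# confusion_matrix imports above are otherwise unused here, so this sketch shows
# one way to visualise the logistic-regression results on the test split.
cm = confusion_matrix(y_test, y_pred3)
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=['Negative', 'Positive'],
            yticklabels=['Negative', 'Positive'])
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.title('Logistic regression confusion matrix')
plt.show()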
########## Classify real tweets fetching them with twitter API ###########
#Setting twitter API
import tweepy
consumer_key = "z4eXbyALxvkOODCNafLiywMmr"
consumer_secret_key = "<KEY>"
access_token = '<KEY>'
access_token_secret = '<KEY>'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret_key)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#Fetching tweets ordered by a topic
keyword = input('Enter a field: ')
count = 500
#TWEETS
tweets = tweepy.Cursor(api.search_tweets,q=keyword,count = 100,tweet_mode = 'extended',lang='en').items(count)
#tweets = tweepy.Cursor(api.user_timeline,screen_name='JoeBiden',count = 100,tweet_mode = 'extended').items(count)
#DATAFRAME OF TWEETS
columns_t = ['User','Tweet']
data_t = []
for tweet in tweets:
data_t.append([tweet.user.screen_name, tweet.full_text])
df_tweets = pd.DataFrame(data_t,columns=columns_t)
# Freddy @BH
# Dec 20, 2017
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
import numpy as np
import pandas as pd
import sys
import math
import time
from tqdm import *
from data_preprocess import *
from utils import *
from logger import Logger
# Hyper Parameters
num_epochs = 10
batch_size = 256
#learning_rate = 1e-3
learning_rate = 1e-4
# argv
#data_dir = sys.argv[1]
debug = False
load_prev_model = False
direct_test = False
use_gpu = False
if(sys.argv[2] == '1'):
debug = True
if(sys.argv[3] == '1'):
load_prev_model = True
if(sys.argv[4] == '1'):
direct_test = True
if(torch.cuda.is_available()):
use_gpu = True
''' Data '''
class DigitDataset(Dataset):
def __init__(self, matrix_csv_path, label_csv_path, dtype):
matrix_data = pd.read_csv(matrix_csv_path,header=None)
        label_data = pd.read_csv(label_csv_path,header=None)
# from cdk_pywrapper.cdk_pywrapper import Compound
import pandas as pd
import copy
import json
import os
import wikidataintegrator as wdi
from cdk_pywrapper.cdk_pywrapper import Compound
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import RequestError
es = Elasticsearch()
def generate_fingerprint(smiles, compound_id, main_label, qid):
if smiles:
compound = Compound(compound_string=smiles, identifier_type='smiles', suppress_hydrogens=True)
fingerprint = compound.get_bitmap_fingerprint()
fp = {x for x in str(fingerprint)[1:-1].split(', ')}
# if only compound id is set as a label, try to set something more useful
if compound_id in compound_id_fp_map:
sim_item = compound_id_fp_map[compound_id]
if sim_item[1] == compound_id:
sim_item[1] = main_label
else:
compound_id_fp_map.update({compound_id: (compound_id, main_label, qid, fp)})
return list(fp)
else:
return []
def update_es(data, index='reframe'):
tmp_data = copy.deepcopy(data)
ikey = tmp_data['ikey']
if es.exists(index=index, doc_type='compound', id=ikey):
# for k, v in data.items():
# if (type(v) == list or type(v) == dict) and len(v) == 0:
# del tmp_data[k]
# elif not v:
# del tmp_data[k]
es.update(index=index, id=ikey, doc_type='compound', body={'doc': tmp_data})
else:
try:
# if index does not yet exist, make sure that all fields are being added
res = es.index(index=index, doc_type='compound', id=ikey, body=data)
except RequestError as e:
print(tmp_obj)
def desalt_compound(smiles):
desalted_smiles = []
desalted_ikeys = []
if smiles:
for single_compound in smiles.split('.'):
desalted_smiles.append(single_compound)
try:
compound = Compound(compound_string=single_compound, identifier_type='smiles')
ikey = compound.get_inchi_key()
desalted_ikeys.append(ikey)
except Exception as e:
desalted_ikeys.append('')
return desalted_smiles, desalted_ikeys
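# Hedged usage sketch (editor's addition, illustrative only): a dot-separated
# SMILES such as sodium acetate splits into its fragments, with one InChI key
# computed per fragment via cdk_pywrapper.
# >>> desalt_compound('CC(=O)[O-].[Na+]')
# (['CC(=O)[O-]', '[Na+]'], ['<acetate InChI key>', '<sodium InChI key>'])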
def get_rfm_ids(ikey):
rfm_ids = []
chem_vendors = []
for k, v in rfm_ikey_map.items():
s_id, s_vendor, s_vendor_id = v
if s_id == ikey:
rfm_ids.append(True)
chem_vendors.append({'chem_vendor': s_vendor if pd.notnull(s_vendor) else '',
'chem_vendor_id': s_vendor_id if pd.notnull(s_vendor_id) else ''})
return rfm_ids, chem_vendors
def calculate_tanimoto(fp_1, fp_2):
intersct = fp_1.intersection(fp_2)
return len(intersct)/(len(fp_1) + len(fp_2) - len(intersct))
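# Quick worked check (editor's addition): two fingerprints sharing 2 of their
# combined 4 distinct bits give a Tanimoto similarity of 2 / (3 + 3 - 2) = 0.5.
assert calculate_tanimoto({'1', '2', '3'}, {'2', '3', '4'}) == 0.5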
# index name 'reframe'
gvk_doc_map = {
# 'hvac_id': 'hvac_id',
'gvk_id': 'gvk_id',
# 'calibr_note': None,
'drug_name': ('drug_name', '; '),
'phase': ('phase', '; '),
'drug_roa': ('roa', '; '),
'category': ('category', '; '),
'mechanism': ('mechanism', '; '),
'sub_smiles': 'smiles',
'synonyms': ('synonyms', '; '),
'ikey': 'ikey'
}
integrity_doc_map = {
'id': 'id',
'smiles': 'smiles',
'name': ('drug_name', '; '),
'status': ('phase', '; '),
'int_thera_group': ('category', '; '),
'int_MoA': ('mechanism', '; '),
# 'calibr_note': None,
'ikey': 'ikey',
'wikidata': 'wikidata',
'PubChem CID': 'PubChem CID'
}
informa_doc_map = {
'name': ('drug_name', '\n'),
# 'Global Status': ('phase', '; '),
'highest_status (between global and Highest Status)': 'highest_phase',
'moa': ('mechanism', '\n'),
'target_name': ('target_name', '\n'),
'target_family': ('target_families', '\n'),
'origin': 'origin',
'chem_name': 'chemical_name',
'smiles': 'smiles',
'key': None,
'ikey': 'ikey',
'pubchem': 'PubChem CID',
'wikidata': 'wikidata',
'informa_id': 'informa_id'
}
assay_data_doc_map = {
'calibr_id': 'reframe_id',
'ac50': 'ac50',
'datamode': 'datamode',
'genedata_id': 'assay_id',
'assay_title': 'assay_title',
# 'smiles': 'smiles',
'ikey': 'ikey',
'PubChem CID': 'PubChem CID',
'pubchem_label': 'pubchem_label',
'wikidata': 'wikidata',
'library': 'chem_vendor',
'source_id': 'chem_vendor_id'
}
reframe_doc = {
'ikey': '',
'reframe_id': [],
'chem_vendors': [],
'qid': '',
'alt_id': '',
# 'fingerprint': [],
'similar_compounds': [],
'gvk': {
},
'integrity': {
},
'informa': {
},
'assay': []
}
basic_block = {
'id': None,
'phase': [],
'mechanism': [],
'category': []
}
data_dir = os.getenv('DATA_DIR')
# assay_data = pd.read_csv(os.path.join(data_dir, 'reframe_short_20170822.csv'))
gvk_dt = pd.read_csv(os.path.join(data_dir, '20180430_GVK_excluded_column.csv'))
integrity_dt = pd.read_csv(os.path.join(data_dir, 'integrity_annot_20180504.csv'))
informa_dt = pd.read_csv(os.path.join(data_dir, '20180430_Informa_excluded_column.csv'))
assay_descr = pd.read_csv(os.path.join(data_dir, '20180222_assay_descriptions.csv'), header=0)
assay_data = pd.read_csv(os.path.join(data_dir, 'assay_data_w_vendor_mapping.csv'), header=0)
vendor_dt = pd.read_csv(os.path.join(data_dir, 'portal_info_annot.csv'), sep=',')
ikey_wd_map = wdi.wdi_helpers.id_mapper('P235')
compound_id_fp_map = {}
rfm_ikey_map = {x['public_id']: (x['ikey'], x['library'], x['source_id']) for x in
vendor_dt[['public_id', 'ikey', 'library', 'source_id']].to_dict(orient='records')}
for c, x in gvk_dt.iterrows():
if x['exclude'] == 1:
continue
ikey = x['ikey']
if pd.isnull(ikey):
if pd.notnull(x['gvk_id']):
ikey = str(x['gvk_id'])
else:
continue
tmp_obj = copy.deepcopy(reframe_doc)
tmp_obj['ikey'] = ikey
tmp_obj['reframe_id'], tmp_obj['chem_vendors'] = get_rfm_ids(ikey)
if ikey in ikey_wd_map:
tmp_obj['qid'] = ikey_wd_map[ikey]
for k, v in gvk_doc_map.items():
if pd.isnull(x[k]) or v is None:
continue
if type(v) == tuple:
tmp_obj['gvk'].update({v[0]: x[k].split(v[1])})
else:
tmp_obj['gvk'].update({v: x[k]})
if 'smiles' in tmp_obj['gvk']:
smiles = tmp_obj['gvk']['smiles']
main_label = tmp_obj['gvk']['drug_name'][0] if len(tmp_obj['gvk']['drug_name']) > 0 else ikey
# tmp_obj['fingerprint'] = generate_fingerprint(smiles, ikey, main_label, tmp_obj['qid'])
fp = generate_fingerprint(smiles, ikey, main_label, tmp_obj['qid'])
if len(fp) > 0:
tmp_obj['fingerprint'] = fp
d_smiles, d_ikey = desalt_compound(smiles)
if len(d_smiles) > 1:
tmp_obj['sub_smiles'] = d_smiles
tmp_obj['sub_ikey'] = d_ikey
update_es(tmp_obj)
# if c > 20:
# break
if c % 100 == 0:
print(c)
for c, x in integrity_dt.iterrows():
if x['exclude'] == 1:
continue
ikey = x['ikey']
if pd.isnull(ikey):
if pd.notnull(x['id']):
ikey = str(x['id'])
else:
continue
tmp_obj = copy.deepcopy(reframe_doc)
tmp_obj['ikey'] = ikey
tmp_obj['reframe_id'], tmp_obj['chem_vendors'] = get_rfm_ids(ikey)
if ikey in ikey_wd_map:
tmp_obj['qid'] = ikey_wd_map[ikey]
for k, v in integrity_doc_map.items():
        if pd.isnull(x[k]) or v is None:
            continue
from pywim.utils.stats import iqr
import numpy as np
import pandas as pd
import peakutils
def sensors_estimation(
signal_data: pd.DataFrame, sensors_delta_distance: list
) -> [np.array]:
"""
    :param signal_data: DataFrame of sensor voltage signals indexed by time.
    :param sensors_delta_distance: distances between consecutive sensors.
    :return: list of per-sensor speed estimates (one array per sensor).
"""
# x axis: time
x = signal_data.index.values
sensors_peak_time = []
sensors_delta_time = [None]
for k in signal_data.keys():
# y axis: volts
y = signal_data[k].values
indexes = peakutils.indexes(y, thres=0.5, min_dist=30)
sensors_peak_time.append(x[indexes])
for i in range(1, len(sensors_peak_time)):
sensors_delta_time.append(
sensors_peak_time[i] - sensors_peak_time[i - 1]
)
# the information about first sensor should be equal to the second sensor
sensors_delta_time[0] = sensors_delta_time[1]
sensors_delta_speed = []
for i in range(len(sensors_delta_distance)):
sensors_delta_speed.append(
sensors_delta_distance[i] / sensors_delta_time[i]
)
# the information about first sensor should be equal to the second sensor
sensors_delta_speed[0] = sensors_delta_speed[1]
return sensors_delta_speed
def average_estimation(
signal_data: pd.DataFrame=None,
sensors_delta_distance: list=None,
sensors_delta_speed: list=None
) -> float:
"""
    :param signal_data: DataFrame of sensor voltage signals indexed by time.
    :param sensors_delta_distance: distances between consecutive sensors.
    :param sensors_delta_speed: optional pre-computed per-sensor speeds.
    :return: averaged speed (outliers rejected via the IQR helper).
"""
if not sensors_delta_speed:
sensors_delta_speed = sensors_estimation(
signal_data, sensors_delta_distance
)
speed_values = np.array([])
for sensor_speeds in sensors_delta_speed[1:]:
speed_values = np.concatenate((speed_values, sensor_speeds))
    return iqr.reject_outliers(pd.Series(speed_values))
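# Hedged usage sketch (editor's addition; variable names are assumptions):
# given a DataFrame of sensor voltages indexed by time and the spacing between
# consecutive sensors in metres, the averaged, outlier-filtered speed could be
# obtained with e.g.
# speed = average_estimation(signal_df, sensors_delta_distance=[2.0, 2.0])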
################################################################################
# This module aggregates the all psycho-linguistic measures into one matrix by
# each 'AC_Doc_ID (item stem or option).
# Parameters df_ac_pos: input pandas.DataFrame, it should have, at least, POS
# count columns with the 'AC_Doc_ID's as the index of
# the DataFrame
# pos_start_q: integer column number (starting from zero)
# specifying the starting point of POS count
# columns in the question DataFrame, from the point
# to the end, all the columns should be the POS count
# columns
# df_ac_loc_overlapping_lemma: pandas.DataFrame of the overlapping
# lemma location information, even no
# location information, still
# df_ac_overlapping_lemma is
# acceptable
# df_ac_loc_overlapping_syn_lemma: pandas.DataFrame of
# the overlapping lemma with synonym
# location information, even no
# location information, still
# df_ac_overlapping_syn_lemma is
# acceptable
# df_ac_overlapping_nchunk: pandas.DataFrame as a result of
# overlapping NChunk counts
# df_ac_oanc_lemma_freq_q: pandas.DataFrame reporting each
# 'AC_Doc_ID's lemma frequency stats
# stem_option_name_clm: column name of stem/option identifier
# in the aggregated DataFrame
# stem_identifier: name of the stem identifier in the aggregated
# DataFrame
# keep_specific_columns_POS = None: a list of column names to be
# included into the aggrageted
# matrix as a part of the original
# columns of the df_ac_pos input
# DataFrame
# stop_words_POS = None: list of POS to specify stop words, they
# should all include in the POS question
# and passage DataFrames
# df_ac_lemma_q = None: pandas.DataFrame of questions, it should
# have, at least, lemma count columns with the 'AC_Doc_ID's
# as the index of the DataFrame
# include_specific_lemma_count = None: a list of lemmas to be
# included into the aggrageted
# matrix as the lemma counts
# df_ac_pos_p = None: pandas.DataFrame of passages, it should have,
# at least, POS count columns, passage name and the section
# columns
# passage_name_clm_q = None: column name of the passage names
# in the lemma question DataFrame
# passage_sec_clm_q = None: column name of the passage sections
# in the lemma question DataFrame
# passage_name_clm_p = None: column name of the passage names
# in the passage DataFrame
# passage_sec_clm_p = None: column name of the passage sections
# in the passage DataFrame
# pos_start_p: integer column number (starting from zero)
# specifying the starting point of POS count
# columns in the passage DataFrame, from the point
# to the end, all the columns should be the POS
# count columns
# decimal_places = None: specify the decimal places to round at
# df_ac_overlapping_hypernyms = None: pandas.DataFrame as a result
# of overlapping hypernym counts
# df_ac_overlapping_hyponyms = None: pandas.DataFrame as a result
# of overlapping hyponym counts
# nchunk_suffix = '_nc': specify the suffix of NChunk variables
# which was used for the column names of
# the overlapping NChunk
# hypernym_suffix = '_hype': specify the suffix of hypernym variables
# which was used for the column names of
# the overlapping hypernyms
# hyponym_suffix = '_hypo': specify the suffix of hyponym variables
# which was used for the column names of
# the overlapping hyponyms
# df_ac_bigram_pmi_distribution = None: pandas.DataFrame as bigram
# PMI stats
# df_ac_trigram_pmi_distribution = None: pandas.DataFrame as trigram
# PMI stats
# Returns Result: pandas.DataFrame including the original columns of
# the df_ac_pos DataFrame plus aggregated result columns
################################################################################
def ac_aggregate_plim(df_ac_pos, pos_start_q, df_ac_loc_overlapping_lemma,
df_ac_loc_overlapping_syn_lemma, df_ac_overlapping_nchunk,
df_ac_oanc_lemma_freq_q, stem_option_name_clm, stem_identifier,
keep_specific_columns_POS = None, stop_words_POS = None,
df_ac_lemma_q = None, include_specific_lemma_count = None,
df_ac_pos_p = None, passage_name_clm_q = None, passage_sec_clm_q =None,
passage_name_clm_p = None, passage_sec_clm_p = None,
pos_start_p = None, decimal_places = None,
df_ac_overlapping_hypernyms = None, df_ac_overlapping_hyponyms = None,
nchunk_suffix = '_nc', hypernym_suffix = '_hype',
hyponym_suffix = '_hypo', df_ac_bigram_pmi_distribution = None,
df_ac_trigram_pmi_distribution = None):
import pandas as pd
df_ac_buf_POS = df_ac_pos.iloc[:, pos_start_q:]
all_option_count_name_clms = []
df_ac_options = df_ac_pos.drop_duplicates([stem_option_name_clm])
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count_' + x
all_option_count_name_clms.append(s)
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count_s_' + x
all_option_count_name_clms.append(s)
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count' + nchunk_suffix + '_' + x
all_option_count_name_clms.append(s)
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count' + hypernym_suffix + '_' + x
all_option_count_name_clms.append(s)
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count' + hyponym_suffix + '_' + x
all_option_count_name_clms.append(s)
option_len = len(all_option_count_name_clms) // 5
if stop_words_POS != None:
df_ac_buf_POS = df_ac_buf_POS.drop(stop_words_POS, axis=1)
df_ac_buf_sum = pd.DataFrame({ 'POS_sum' : df_ac_buf_POS.sum(axis=1) })
if keep_specific_columns_POS != None:
df_ac_buf_POS_head = df_ac_pos.loc[:, keep_specific_columns_POS]
else:
df_ac_buf_POS_head = df_ac_pos.copy()
df_ac_buf_POS_head['POS_sum'] = df_ac_buf_sum['POS_sum']
if df_ac_loc_overlapping_lemma is not None:
df_concat = pd.concat([df_ac_buf_POS_head, df_ac_loc_overlapping_lemma], axis=1)
else:
df_concat = df_ac_buf_POS_head.copy()
df_concat_tmp = df_concat.copy()
if df_ac_loc_overlapping_syn_lemma is not None:
df_concat = pd.concat([df_concat_tmp, df_ac_loc_overlapping_syn_lemma], axis=1)
df_concat_tmp = df_concat.copy()
if df_ac_overlapping_nchunk is not None:
        df_concat = pd.concat([df_concat_tmp, df_ac_overlapping_nchunk, df_ac_oanc_lemma_freq_q], axis=1)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#!/usr/bin/env python
# coding: utf-8
import os
import pandas as pd
from selenium import webdriver
from lxml import etree
import time
import jieba
import re
import numpy as np
url1 = input("Enter the Zhihu page URL you want to scrape: ")
browser = webdriver.Chrome("/Users/apple/Downloads/chromedriver_mac_mac")
browser.get(url1)
try:
    # Click to expand the full question description
button1 = browser.find_elements_by_xpath("""//div[@class= "QuestionHeader-detail"]
//button[contains(@class,"Button") and contains(@class,"QuestionRichText-more")
and contains(@class , "Button--plain")
]""")[0]
button1.click()
except:
    print('This question is fairly simple and has no expanded description!')
# This page loads its content asynchronously,
# so we need to scroll down several times.
for i in range(20):
browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
time.sleep(0.5)
print(i)
# Dismiss Zhihu's login pop-up
button2 = browser.find_elements_by_xpath("""//button[@aria-label = '关闭']""")[0]
button2.click()
# # Click Zhihu's "View all answers" button
# button3 = browser.find_elements_by_xpath("""//div[@class = 'Question-main']
# //a[contains(@class,"ViewAll-QuestionMainAction") and contains(@class , "QuestionMainAction") ]""")[1]
# button3.click()
final_end_it = browser.find_elements_by_xpath("""//button[contains(@class,"Button")
and contains(@class ,'QuestionAnswers-answerButton')
and contains(@class ,'Button--blue')
and contains(@class ,'Button--spread')
]""")
while final_end_it == []:
final_end_it = browser.find_elements_by_xpath("""//button[contains(@class,"Button")
and contains(@class ,'QuestionAnswers-answerButton')
and contains(@class ,'Button--blue')
and contains(@class ,'Button--spread')
]""")
js="var q=document.documentElement.scrollTop=0"
browser.execute_script(js)
for i in range(30):
browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
time.sleep(0.5)
print(i)
# # Locate the button
# # "View all 6,376 answers"
# # Get the detailed question description and users' comment text (maybe needed?)
# button = browser.find_elements_by_xpath("""//a[@class='load']""")[0]
# button.click()
# Data should be saved while scraping; otherwise, with too much content, something may go wrong and the page may fail to load??
# In[ ]:
final_end_it = browser.find_elements_by_xpath("""//button[contains(@class,"Button")
and contains(@class ,'QuestionAnswers-answerButton')
and contains(@class ,'Button--blue')
and contains(@class ,'Button--spread')
]""")
while final_end_it == []:
final_end_it = browser.find_elements_by_xpath("""//button[contains(@class,"Button")
and contains(@class ,'QuestionAnswers-answerButton')
and contains(@class ,'Button--blue')
and contains(@class ,'Button--spread')
]""")
js="var q=document.documentElement.scrollTop=0"
browser.execute_script(js)
for i in range(30):
browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
time.sleep(0.5)
print(i)
# # Locate the button
# # "View all 6,376 answers"
# # Get the detailed question description and users' comment text (maybe needed?)
# button = browser.find_elements_by_xpath("""//a[@class='load']""")[0]
# button.click()
# Data should be saved while scraping; otherwise, with too much content, something may go wrong and the page may fail to load??
# In[2]:
dom = etree.HTML(browser.page_source)
# # Data about the question itself
# In[4]:
Followers_number_first = dom.xpath("""//div[@class="QuestionFollowStatus"]//div[@class = "NumberBoard-itemInner"]/strong/text()""")[0]
Browsed_number_first = dom.xpath("""//div[@class="QuestionFollowStatus"]//div[@class = "NumberBoard-itemInner"]/strong/text()""")[1]
# Number of followers
Followers_number_final = re.sub(",","",Followers_number_first)
# Number of views
Browsed_number_final = re.sub(",","",Browsed_number_first)
# Question URL
problem_url = url1
# Question ID
problem_id = re.findall(r"\d+\.?\d*",url1)
# Question title
problem_title = dom.xpath("""//div[@class = 'QuestionHeader']//h1[@class = "QuestionHeader-title"]/text()""")
# Question upvote count
problem_endorse = dom.xpath("""//div[@class = 'QuestionHeader']//div[@class = "GoodQuestionAction"]/button/text()""")
# Question comment count
problem_Comment = dom.xpath("""//div[@class = 'QuestionHeader']//div[@class = "QuestionHeader-Comment"]/button/text()""")
# Number of answers
answer_number = dom.xpath("""//div[@class = 'Question-main']//h4[@class = "List-headerText"]/span/text()""")
# Question tags
problem_tags_list = dom.xpath("""//div[@class = 'QuestionHeader-topics']//a[@class = "TopicLink"]/div/div/text()""")
# # Data about the answers themselves
# In[5]:
# Answer text content
comment_list = dom.xpath("""//div[@class = 'List-item']//div[@class = "RichContent-inner"]""")
comment_list_text = []
for comment in comment_list:
comment_list_text.append(comment.xpath("string(.)"))
# Time posted
time_list = dom.xpath("""//div[@class = 'List-item']//div[@class = "ContentItem-time"]//span/@data-tooltip""")
edit_time_list = dom.xpath("""//div[@class = 'List-item']//div[@class = "ContentItem-time"]//span/text()""")
# Upvote count
endorse_list = dom.xpath("""//div[@class = 'List-item']//button[contains(@class,"Button") and contains(@class,"VoteButton") and contains(@class , "VoteButton--up")]/@aria-label""")
# Number of comments
number_of_endorse_list = dom.xpath("""//div[@class = 'List-item']//svg[contains(@class,"Zi") and contains(@class,"Zi--Comment")
and contains(@class,"Button-zi")]/../../text()""")
# Answer URL
answers_url_list = dom.xpath("""//div[@class = 'List-item']//div[contains(@class,"ContentItem") and contains(@class,"AnswerItem")]
/meta[@itemprop = "url"]/@content""")
authors_list = dom.xpath("""//div[@class = 'List-item']//div[contains(@class,"ContentItem") and contains(@class,"AnswerItem")]
/@data-zop""")
# Author name
authorName_list = []
# Author ID
authorid_list = []
for i in authors_list:
authorName_list.append(eval(i)['authorName'])
authorid_list.append(eval(i)["itemId"])
# # Assemble the DataFrame
# In[6]:
data = pd.DataFrame()
import os
import time
import uuid
from uuid import uuid5
import logging
from importlib import import_module
from typing import List, Union, Callable, Tuple
import numpy as np
import fire
import requests
import pandas as pd
from bs4 import BeautifulSoup
from yadil.web.scraper_config import default_config as config
def delay_mean_and_std(mean: int = 5, std: int = 3):
if not delay_mean_and_std.init:
np.random.seed(delay_mean_and_std.seed)
delay_mean_and_std.init = True
time.sleep(np.abs(np.random.normal(loc=mean, scale=std) / 1.0))
delay_mean_and_std.seed = 123456
delay_mean_and_std.init = False
class VisitedUrls(object):
def __init__(self):
self.urls = {}
def check_if_visited_and_add(self, url: str = None):
try:
url_parts = url.split("/")
urls = self.urls
for p in url_parts:
if p in urls.keys():
urls = urls[p]
elif p == url_parts[-1]:
urls[p] = True
return False
else:
urls[p] = {}
urls = urls[p]
return True
except:
return False
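# Illustrative behaviour (editor's addition): URLs are stored in a nested dict
# keyed on "/"-separated segments, so the first call registers a URL and returns
# False, while an exact repeat returns True.
# >>> visited = VisitedUrls()
# >>> visited.check_if_visited_and_add("http://example.com/a/b")
# False
# >>> visited.check_if_visited_and_add("http://example.com/a/b")
# True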
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
def _download(config, url) -> Tuple[str, bytes]:
try:
if config.visited.check_if_visited_and_add(url):
logger.warn("url : {} is already visited.".format(url))
return None
r = requests.get(url, allow_redirects=True)
delay_mean_and_std(config.DELAY_MEAN, config.DELAY_STD)
return r.headers["content-type"], r.content
except Exception as e:
logger.error("download error : {}".format(url))
return None
def _save_image(config, url, content):
try:
if content:
unq_id = str(uuid5(uuid.NAMESPACE_URL, name=url))
filename = os.path.join(config.OUTPUT_DIR, unq_id + ".jpg")
with open(filename, "wb") as f:
f.write(content)
logger.info("Saving {} as {}".format(url, unq_id))
            df = pd.DataFrame(data={"url": [url], "uuid": [unq_id]}, columns=["url", "uuid"])
import pandas as pd
import re
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics.pairwise import cosine_similarity
df = pd.read_csv("amazon_data.csv")
df_cleaned = df.drop( axis=0, columns= ['bestsellers_rank_main_name', 'bestsellers_rank_main_rank', 'bestsellers_rank_sub_0_name', 'price_shipping', 'dimensions_cm_raw',
'price_per_unit', 'fulfillment_type', 'fulfillment_is_sold_by_amazon', 'attributes_variation_size_name', 'fulfillment_is_fulfilled_by_amazon', 'weight_tier',
'fulfillment_is_fulfilled_by_third_party','fulfillment_is_sold_by_third_party', 'price_buybox', 'price_rrp', 'weight_raw', 'weight_unit', 'weight_gramme',
'bestsellers_rank_sub_0_rank', 'labels_choice_keyword', 'labels_is_amazon_bestseller', 'labels_is_amazon_choice', 'offers_quantity', 'attributes_variation_style_name',
'bestsellers_rank_sub_1_name', 'bestsellers_rank_sub_1_rank', 'upc_number', 'ean_number', 'review_ratings_total', 'dimensions_type', 'dimensions_unit',
'dimensions_tier', 'part_number', 'parent_asin', 'variants_quantity', 'answered_questions_count', 'project_name', 'marketplace', 'attributes_variation_color_name',
'gtin_number','bestsellers_rank_sub_2_name', 'bestsellers_rank_sub_2_rank', 'estimations_sales_daily', 'dimensions_in_raw', 'attributes_variation_fit_type',
'estimations_sales_monthly', 'estimations_revenue_daily', 'model_number', 'keywords', 'review_rating', 'amazon_label', 'date_time', 'attributes_variation_number_of_items',
'estimations_revenue_monthly', 'estimations_revenue_per_review', 'price_symbol', 'aplus_feature', 'brand_name', 'seller_name', 'seller_id', 'is_used', 'asin',
'date_listed_iso', 'manufacturer', 'asins', 'images_count', 'review_by_stars_five_star', 'review_by_stars_four_star', 'review_by_stars_three_star', 'review_by_stars_two_star', 'review_by_stars_one_star'])
titles = df_cleaned["title"]
bullet1 = df_cleaned["features_bullet_point_0"].astype(str)
bullet2 = df_cleaned["features_bullet_point_1"].astype(str)
bullet3 = df_cleaned["features_bullet_point_2"].astype(str)
bullet4 = df_cleaned["features_bullet_point_3"].astype(str)
bullet5 = df_cleaned["features_bullet_point_4"].astype(str)
description = df_cleaned["features_description"].astype(str)
text_list = [titles, bullet1, bullet2, bullet3, bullet4, bullet5, description]
#Extracting Age:
def get_age_in_years(texts):
"""extracts numbers that occure with the words year(s), Year(s), Jahr(e), jahr(e) """
age = r"(................[1234567890]................)"
year_b = r"(................year................)"
year_c = r"(................Year................)"
year_d = r"(................jahr................)"
year_e = r"(................Jahr................)"
agey = []
for n in texts:
a = re.findall(age,n)
text_y = ""
if a:
b = re.findall(year_b,n)
if b:
text_y = b[0]
else:
c = re.findall(year_c,n)
if c:
text_y=c[0]
else:
d = re.findall(year_d,n)
if d:
text_y = d[0]
else:
e = re.findall(year_e,n)
if e:
text_y = e[0]
agey.append(text_y)
return agey
def get_age_in_months(texts):
"""extracts numbers that occure with the words month(s), Month(s), monat(e), Monat(e) """
age = r"..........[1234567890]............"
month_b = r"(................month................)"
month_c = r"(................Month................)"
month_d = r"(................monat................)"
month_e = r"(................Monat................)"
agem = []
for n in texts:
a = re.findall(age,n)
text = ""
if a:
b = re.findall(month_b,n)
if b:
text = b[0]
else:
c = re.findall(month_c,n)
if c:
text=c[0]
else:
d = re.findall(month_d,n)
if d:
text = d[0]
else:
e = re.findall(month_e,n)
if e:
text = e[0]
agem.append(text)
return agem
titles_agey = get_age_in_years(titles)
titles_agem = get_age_in_months(titles)
bullet1_agey = get_age_in_years(bullet1)
bullet1_agem = get_age_in_months(bullet1)
bullet2_agey = get_age_in_years(bullet2)
bullet2_agem = get_age_in_months(bullet2)
bullet3_agey = get_age_in_years(bullet3)
bullet3_agem = get_age_in_months(bullet3)
bullet4_agey = get_age_in_years(bullet4)
bullet4_agem = get_age_in_months(bullet4)
bullet5_agey = get_age_in_years(bullet5)
bullet5_agem = get_age_in_months(bullet5)
description_agey = get_age_in_years(description)
description_agem = get_age_in_months(description)
def get_age_in_numbers(texts):
"""extracts numbers without text """
number = r"[^0-9]+(\b\d+\b)[^0-9]+(\b\d+\b)[^0-9]+(\b\d+\b)[^0-9]+(\b\d+\b).+"
agen = []
for n in texts:
a = re.findall(number,n)
text = ""
if a:
text = a[0]
agen.append(set(text))
return agen
titles_agey = get_age_in_numbers(titles_agey)
titles_agem = get_age_in_numbers(titles_agem)
bullet1_agey = get_age_in_numbers(bullet1_agey)
bullet1_agem = get_age_in_numbers(bullet1_agem)
bullet2_agey = get_age_in_numbers(bullet2_agey)
bullet2_agem = get_age_in_numbers(bullet2_agem)
bullet3_agey = get_age_in_numbers(bullet3_agey)
bullet3_agem = get_age_in_numbers(bullet3_agem)
bullet4_agey = get_age_in_numbers(bullet4_agey)
bullet4_agem = get_age_in_numbers(bullet4_agem)
bullet5_agey = get_age_in_numbers(bullet5_agey)
bullet5_agem = get_age_in_numbers(bullet5_agem)
description_agey = get_age_in_numbers(description_agey)
description_agem = get_age_in_numbers(description_agem)
df_cleaned["titles_agey"] = np.asarray(titles_agey)
df_cleaned["titles_agem"] = np.asarray(titles_agem)
df_cleaned["bullet1_agey"] = np.asarray(bullet1_agey)
df_cleaned["bullet1_agem"] = np.asarray(bullet1_agem)
df_cleaned["bullet2_agey"] = np.asarray(bullet2_agey)
df_cleaned["bullet2_agem"] = np.asarray(bullet2_agem)
df_cleaned["bullet3_agey"] = np.asarray(bullet3_agey)
df_cleaned["bullet3_agem"] = np.asarray(bullet3_agem)
df_cleaned["bullet4_agey"] = np.asarray(bullet4_agey)
df_cleaned["bullet4_agem"] = np.asarray(bullet4_agem)
df_cleaned["bullet5_agey"] = np.asarray(bullet5_agey)
df_cleaned["bullet5_agem"] = np.asarray(bullet5_agem)
df_cleaned["description_agey"] = np.asarray(description_agey)
df_cleaned["description_agem"] = np.asarray(description_agem)
df_age = df_cleaned.drop( axis=0, columns= ['features_bullet_point_0', 'features_bullet_point_1', 'features_bullet_point_2', 'features_bullet_point_3', 'features_bullet_point_4',
'features_description', 'feature_bullets_count', 'images_row'])
list_years=[]
for i in range(len(df_age)):
h = df_age.loc[i, "titles_agey"].union(df_age.loc[i, "bullet1_agey"]).union(df_age.loc[i, "bullet2_agey"]).union(df_age.loc[i, "bullet3_agey"]).union(df_age.loc[i, "bullet4_agey"]).union(df_age.loc[i, "bullet5_agey"]).union(df_age.loc[i, "description_agey"])
list_years.append(h)
df_age["age_years"] = list_years
list_months=[]
for i in range(len(df_age)):
h = df_age.loc[i, "titles_agem"].union(df_age.loc[i, "bullet1_agem"]).union(df_age.loc[i, "bullet2_agem"]).union(df_age.loc[i, "bullet3_agem"]).union(df_age.loc[i, "bullet4_agem"]).union(df_age.loc[i, "bullet5_agem"]).union(df_age.loc[i, "description_agem"])
list_months.append(h)
df_age["age_years"] = list_years
df_age["age_months"] = list_months
df_age['new_age_in_months'] = df_age.age_years.apply(lambda z: set({int(x)*12 for x in z}))
df_age.age_months = df_age.age_months.apply(lambda y: set({int(x) for x in y}) )
new_row = []
for i,row in df_age.iterrows():
new_row.append(row[-1].union(row[5]))
df_age['age'] = new_row
df_age = df_age.drop( axis=0, columns= ['bullet3_agey', 'bullet3_agem', 'bullet4_agey', 'bullet4_agem', 'bullet5_agey', 'bullet5_agem', 'bullet1_agey', 'bullet1_agem', 'titles_agey',
'titles_agem','bullet2_agey', 'bullet2_agem', 'description_agey', 'description_agem'])
df_age['list_of_age'] = df_age.age.apply(lambda x: [int(y) for y in list(x)])
df_age['0_24'] = df_age.list_of_age.apply(lambda y : 1 if sum([1 if x >=0 and x <=24 else 0 for x in y])else 0)
df_age["25-60"]= df_age.list_of_age.apply(lambda y : 1 if sum([1 if x >=25 and x <=60 else 0 for x in y])else 0)
df_age['72+'] = df_age.list_of_age.apply(lambda y : 1 if sum([1 if x >=72 else 0 for x in y])else 0)
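# Worked illustration (editor's addition, hypothetical row): an age set of
# {12, 96} months sets 0_24 = 1 (12 falls within 0-24), 25-60 = 0 (neither value
# lies in 25-60) and 72+ = 1 (96 is at least 72).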
df_age.columns[5:]
df_age = df_age.drop(axis=0, columns=["list_of_age", "age_months", "new_age_in_months", "age", "age_years"])
# Extracting skills:
age = r"..........[1234567890]............"
motor = r"(motor)"
motor_c = r"(Motor)"
language = r"(language)"
language_c = r"(Language)"
math = r"(math)"
math_c = r"(Math)"
cognative = r"(cognative)"
cognative_c = r"(Cognative)"
numbers = r"(number)"
numbers_c = r"(Number)"
stem = r"(stem)"
stem_c = r"(Stem)"
stem_cc = r"(STEM)"
emotional = r"(emotional)"
emotional_c = r"(Emotional)"
social = r"(social)"
social_c = r"(Social)"
def get_skills(text,reg):
skill = []
for n in text:
lines = ""
a = re.findall(age,n)
if a:
b = re.findall(reg, n)
if b:
lines = b[0]
skill.append(lines)
return skill
for t in text_list:
df_age["motor"] = get_skills(t, motor)
df_age["motor_c"]= get_skills(t, motor_c)
df_age["language"] = get_skills(t, language)
df_age["language_c"]= get_skills(t, language_c)
df_age["math"] = get_skills(t, math)
df_age["math_c"]= get_skills(t, math_c)
df_age["cognative"] = get_skills(t, cognative)
df_age["cognative_c"]= get_skills(t, cognative_c)
df_age["numbers"] = get_skills(t, numbers)
df_age["numbers_c"]= get_skills(t, numbers_c)
df_age["stem"] = get_skills(t, stem)
df_age["stem_c"]= get_skills(t, stem_c)
df_age["stem_cc"]= get_skills(t, stem_cc)
df_age["emotional"] = get_skills(t, emotional)
df_age["emotional_c"]= get_skills(t, emotional_c)
df_age["social"] = get_skills(t, social)
df_age["social_c"]= get_skills(t, social_c)
# Putting columns of same skill together
df_age_skills = df_age
df_age_skills["motor_skills"] = df_age_skills["motor"] + df_age_skills["motor_c"]
df_age_skills["language_skills"] = df_age_skills["language"] + df_age_skills["language_c"]
df_age_skills["math_skills"] = df_age_skills["math"] + df_age_skills["math_c"]
df_age_skills["cognative_skills"] = df_age_skills["cognative"] + df_age_skills["cognative_c"]
df_age_skills["numbers_skills"] = df_age_skills["numbers"] + df_age_skills["numbers_c"]
df_age_skills["stem_skills"] = df_age_skills["stem"] + df_age_skills["stem_c"] + df_age_skills["stem_cc"]
df_age_skills["emotional_skills"] = df_age_skills["emotional"] + df_age_skills["emotional_c"]
df_age_skills["social_skills"] = df_age_skills["social"] + df_age_skills["social_c"]
df_merged = df_age_skills.drop(axis=0, columns=["motor", "motor_c", "language", "language_c", "math", "math_c", "cognative", "cognative_c", "numbers", "numbers_c", "stem", "stem_c", "stem_cc", "emotional", "emotional_c", "social", "social_c"])
# Replacing empty strings with NaN
df_merged = df_merged.replace(r'^\s*$', np.NaN, regex=True)
# Unifying values in skills columns
df_merged["math_skills"].replace({"math": "stem", "Math": "stem"}, inplace=True)
df_merged["numbers_skills"].replace({"number": "stem", "Numbers": "stem"}, inplace=True)
df_merged["stem_skills"] = df_merged["math_skills"] + df_merged["numbers_skills"]
df_merged = df_merged.drop(axis=0, columns=["math_skills", "numbers_skills"])
df_merged["stem_skills"].replace({"stemstem": "stem", "stemnumberNumber": "stem", "stemNumber": "stem", "mathMathstem": "stem", "mathMathnumberNumber": "stem" }, inplace=True)
df_merged["motor_skills"].replace({"Motor": "motor", "motorMotor": "motor" }, inplace=True)
df_merged["language_skills"].replace({"Language": "language"}, inplace=True)
df_merged = df_merged.drop(axis=0, columns=["cognative_skills"])
df_merged["emotional_skills"].replace({"Emotional": "emotional", "emotionalEmotional": "emotional"}, inplace=True)
df_merged["social_skills"].replace({"Social": "social"}, inplace=True)
df_final = df_merged
# One hot encoding
one_hot = OneHotEncoder(sparse=False, handle_unknown='ignore')
m = df_final['motor_skills'].to_numpy().reshape(-1, 1)
one_hot.fit(m)
motor = one_hot.transform(m)
motor_df = pd.DataFrame(motor, columns= ["motor", "drop"]).drop( axis=0, columns='drop')
l = df_final['language_skills'].to_numpy().reshape(-1, 1)
one_hot.fit(l)
language = one_hot.transform(l)
language_df = pd.DataFrame(language, columns= ["language", "drop"]).drop( axis=0, columns='drop')
st= df_final['stem_skills'].to_numpy().reshape(-1, 1)
one_hot.fit(st)
stem = one_hot.transform(st)
stem_df = pd.DataFrame(stem, columns= ["stem", "drop"]).drop( axis=0, columns='drop')
e = df_final['emotional_skills'].to_numpy().reshape(-1, 1)
one_hot.fit(e)
emotional = one_hot.transform(e)
emotional_df = pd.DataFrame(emotional, columns= ["emotional", "drop"]).drop( axis=0, columns='drop')
s = df_final['social_skills'].to_numpy().reshape(-1, 1)
one_hot.fit(s)
social = one_hot.transform(s)
social_df = pd.DataFrame(social, columns= ["social", "drop"]).drop( axis=0, columns='drop')
df_final = pd.concat([df_final, motor_df, language_df, stem_df, emotional_df, social_df], axis=1)
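# Editor's aside (an assumption, not from the original script): since each skill
# column holds a single category or NaN, a roughly equivalent and shorter route
# would be a dummy encoding of the skill columns, e.g.
# pd.get_dummies(df_final[['motor_skills', 'language_skills']])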
import os
from datetime import datetime
import nose
import pandas as pd
from pandas import compat
from pandas.util.testing import network, assert_frame_equal, with_connectivity_check
from numpy.testing.decorators import slow
import pandas.util.testing as tm
if compat.PY3:
raise nose.SkipTest("python-gflags does not support Python 3 yet")
try:
import httplib2
import pandas.io.ga as ga
from pandas.io.ga import GAnalytics, read_ga
from pandas.io.auth import AuthenticationConfigError, reset_default_token_store
from pandas.io import auth
except ImportError:
raise nose.SkipTest("need httplib2 and auth libs")
class TestGoogle(tm.TestCase):
_multiprocess_can_split_ = True
def test_remove_token_store(self):
auth.DEFAULT_TOKEN_FILE = 'test.dat'
with open(auth.DEFAULT_TOKEN_FILE, 'w') as fh:
fh.write('test')
reset_default_token_store()
self.assertFalse(os.path.exists(auth.DEFAULT_TOKEN_FILE))
@with_connectivity_check("http://www.google.com")
def test_getdata(self):
try:
end_date = datetime.now()
start_date = end_date - pd.offsets.Day() * 5
end_date = end_date.strftime('%Y-%m-%d')
start_date = start_date.strftime('%Y-%m-%d')
reader = GAnalytics()
df = reader.get_data(
metrics=['avgTimeOnSite', 'visitors', 'newVisits',
'pageviewsPerVisit'],
start_date=start_date,
end_date=end_date,
dimensions=['date', 'hour'],
parse_dates={'ts': ['date', 'hour']},
index_col=0)
self.assertIsInstance(df, pd.DataFrame)
self.assertIsInstance(df.index, pd.DatetimeIndex)
self.assertGreater(len(df), 1)
self.assertTrue('date' not in df)
self.assertTrue('hour' not in df)
self.assertEqual(df.index.name, 'ts')
self.assertTrue('avgTimeOnSite' in df)
self.assertTrue('visitors' in df)
self.assertTrue('newVisits' in df)
self.assertTrue('pageviewsPerVisit' in df)
df2 = read_ga(
metrics=['avgTimeOnSite', 'visitors', 'newVisits',
'pageviewsPerVisit'],
start_date=start_date,
end_date=end_date,
dimensions=['date', 'hour'],
parse_dates={'ts': ['date', 'hour']},
index_col=0)
assert_frame_equal(df, df2)
except AuthenticationConfigError:
raise nose.SkipTest("authentication error")
@with_connectivity_check("http://www.google.com")
def test_iterator(self):
try:
            reader = GAnalytics()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 12 17:13:29 2018
@author: pamelaanderson
"""
from difflib import SequenceMatcher
import json
import numpy as np
import os
import operator
import pandas as pd
def load_adverse_events(path, year, q):
""" Loading adverse drug events while performing basic pre-processing"""
path_w_year = path + year + '/' + q + '/'
json_files = os.listdir(path_w_year)
df_adverse_ev = pd.DataFrame()
file_tot = [file for file in json_files if file not in ['.DS_Store']]
ind = 0
for file in file_tot:
print(file)
adverse_ev_data = json.load(open(path_w_year + file))
df_adverse_ev_json = pd.DataFrame(adverse_ev_data['results'])
df_adverse_ev = pd.concat([df_adverse_ev, df_adverse_ev_json])
del adverse_ev_data, df_adverse_ev_json
ind += 1
df_adverse_ev = df_adverse_ev.reset_index(drop=True)
# Change data types to correct format
df_adverse_ev = format_kept_cells(df_adverse_ev)
# Find drug application number from nested dictionary
df_adverse_ev = extract_drug_app_num_from_ad_ev(df_adverse_ev)
# Find patient features from nested dictionary
df_adverse_ev = extract_patient_features(df_adverse_ev)
# Find drug info from nested dictionary
df_adverse_ev = extract_drug_features(df_adverse_ev)
# Find who submitted report info as column in df
df_adverse_ev = extract_source_info(df_adverse_ev)
# Drop columns that will not be included as features
df_adverse_ev = drop_unneeded_cols(df_adverse_ev)
return df_adverse_ev
def drop_unneeded_cols(df_adverse_ev):
""" Drop the columns that will not be used as features """
drop_cols = ['companynumb','duplicate', 'occurcountry',
'patient',
'primarysourcecountry', 'receiptdateformat',
'receiver', 'receivedate', 'receivedateformat', 'reportduplicate',
'reporttype','safetyreportid',
'safetyreportversion', 'sender',
'transmissiondate','transmissiondateformat']
df_adverse_ev = df_adverse_ev.drop(drop_cols, axis=1)
return df_adverse_ev
def format_kept_cells(df_adverse_ev):
""" Correct data types (to numeric or datetime) """
df_adverse_ev['fulfillexpeditecriteria'] = pd.to_numeric(df_adverse_ev['fulfillexpeditecriteria'])
df_adverse_ev['serious'] = pd.to_numeric(df_adverse_ev['serious'])
df_adverse_ev['seriousnesscongenitalanomali'] = pd.to_numeric(df_adverse_ev['seriousnesscongenitalanomali'])
df_adverse_ev['seriousnessdeath'] = pd.to_numeric(df_adverse_ev['seriousnessdeath'])
df_adverse_ev['seriousnessdisabling'] = pd.to_numeric(df_adverse_ev['seriousnessdisabling'])
df_adverse_ev['seriousnesshospitalization'] = pd.to_numeric(df_adverse_ev['seriousnesshospitalization'])
df_adverse_ev['seriousnesslifethreatening'] = pd.to_numeric(df_adverse_ev['seriousnesslifethreatening'])
df_adverse_ev['seriousnessother'] = pd.to_numeric(df_adverse_ev['seriousnessother'])
df_adverse_ev['receiptdate'] = pd.to_datetime(df_adverse_ev['receiptdate'])
cols_to_convert_na_to_0 = ['serious',
'seriousnesscongenitalanomali',
'seriousnessdeath',
'seriousnessdisabling',
'seriousnesshospitalization',
'seriousnesslifethreatening',
'seriousnessother']
df_adverse_ev[cols_to_convert_na_to_0] = df_adverse_ev[ cols_to_convert_na_to_0 ].fillna(value=0)
return df_adverse_ev
def extract_drug_features(df_adverse_ev):
""" Find the relevant information about the drugs """
medic_product = []
drug_indict = []
drug_route = []
drug_char = []
for i in range(0,len(df_adverse_ev)):
col_names = list(df_adverse_ev.iloc[i]['patient']['drug'][0].keys())
if 'medicinalproduct' in col_names:
medic_product.append(df_adverse_ev.iloc[i]['patient']['drug'][0]['medicinalproduct'])
else:
medic_product.append(np.nan)
if 'drugindication' in col_names:
drug_indict.append(df_adverse_ev.iloc[i]['patient']['drug'][0]['drugindication'])
else:
drug_indict.append(np.nan)
if 'drugadministrationroute' in col_names:
drug_route.append(df_adverse_ev.iloc[i]['patient']['drug'][0]['drugadministrationroute'])
else:
drug_route.append(np.nan)
if 'drugcharacterization' in col_names:
drug_char.append(df_adverse_ev.iloc[i]['patient']['drug'][0]['drugcharacterization'])
else:
drug_char.append(np.nan)
drug_info = pd.DataFrame({'medic_product' : medic_product,
'drug_indict' : drug_indict,
'drug_route' : drug_route,
'drug_char' : drug_char})
df_adverse_ev = pd.concat([df_adverse_ev, drug_info], axis=1)
return df_adverse_ev
def extract_source_info(df_adverse_ev):
""" Find information about who submitted the report """
qual_list = []
for i in range(0,len(df_adverse_ev)):
if df_adverse_ev.iloc[i]['primarysource'] is not None:
col_names = list(df_adverse_ev.iloc[i]['primarysource'].keys())
if 'qualification' in col_names:
                qual_list.append(pd.to_numeric(df_adverse_ev.iloc[i]['primarysource']['qualification']))
import cv2
import numpy as np
import os
import pandas as pd
from tkg import GUI
ix,iy,ix2,iy2 = -1,-1,-1,-1
# mouse callback function
flag=0
k=None
img=None
def draw_circle(event,x,y,flags,param):
global ix,iy,ix2,iy2,flag
if event == cv2.EVENT_LBUTTONDBLCLK:
cv2.circle(img,(x,y),100,(255,0,0),-1)
if flag ==0:
ix,iy = x,y
flag=1
elif flag== 1:
ix2,iy2=x,y
flag=0
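# Hedged sketch (editor's addition, not shown in this excerpt): the callback only
# takes effect once it is attached to a named window, along the lines of
# cv2.namedWindow('annotate')
# cv2.setMouseCallback('annotate', draw_circle)
# after which two double-clicks record (ix, iy) and (ix2, iy2) as box corners.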
def main():
global k,flag,img
print("Select Path First")
o=GUI()
o.gui()
    df = pd.DataFrame(columns=["Filename","x1","y1","x2","y2","classname"])
import pandas as pd
from pandas import Series, Period, PeriodIndex, date_range
class PeriodProperties(object):
params = ['M', 'min']
param_names = ['freq']
def setup(self, freq):
self.per = Period('2012-06-01', freq=freq)
def time_year(self, freq):
self.per.year
def time_month(self, freq):
self.per.month
def time_day(self, freq):
self.per.day
def time_hour(self, freq):
self.per.hour
def time_minute(self, freq):
self.per.minute
def time_second(self, freq):
self.per.second
def time_is_leap_year(self, freq):
self.per.is_leap_year
def time_quarter(self, freq):
self.per.quarter
def time_qyear(self, freq):
self.per.qyear
def time_week(self, freq):
self.per.week
def time_daysinmonth(self, freq):
self.per.daysinmonth
def time_dayofweek(self, freq):
self.per.dayofweek
def time_dayofyear(self, freq):
self.per.dayofyear
def time_start_time(self, freq):
self.per.start_time
def time_end_time(self, freq):
self.per.end_time
class PeriodUnaryMethods(object):
params = ['M', 'min']
param_names = ['freq']
def setup(self, freq):
self.per = Period('2012-06-01', freq=freq)
def time_to_timestamp(self, freq):
self.per.to_timestamp()
def time_now(self, freq):
self.per.now(freq)
def time_asfreq(self, freq):
self.per.asfreq('A')
class PeriodIndexConstructor(object):
goal_time = 0.2
params = ['D']
param_names = ['freq']
def setup(self, freq):
self.rng = date_range('1985', periods=1000)
self.rng2 = date_range('1985', periods=1000).to_pydatetime()
def time_from_date_range(self, freq):
PeriodIndex(self.rng, freq=freq)
def time_from_pydatetime(self, freq):
        PeriodIndex(self.rng2, freq=freq)
# Data Preprocessing
"""ML_Workflow template with required libraries and function calls.
@author:Varshtih
"""
import pandas as pd
import numpy as np
from autoimpute.imputations import MultipleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from scipy import stats
import sweetviz
import seaborn as sns
from pyod.models.feature_bagging import FeatureBagging
# Load Input Files
train_data = pd.read_csv(r"C:\Users\svvar\PycharmProjects\ml_workflow\Algorithims\Data Files\train.csv")
test_data = pd.read_csv(r"C:\Users\svvar\PycharmProjects\ml_workflow\Algorithims\Data Files\test.csv")
train_data.info()
test_data.info()
# Fill in required Inputs
x_train = train_data.iloc[:, list(range(3, 11))]
y_train = train_data.iloc[:, list(range(11,12))].values
x_train_num = train_data.iloc[:, list(range(3, 9))]
x_train_txt = train_data.iloc[:, list(range(9, 11))]
x_train_txt_encode_split = 2 # Split at Column Number
x_test = test_data.iloc[:, list(range(3, 11))]
x_test_num = test_data.iloc[:, list(range(3, 9))]
x_test_txt = test_data.iloc[:, list(range(9, 11))]
x_test_txt_encode_split = 2 # Split at Column Number
# Impute Missing values
# Numerical Imputer
imputer_num = MultipleImputer(strategy='stochastic', return_list=True, n=5, seed=101)
x_train_num_avg = imputer_num.fit_transform(x_train_num)
x_train_num_concat = x_train_num_avg[0][1]
for i in range(len(x_train_num_avg)-1):
x_train_num_concat = pd.concat([x_train_num_concat,x_train_num_avg[i+1][1]], axis=1)
x_train_num_avg = x_train_num_concat.groupby(by=x_train_num_concat.columns, axis=1).apply(lambda g: g.mean(axis=1))
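# Editor's illustrative aside: averaging the n imputed copies relies on grouping
# duplicate column names; a minimal demonstration of the same trick:
demo = pd.concat([pd.DataFrame({'a': [1.0, 2.0]}), pd.DataFrame({'a': [3.0, 4.0]})], axis=1)
demo_avg = demo.groupby(by=demo.columns, axis=1).apply(lambda g: g.mean(axis=1))
# demo_avg['a'] is the element-wise mean of the two copies, i.e. [2.0, 3.0].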
x_test_num_avg = imputer_num.fit_transform(x_test_num)
x_test_num_concat = x_test_num_avg[0][1]
for i in range(len(x_test_num_avg)-1):
    x_test_num_concat = pd.concat([x_test_num_concat,x_test_num_avg[i+1][1]], axis=1)
"""Helper methods."""
import copy
import glob
import errno
import os.path
import time
import calendar
import numpy
import pandas
import matplotlib.colors
from matplotlib import pyplot
import keras
import tensorflow.keras as tf_keras
import tensorflow.keras.layers as layers
import tensorflow.python.keras.backend as K
from scipy.interpolate import interp1d
from scipy.ndimage.filters import gaussian_filter
from scipy.spatial.distance import cdist
from scipy.cluster.hierarchy import linkage, dendrogram
from sklearn.metrics import roc_auc_score
from sklearn.metrics import auc as sklearn_auc
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet, \
SGDClassifier
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.cluster import KMeans, AgglomerativeClustering
from ai2es_xai_course.plotting import evaluation_plotting
from ai2es_xai_course.utils import keras_metrics as custom_metrics
# TODO(thunderhoser): Split this into different modules.
# Variable names.
METADATA_COLUMNS_ORIG = [
'Step_ID', 'Track_ID', 'Ensemble_Name', 'Ensemble_Member', 'Run_Date',
'Valid_Date', 'Forecast_Hour', 'Valid_Hour_UTC'
]
EXTRANEOUS_COLUMNS_ORIG = [
'Duration', 'Centroid_Lon', 'Centroid_Lat', 'Centroid_X', 'Centroid_Y',
'Storm_Motion_U', 'Storm_Motion_V', 'Matched', 'Max_Hail_Size',
'Num_Matches', 'Shape', 'Location', 'Scale'
]
TARGET_NAME_ORIG = 'RVORT1_MAX-future_max'
TARGET_NAME = 'max_future_vorticity_s01'
BINARIZED_TARGET_NAME = 'strong_future_rotation_flag'
AREA_NAME = 'area_km2'
MAJOR_AXIS_NAME = 'major_axis_km'
MINOR_AXIS_NAME = 'minor_axis_km'
ORIENTATION_NAME = 'orientation_deg'
METADATA_COLUMNS_ORIG_TO_NEW = {
'Step_ID': 'storm_object_name',
'Track_ID': 'storm_cell_name',
'Ensemble_Name': 'ensemble_name',
'Ensemble_Member': 'ensemble_member_name',
'Run_Date': 'init_time_string',
'Valid_Date': 'valid_time_string',
'Forecast_Hour': 'lead_time_hours',
'Valid_Hour_UTC': 'valid_hour'
}
TARGET_COLUMNS_ORIG_TO_NEW = {
TARGET_NAME_ORIG: TARGET_NAME
}
PREDICTOR_COLUMNS_ORIG_TO_NEW = {
'REFL_COM_mean': 'composite_refl_mean_dbz',
'REFL_COM_max': 'composite_refl_max_dbz',
'REFL_COM_min': 'composite_refl_min_dbz',
'REFL_COM_std': 'composite_refl_stdev_dbz',
'REFL_COM_percentile_10': 'composite_refl_prctile10_dbz',
'REFL_COM_percentile_25': 'composite_refl_prctile25_dbz',
'REFL_COM_percentile_50': 'composite_refl_median_dbz',
'REFL_COM_percentile_75': 'composite_refl_prctile75_dbz',
'REFL_COM_percentile_90': 'composite_refl_prctile90_dbz',
'U10_mean': 'u_wind_10metres_mean_m_s01',
'U10_max': 'u_wind_10metres_max_m_s01',
'U10_min': 'u_wind_10metres_min_m_s01',
'U10_std': 'u_wind_10metres_stdev_m_s01',
'U10_percentile_10': 'u_wind_10metres_prctile10_m_s01',
'U10_percentile_25': 'u_wind_10metres_prctile25_m_s01',
'U10_percentile_50': 'u_wind_10metres_median_m_s01',
'U10_percentile_75': 'u_wind_10metres_prctile75_m_s01',
'U10_percentile_90': 'u_wind_10metres_prctile90_m_s01',
'V10_mean': 'v_wind_10metres_mean_m_s01',
'V10_max': 'v_wind_10metres_max_m_s01',
'V10_min': 'v_wind_10metres_min_m_s01',
'V10_std': 'v_wind_10metres_stdev_m_s01',
'V10_percentile_10': 'v_wind_10metres_prctile10_m_s01',
'V10_percentile_25': 'v_wind_10metres_prctile25_m_s01',
'V10_percentile_50': 'v_wind_10metres_median_m_s01',
'V10_percentile_75': 'v_wind_10metres_prctile75_m_s01',
'V10_percentile_90': 'v_wind_10metres_prctile90_m_s01',
'T2_mean': 'temperature_2metres_mean_kelvins',
'T2_max': 'temperature_2metres_max_kelvins',
'T2_min': 'temperature_2metres_min_kelvins',
'T2_std': 'temperature_2metres_stdev_kelvins',
'T2_percentile_10': 'temperature_2metres_prctile10_kelvins',
'T2_percentile_25': 'temperature_2metres_prctile25_kelvins',
'T2_percentile_50': 'temperature_2metres_median_kelvins',
'T2_percentile_75': 'temperature_2metres_prctile75_kelvins',
'T2_percentile_90': 'temperature_2metres_prctile90_kelvins',
'area': AREA_NAME,
'eccentricity': 'eccentricity',
'major_axis_length': MAJOR_AXIS_NAME,
'minor_axis_length': MINOR_AXIS_NAME,
'orientation': ORIENTATION_NAME
}
MAE_KEY = 'mean_absolute_error'
RMSE_KEY = 'root_mean_squared_error'
MEAN_BIAS_KEY = 'mean_bias'
MAE_SKILL_SCORE_KEY = 'mae_skill_score'
MSE_SKILL_SCORE_KEY = 'mse_skill_score'
MAX_PEIRCE_SCORE_KEY = 'max_peirce_score'
AUC_KEY = 'area_under_roc_curve'
MAX_CSI_KEY = 'max_csi'
BRIER_SCORE_KEY = 'brier_score'
BRIER_SKILL_SCORE_KEY = 'brier_skill_score'
PREDICTORS_KEY = 'predictor_matrix'
PERMUTED_FLAGS_KEY = 'permuted_flags'
PERMUTED_INDICES_KEY = 'permuted_predictor_indices'
PERMUTED_COSTS_KEY = 'permuted_cost_matrix'
DEPERMUTED_INDICES_KEY = 'depermuted_predictor_indices'
DEPERMUTED_COSTS_KEY = 'depermuted_cost_matrix'
HIT_INDICES_KEY = 'hit_indices'
MISS_INDICES_KEY = 'miss_indices'
FALSE_ALARM_INDICES_KEY = 'false_alarm_indices'
CORRECT_NULL_INDICES_KEY = 'correct_null_indices'
# Plotting constants.
FIGURE_WIDTH_INCHES = 10
FIGURE_HEIGHT_INCHES = 10
LARGE_FIGURE_WIDTH_INCHES = 15
LARGE_FIGURE_HEIGHT_INCHES = 15
DEFAULT_GRAPH_LINE_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
DEFAULT_GRAPH_LINE_WIDTH = 2
BAR_GRAPH_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
BAR_GRAPH_EDGE_WIDTH = 2
BAR_GRAPH_FONT_SIZE = 14
BAR_GRAPH_FONT_COLOUR = numpy.array([217, 95, 2], dtype=float) / 255
GREEN_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
ORANGE_COLOUR = numpy.array([217, 95, 2], dtype=float) / 255
PURPLE_COLOUR = numpy.array([117, 112, 179], dtype=float) / 255
GREY_COLOUR = numpy.full(3, 152. / 255)
HISTOGRAM_EDGE_WIDTH = 1.5
HISTOGRAM_FACE_COLOUR = numpy.full(3, 152. / 255)
HISTOGRAM_FACE_COLOUR = matplotlib.colors.to_rgba(HISTOGRAM_FACE_COLOUR, 0.5)
HISTOGRAM_EDGE_COLOUR = numpy.full(3, 152. / 255)
FONT_SIZE = 20
pyplot.rc('font', size=FONT_SIZE)
pyplot.rc('axes', titlesize=FONT_SIZE)
pyplot.rc('axes', labelsize=FONT_SIZE)
pyplot.rc('xtick', labelsize=FONT_SIZE)
pyplot.rc('ytick', labelsize=FONT_SIZE)
pyplot.rc('legend', fontsize=FONT_SIZE)
pyplot.rc('figure', titlesize=FONT_SIZE)
# Misc constants.
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
MINOR_SEPARATOR_STRING = '\n\n' + '-' * 50 + '\n\n'
DATE_FORMAT = '%Y%m%d'
DATE_FORMAT_REGEX = '[0-9][0-9][0-9][0-9][0-1][0-9][0-3][0-9]'
GRID_SPACING_KM = 3.
RADIANS_TO_DEGREES = 180. / numpy.pi
RANDOM_SEED = 6695
LAMBDA_TOLERANCE = 1e-10
ELU_FUNCTION_NAME = 'elu'
RELU_FUNCTION_NAME = 'relu'
SELU_FUNCTION_NAME = 'selu'
TANH_FUNCTION_NAME = 'tanh'
SIGMOID_FUNCTION_NAME = 'sigmoid'
ACTIVATION_FUNCTION_NAMES = [
ELU_FUNCTION_NAME, RELU_FUNCTION_NAME, SELU_FUNCTION_NAME,
TANH_FUNCTION_NAME, SIGMOID_FUNCTION_NAME
]
KERNEL_INITIALIZER_NAME = 'glorot_uniform'
BIAS_INITIALIZER_NAME = 'zeros'
METRIC_FUNCTION_LIST = [
custom_metrics.accuracy, custom_metrics.binary_accuracy,
custom_metrics.binary_csi, custom_metrics.binary_frequency_bias,
custom_metrics.binary_pod, custom_metrics.binary_pofd,
custom_metrics.binary_peirce_score, custom_metrics.binary_success_ratio,
custom_metrics.binary_focn
]
# TODO(thunderhoser): Remove word "binary" from these scores.
METRIC_FUNCTION_DICT = {
'accuracy': custom_metrics.accuracy,
'binary_accuracy': custom_metrics.binary_accuracy,
'binary_csi': custom_metrics.binary_csi,
'binary_frequency_bias': custom_metrics.binary_frequency_bias,
'binary_pod': custom_metrics.binary_pod,
'binary_pofd': custom_metrics.binary_pofd,
'binary_peirce_score': custom_metrics.binary_peirce_score,
'binary_success_ratio': custom_metrics.binary_success_ratio,
'binary_focn': custom_metrics.binary_focn
}
DEFAULT_NEURON_COUNTS = numpy.array([1000, 178, 32, 6, 1], dtype=int)
DEFAULT_DROPOUT_RATES = numpy.array([0.5, 0.5, 0.5, 0.5, 0])
DEFAULT_INNER_ACTIV_FUNCTION_NAME = copy.deepcopy(RELU_FUNCTION_NAME)
DEFAULT_INNER_ACTIV_FUNCTION_ALPHA = 0.2
DEFAULT_OUTPUT_ACTIV_FUNCTION_NAME = copy.deepcopy(SIGMOID_FUNCTION_NAME)
DEFAULT_OUTPUT_ACTIV_FUNCTION_ALPHA = 0.
DEFAULT_L1_WEIGHT = 0.
DEFAULT_L2_WEIGHT = 0.001
PLATEAU_PATIENCE_EPOCHS = 5
PLATEAU_LEARNING_RATE_MULTIPLIER = 0.6
PLATEAU_COOLDOWN_EPOCHS = 0
EARLY_STOPPING_PATIENCE_EPOCHS = 10
LOSS_PATIENCE = 0.
DEFAULT_NUM_BOOTSTRAP_REPS = 1000
ORIGINAL_COST_KEY = 'orig_cost_estimates'
BEST_PREDICTORS_KEY = 'best_predictor_names'
BEST_COSTS_KEY = 'best_cost_matrix'
STEP1_PREDICTORS_KEY = 'step1_predictor_names'
STEP1_COSTS_KEY = 'step1_cost_matrix'
BACKWARDS_FLAG_KEY = 'is_backwards_test'
def _tabular_file_name_to_date(csv_file_name):
"""Parses date from name of tabular file.
:param csv_file_name: Path to input file.
:return: date_string: Date (format "yyyymmdd").
"""
pathless_file_name = os.path.split(csv_file_name)[-1]
date_string = pathless_file_name.replace(
'track_step_NCARSTORM_d01_', ''
).replace('-0000.csv', '')
# Verify.
time_string_to_unix(time_string=date_string, time_format=DATE_FORMAT)
return date_string
def _remove_future_data(predictor_table):
"""Removes future data from predictors.
:param predictor_table: pandas DataFrame with predictor values. Each row is
one storm object.
:return: predictor_table: Same but with fewer columns.
"""
predictor_names = list(predictor_table)
columns_to_remove = [p for p in predictor_names if 'future' in p]
return predictor_table.drop(columns_to_remove, axis=1, inplace=False)
def _lambdas_to_sklearn_inputs(lambda1, lambda2):
"""Converts lambdas to input arguments for scikit-learn.
:param lambda1: L1-regularization weight.
:param lambda2: L2-regularization weight.
:return: alpha: Input arg for scikit-learn model.
:return: l1_ratio: Input arg for scikit-learn model.
"""
return lambda1 + lambda2, lambda1 / (lambda1 + lambda2)
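# Hedged usage sketch (not part of the original module): the converted values feed
# straight into scikit-learn's ElasticNet, e.g.
#     alpha, l1_ratio = _lambdas_to_sklearn_inputs(lambda1=1e-3, lambda2=4e-3)
#     # alpha == 5e-3, l1_ratio == 0.2
#     model_object = sklearn.linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio)
# Note that lambda1 + lambda2 must exceed LAMBDA_TOLERANCE to avoid dividing by zero.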
def _get_reliability_curve(actual_values, predicted_values, num_bins,
max_bin_edge, invert=False):
"""Computes reliability curve for one target variable.
E = number of examples
B = number of bins
:param actual_values: length-E numpy array of actual values.
:param predicted_values: length-E numpy array of predicted values.
:param num_bins: Number of bins (points in curve).
:param max_bin_edge: Value at upper edge of last bin.
:param invert: Boolean flag. If True, will return inverted reliability
curve, which bins by target value and relates target value to
conditional mean prediction. If False, will return normal reliability
curve, which bins by predicted value and relates predicted value to
conditional mean observation (target).
:return: mean_predictions: length-B numpy array of x-coordinates.
:return: mean_observations: length-B numpy array of y-coordinates.
:return: example_counts: length-B numpy array with num examples in each bin.
"""
max_bin_edge = max([max_bin_edge, numpy.finfo(float).eps])
bin_cutoffs = numpy.linspace(0., max_bin_edge, num=num_bins + 1)
bin_index_by_example = numpy.digitize(
actual_values if invert else predicted_values, bin_cutoffs, right=False
) - 1
bin_index_by_example[bin_index_by_example < 0] = 0
bin_index_by_example[bin_index_by_example > num_bins - 1] = num_bins - 1
mean_predictions = numpy.full(num_bins, numpy.nan)
mean_observations = numpy.full(num_bins, numpy.nan)
example_counts = numpy.full(num_bins, -1, dtype=int)
for i in range(num_bins):
these_example_indices = numpy.where(bin_index_by_example == i)[0]
example_counts[i] = len(these_example_indices)
mean_predictions[i] = numpy.mean(
predicted_values[these_example_indices]
)
mean_observations[i] = numpy.mean(actual_values[these_example_indices])
return mean_predictions, mean_observations, example_counts
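# Hedged example (made-up probabilities, not from the original module):
#     observed = numpy.array([0., 1., 1., 0., 1.])
#     forecast = numpy.array([0.1, 0.8, 0.7, 0.3, 0.9])
#     mean_pred, mean_obs, counts = _get_reliability_curve(
#         actual_values=observed, predicted_values=forecast,
#         num_bins=5, max_bin_edge=1., invert=False)
# A perfectly reliable model gives mean_pred[i] ~= mean_obs[i] in every populated
# bin, i.e. points lying on the 1:1 diagonal of the reliability diagram.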
def _add_colour_bar(
axes_object, colour_map_object, values_to_colour, min_colour_value,
max_colour_value, colour_norm_object=None,
orientation_string='vertical', extend_min=True, extend_max=True):
"""Adds colour bar to existing axes.
:param axes_object: Existing axes (instance of
`matplotlib.axes._subplots.AxesSubplot`).
:param colour_map_object: Colour scheme (instance of
`matplotlib.pyplot.cm`).
:param values_to_colour: numpy array of values to colour.
:param min_colour_value: Minimum value in colour scheme.
:param max_colour_value: Max value in colour scheme.
:param colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`,
defining the scale of the colour map. If `colour_norm_object is None`,
will assume that scale is linear.
:param orientation_string: Orientation of colour bar ("vertical" or
"horizontal").
:param extend_min: Boolean flag. If True, the bottom of the colour bar will
have an arrow. If False, it will be a flat line, suggesting that lower
values are not possible.
:param extend_max: Same but for top of colour bar.
:return: colour_bar_object: Colour bar (instance of
`matplotlib.pyplot.colorbar`) created by this method.
"""
if colour_norm_object is None:
colour_norm_object = matplotlib.colors.Normalize(
vmin=min_colour_value, vmax=max_colour_value, clip=False
)
scalar_mappable_object = pyplot.cm.ScalarMappable(
cmap=colour_map_object, norm=colour_norm_object
)
scalar_mappable_object.set_array(values_to_colour)
if extend_min and extend_max:
extend_string = 'both'
elif extend_min:
extend_string = 'min'
elif extend_max:
extend_string = 'max'
else:
extend_string = 'neither'
if orientation_string == 'horizontal':
padding = 0.075
else:
padding = 0.05
colour_bar_object = pyplot.colorbar(
ax=axes_object, mappable=scalar_mappable_object,
orientation=orientation_string, pad=padding, extend=extend_string,
shrink=0.8
)
colour_bar_object.ax.tick_params(labelsize=FONT_SIZE)
return colour_bar_object
def _get_points_in_roc_curve(observed_labels, forecast_probabilities):
"""Creates points for ROC curve.
E = number of examples
T = number of binarization thresholds
:param observed_labels: length-E numpy array of class labels (integers in
0...1).
:param forecast_probabilities: length-E numpy array with forecast
probabilities of label = 1.
:return: pofd_by_threshold: length-T numpy array of POFD (probability of
false detection) values.
:return: pod_by_threshold: length-T numpy array of POD (probability of
detection) values.
"""
assert numpy.all(numpy.logical_or(
observed_labels == 0, observed_labels == 1
))
assert numpy.all(numpy.logical_and(
forecast_probabilities >= 0, forecast_probabilities <= 1
))
observed_labels = observed_labels.astype(int)
binarization_thresholds = numpy.linspace(0, 1, num=1001, dtype=float)
num_thresholds = len(binarization_thresholds)
pofd_by_threshold = numpy.full(num_thresholds, numpy.nan)
pod_by_threshold = numpy.full(num_thresholds, numpy.nan)
for k in range(num_thresholds):
these_forecast_labels = (
forecast_probabilities >= binarization_thresholds[k]
).astype(int)
this_num_hits = numpy.sum(numpy.logical_and(
these_forecast_labels == 1, observed_labels == 1
))
this_num_false_alarms = numpy.sum(numpy.logical_and(
these_forecast_labels == 1, observed_labels == 0
))
this_num_misses = numpy.sum(numpy.logical_and(
these_forecast_labels == 0, observed_labels == 1
))
this_num_correct_nulls = numpy.sum(numpy.logical_and(
these_forecast_labels == 0, observed_labels == 0
))
        # Guard the denominators explicitly: float/NumPy division returns inf or
        # NaN with a warning rather than raising ZeroDivisionError, so a
        # try/except around the division would never fire.
        this_num_negatives = this_num_false_alarms + this_num_correct_nulls
        if this_num_negatives > 0:
            pofd_by_threshold[k] = (
                float(this_num_false_alarms) / this_num_negatives
            )

        this_num_positives = this_num_hits + this_num_misses
        if this_num_positives > 0:
            pod_by_threshold[k] = (
                float(this_num_hits) / this_num_positives
            )
pod_by_threshold = numpy.array([1.] + pod_by_threshold.tolist() + [0.])
pofd_by_threshold = numpy.array([1.] + pofd_by_threshold.tolist() + [0.])
return pofd_by_threshold, pod_by_threshold
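# Hedged note (illustration only, not original code): the area under the ROC
# curve can be recovered from these points with the trapezoidal rule. The arrays
# run from POFD = 1 down to 0, hence the negation; this is what AUC_KEY above
# refers to.
#     pofd, pod = _get_points_in_roc_curve(observed_labels, forecast_probabilities)
#     auc_estimate = -numpy.trapz(pod, x=pofd)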
def _get_points_in_perf_diagram(observed_labels, forecast_probabilities):
"""Creates points for performance diagram.
E = number of examples
T = number of binarization thresholds
:param observed_labels: length-E numpy array of class labels (integers in
0...1).
:param forecast_probabilities: length-E numpy array with forecast
probabilities of label = 1.
:return: pod_by_threshold: length-T numpy array of POD (probability of
detection) values.
:return: success_ratio_by_threshold: length-T numpy array of success ratios.
"""
assert numpy.all(numpy.logical_or(
observed_labels == 0, observed_labels == 1
))
assert numpy.all(numpy.logical_and(
forecast_probabilities >= 0, forecast_probabilities <= 1
))
observed_labels = observed_labels.astype(int)
binarization_thresholds = numpy.linspace(0, 1, num=1001, dtype=float)
num_thresholds = len(binarization_thresholds)
pod_by_threshold = numpy.full(num_thresholds, numpy.nan)
success_ratio_by_threshold = numpy.full(num_thresholds, numpy.nan)
for k in range(num_thresholds):
these_forecast_labels = (
forecast_probabilities >= binarization_thresholds[k]
).astype(int)
this_num_hits = numpy.sum(numpy.logical_and(
these_forecast_labels == 1, observed_labels == 1
))
this_num_false_alarms = numpy.sum(numpy.logical_and(
these_forecast_labels == 1, observed_labels == 0
))
this_num_misses = numpy.sum(numpy.logical_and(
these_forecast_labels == 0, observed_labels == 1
))
        # Same guard as in _get_points_in_roc_curve: explicit checks instead of
        # try/except, because NumPy division does not raise ZeroDivisionError.
        this_num_positives = this_num_hits + this_num_misses
        if this_num_positives > 0:
            pod_by_threshold[k] = (
                float(this_num_hits) / this_num_positives
            )

        this_num_forecast_positives = this_num_hits + this_num_false_alarms
        if this_num_forecast_positives > 0:
            success_ratio_by_threshold[k] = (
                float(this_num_hits) / this_num_forecast_positives
            )
pod_by_threshold = numpy.array([1.] + pod_by_threshold.tolist() + [0.])
success_ratio_by_threshold = numpy.array(
[0.] + success_ratio_by_threshold.tolist() + [1.]
)
return pod_by_threshold, success_ratio_by_threshold
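# Hedged note (illustration only, not original code): the critical success index
# follows directly from these points via the identity
#     csi = (pod**-1 + success_ratio**-1 - 1) ** -1
# and frequency bias is pod / success_ratio. The maximum CSI over all thresholds
# is what MAX_CSI_KEY above refers to.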
def _do_activation(input_values, function_name, slope_param=0.2):
"""Runs input array through activation function.
:param input_values: numpy array (any shape).
:param function_name: Name of activation function.
:param slope_param: Slope parameter (alpha) for activation function. Used
only for eLU and ReLU.
:return: output_values: Same as `input_values` but post-activation.
"""
assert function_name in ACTIVATION_FUNCTION_NAMES
input_object = K.placeholder()
if function_name == ELU_FUNCTION_NAME:
function_object = K.function(
[input_object],
[layers.ELU(alpha=slope_param)(input_object)]
)
elif function_name == RELU_FUNCTION_NAME:
function_object = K.function(
[input_object],
[layers.LeakyReLU(alpha=slope_param)(input_object)]
)
else:
function_object = K.function(
[input_object],
[layers.Activation(function_name)(input_object)]
)
return function_object([input_values])[0]
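# Hedged example: leaky ReLU with the default slope keeps 20% of negative input.
#     _do_activation(numpy.array([-1., 0., 2.]), function_name=RELU_FUNCTION_NAME)
#     # -> approximately [-0.2, 0., 2.]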
def _get_weight_regularizer(l1_weight, l2_weight):
"""Creates regularizer for neural-net weights.
:param l1_weight: L1 regularization weight. This "weight" is not to be
confused with those being regularized (weights learned by the net).
:param l2_weight: L2 regularization weight.
:return: regularizer_object: Instance of `keras.regularizers.l1_l2`.
"""
l1_weight = numpy.nanmax(numpy.array([l1_weight, 0.]))
l2_weight = numpy.nanmax(numpy.array([l2_weight, 0.]))
return keras.regularizers.l1_l2(l1=l1_weight, l2=l2_weight)
def _get_dense_layer(num_output_units, weight_regularizer=None):
"""Creates dense (fully connected) layer.
:param num_output_units: Number of output units (or "features" or
"neurons").
:param weight_regularizer: Will be used to regularize weights in the new
layer. This may be instance of `keras.regularizers` or None (if you
want no regularization).
:return: layer_object: Instance of `keras.layers.Dense`.
"""
return keras.layers.Dense(
num_output_units, activation=None, use_bias=True,
kernel_initializer=KERNEL_INITIALIZER_NAME,
bias_initializer=BIAS_INITIALIZER_NAME,
kernel_regularizer=weight_regularizer,
bias_regularizer=weight_regularizer
)
def _get_activation_layer(function_name, slope_param=0.2):
"""Creates activation layer.
:param function_name: Name of activation function.
:param slope_param: Slope parameter (alpha) for activation function. Used
only for eLU and ReLU.
:return: layer_object: Instance of `keras.layers.Activation`,
`keras.layers.ELU`, or `keras.layers.LeakyReLU`.
"""
assert function_name in ACTIVATION_FUNCTION_NAMES
if function_name == ELU_FUNCTION_NAME:
return keras.layers.ELU(alpha=slope_param)
if function_name == RELU_FUNCTION_NAME:
if slope_param <= 0:
return keras.layers.ReLU()
return keras.layers.LeakyReLU(alpha=slope_param)
return keras.layers.Activation(function_name)
def _get_dropout_layer(dropout_fraction):
"""Creates dropout layer.
:param dropout_fraction: Fraction of weights to drop.
:return: layer_object: Instance of `keras.layers.Dropout`.
"""
assert dropout_fraction > 0.
assert dropout_fraction < 1.
return keras.layers.Dropout(rate=dropout_fraction)
def _get_batch_norm_layer():
"""Creates batch-normalization layer.
:return: Instance of `keras.layers.BatchNormalization`.
"""
return keras.layers.BatchNormalization(
axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True
)
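# Hedged sketch (assumed composition, not part of the original module) of how the
# factory helpers above are meant to fit together, following DEFAULT_NEURON_COUNTS:
#     regularizer = _get_weight_regularizer(DEFAULT_L1_WEIGHT, DEFAULT_L2_WEIGHT)
#     model_object = keras.models.Sequential()
#     for k, this_num_units in enumerate(DEFAULT_NEURON_COUNTS):
#         model_object.add(_get_dense_layer(this_num_units, regularizer))
#         if k == len(DEFAULT_NEURON_COUNTS) - 1:
#             model_object.add(
#                 _get_activation_layer(DEFAULT_OUTPUT_ACTIV_FUNCTION_NAME))
#         else:
#             model_object.add(
#                 _get_activation_layer(DEFAULT_INNER_ACTIV_FUNCTION_NAME))
#             if DEFAULT_DROPOUT_RATES[k] > 0:
#                 model_object.add(_get_dropout_layer(DEFAULT_DROPOUT_RATES[k]))
#             model_object.add(_get_batch_norm_layer())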
def _mkdir_recursive_if_necessary(directory_name=None, file_name=None):
"""Creates directory if necessary (i.e., doesn't already exist).
This method checks for the argument `directory_name` first. If
`directory_name` is None, this method checks for `file_name` and extracts
the directory.
:param directory_name: Path to local directory.
:param file_name: Path to local file.
"""
if directory_name is None:
directory_name = os.path.dirname(file_name)
if directory_name == '':
return
try:
os.makedirs(directory_name)
except OSError as this_error:
if this_error.errno == errno.EEXIST and os.path.isdir(directory_name):
pass
else:
raise
def apply_gaussian_filter(input_matrix, e_folding_radius_grid_cells):
"""Applies Gaussian filter to any-dimensional grid.
:param input_matrix: numpy array with any dimensions.
:param e_folding_radius_grid_cells: e-folding radius (num grid cells).
:return: output_matrix: numpy array after smoothing (same dimensions as
input).
"""
assert e_folding_radius_grid_cells >= 0.
return gaussian_filter(
input_matrix, sigma=e_folding_radius_grid_cells, order=0, mode='nearest'
)
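# Hedged example: smooth a 2-D field with an e-folding radius of two grid cells.
#     smoothed_matrix = apply_gaussian_filter(
#         input_matrix=numpy.random.rand(50, 50), e_folding_radius_grid_cells=2.)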
def create_paneled_figure(
num_rows, num_columns, figure_width_inches=FIGURE_WIDTH_INCHES,
figure_height_inches=FIGURE_HEIGHT_INCHES,
horizontal_spacing=0.075, vertical_spacing=0., shared_x_axis=False,
shared_y_axis=False, keep_aspect_ratio=True):
"""Creates paneled figure.
This method only initializes the panels. It does not plot anything.
J = number of panel rows
K = number of panel columns
:param num_rows: J in the above discussion.
:param num_columns: K in the above discussion.
:param figure_width_inches: Width of the entire figure (including all
panels).
:param figure_height_inches: Height of the entire figure (including all
panels).
:param horizontal_spacing: Spacing (in figure-relative coordinates, from
0...1) between adjacent panel columns.
:param vertical_spacing: Spacing (in figure-relative coordinates, from
0...1) between adjacent panel rows.
:param shared_x_axis: Boolean flag. If True, all panels will share the same
x-axis.
:param shared_y_axis: Boolean flag. If True, all panels will share the same
y-axis.
:param keep_aspect_ratio: Boolean flag. If True, the aspect ratio of each
panel will be preserved (reflect the aspect ratio of the data plotted
therein).
:return: figure_object: Figure handle (instance of
`matplotlib.figure.Figure`).
:return: axes_object_matrix: J-by-K numpy array of axes handles (instances
of `matplotlib.axes._subplots.AxesSubplot`).
"""
figure_object, axes_object_matrix = pyplot.subplots(
num_rows, num_columns, sharex=shared_x_axis, sharey=shared_y_axis,
figsize=(figure_width_inches, figure_height_inches)
)
if num_rows == num_columns == 1:
axes_object_matrix = numpy.full(
(1, 1), axes_object_matrix, dtype=object
)
if num_rows == 1 or num_columns == 1:
axes_object_matrix = numpy.reshape(
axes_object_matrix, (num_rows, num_columns)
)
    pyplot.subplots_adjust(
        left=0.02, bottom=0.02, right=0.98, top=0.95,
        hspace=vertical_spacing, wspace=horizontal_spacing
    )
if not keep_aspect_ratio:
return figure_object, axes_object_matrix
for i in range(num_rows):
for j in range(num_columns):
axes_object_matrix[i][j].set(aspect='equal')
return figure_object, axes_object_matrix
def time_string_to_unix(time_string, time_format):
"""Converts time from string to Unix format.
Unix format = seconds since 0000 UTC 1 Jan 1970.
:param time_string: Time string.
:param time_format: Format of time string (example: "%Y%m%d" or
"%Y-%m-%d-%H%M%S").
:return: unix_time_sec: Time in Unix format.
"""
return calendar.timegm(time.strptime(time_string, time_format))
def time_unix_to_string(unix_time_sec, time_format):
"""Converts time from Unix format to string.
Unix format = seconds since 0000 UTC 1 Jan 1970.
:param unix_time_sec: Time in Unix format.
:param time_format: Desired format of time string (example: "%Y%m%d" or
"%Y-%m-%d-%H%M%S").
:return: time_string: Time string.
"""
return time.strftime(time_format, time.gmtime(unix_time_sec))
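# Hedged round-trip example:
#     time_unix_to_string(time_string_to_unix('20171004', DATE_FORMAT), DATE_FORMAT)
#     # -> '20171004'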
def find_tabular_files(directory_name, first_date_string, last_date_string):
"""Finds CSV files with tabular data.
:param directory_name: Name of directory with tabular files.
:param first_date_string: First date ("yyyymmdd") in range.
:param last_date_string: Last date ("yyyymmdd") in range.
:return: csv_file_names: 1-D list of paths to tabular files.
"""
first_time_unix_sec = time_string_to_unix(
time_string=first_date_string, time_format=DATE_FORMAT
)
last_time_unix_sec = time_string_to_unix(
time_string=last_date_string, time_format=DATE_FORMAT
)
csv_file_pattern = '{0:s}/track_step_NCARSTORM_d01_{1:s}-0000.csv'.format(
directory_name, DATE_FORMAT_REGEX
)
csv_file_names = glob.glob(csv_file_pattern)
csv_file_names.sort()
file_date_strings = [_tabular_file_name_to_date(f) for f in csv_file_names]
file_times_unix_sec = numpy.array([
time_string_to_unix(time_string=d, time_format=DATE_FORMAT)
for d in file_date_strings
], dtype=int)
good_indices = numpy.where(numpy.logical_and(
file_times_unix_sec >= first_time_unix_sec,
file_times_unix_sec <= last_time_unix_sec
))[0]
return [csv_file_names[k] for k in good_indices]
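# Hedged example (assumed directory layout, mirroring the glob pattern above):
#     csv_file_names = find_tabular_files(
#         directory_name='tabular_data', first_date_string='20171001',
#         last_date_string='20171031')
# Each returned path looks like
# 'tabular_data/track_step_NCARSTORM_d01_20171004-0000.csv'.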
def read_tabular_file(csv_file_name):
"""Reads tabular data from CSV file.
:param csv_file_name: Path to input file.
:return: metadata_table: pandas DataFrame with metadata. Each row is one
storm object.
:return: predictor_table: pandas DataFrame with predictor values. Each row
is one storm object.
:return: target_table: pandas DataFrame with target values. Each row is one
storm object.
"""
predictor_table = | pandas.read_csv(csv_file_name, header=0, sep=',') | pandas.read_csv |
"""Test solvent-accessible surface area methods."""
import logging
import json
from pathlib import Path
import pytest
import yaml
from scipy.stats import linregress
import numpy as np
import pandas as pd
from osmolytes.sasa import SolventAccessibleSurface, ReferenceModels
from osmolytes.pqr import parse_pqr_file, Atom, aggregate, count_residues
_LOGGER = logging.getLogger(__name__)
with open("tests/data/alkanes/alkanes.json", "rt") as json_file:
ATOM_AREAS = json.load(json_file)
PROTEIN_PATH = Path("tests/data/proteins")
@pytest.mark.parametrize("radius", [0.25, 0.5, 1.0, 2.0, 4.0])
def test_one_sphere_sasa(radius, tmp_path):
"""Test solvent-accessible surface areas for one sphere."""
atom = Atom()
atom.position = np.random.randn(3)
frac = np.random.rand(1)[0]
atom.radius = frac * radius
probe_radius = (1.0 - frac) * radius
xyz_path = Path(tmp_path) / "sphere.xyz"
sas = SolventAccessibleSurface(
[atom], probe_radius, 200, xyz_path=xyz_path
)
atom_sasa = sas.atom_surface_area(0)
ref_sasa = 4.0 * np.pi * radius * radius
_LOGGER.info(
f"Radius: {radius}, Test area: {atom_sasa}, Ref area: {ref_sasa}"
)
np.testing.assert_almost_equal(atom_sasa, ref_sasa)
def two_sphere_area(radius1, radius2, distance):
"""Area of two overlapping spheres.
:param float radius1: radius of sphere1
:param float radius2: radius of sphere2
:param float distance: distance between centers of spheres
:returns: exposed areas of spheres
:rtype: (float, float)
"""
distsq = distance * distance
rad1sq = radius1 * radius1
rad2sq = radius2 * radius2
full_area1 = 4 * np.pi * rad1sq
full_area2 = 4 * np.pi * rad2sq
if distance > (radius1 + radius2):
return (full_area1, full_area2)
elif distance <= np.absolute(radius1 - radius2):
if full_area1 > full_area2:
return (full_area1, 0)
if full_area1 < full_area2:
return (0, full_area2)
else:
return (0.5 * full_area1, 0.5 * full_area2)
else:
if radius1 > 0:
cos_theta1 = (rad1sq + distsq - rad2sq) / (2 * radius1 * distance)
cap1_area = 2 * np.pi * radius1 * radius1 * (1 - cos_theta1)
else:
cap1_area = 0
if radius2 > 0:
cos_theta2 = (rad2sq + distsq - rad1sq) / (2 * radius2 * distance)
cap2_area = 2 * np.pi * radius2 * radius2 * (1 - cos_theta2)
else:
cap2_area = 0
return (full_area1 - cap1_area, full_area2 - cap2_area)
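def test_two_sphere_area_analytic():
    """Added sanity check (not from upstream) for the analytic reference above."""
    full_area = 4 * np.pi
    # Disjoint unit spheres keep their full areas
    np.testing.assert_allclose(
        two_sphere_area(1.0, 1.0, 3.0), (full_area, full_area)
    )
    # Coincident equal spheres each expose half a sphere
    np.testing.assert_allclose(
        two_sphere_area(1.0, 1.0, 0.0), (0.5 * full_area, 0.5 * full_area)
    )
    # Unit spheres at unit distance each lose a spherical cap of area pi
    np.testing.assert_allclose(
        two_sphere_area(1.0, 1.0, 1.0), (3 * np.pi, 3 * np.pi)
    )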
@pytest.mark.parametrize("radius", [0.0, 1.1, 2.2, 4.4, 6.6, 8.8])
def test_two_sphere_sasa(radius, tmp_path):
"""Test solvent accessible surface areas for two spheres."""
atom_tolerance = 0.02
total_tolerance = 0.02
probe_radius = 0.0
big_atom = Atom()
big_atom.radius = radius
big_atom.position = np.array([0, 0, 0])
little_atom = Atom()
little_atom.radius = 1.0
test_atom_areas = []
test_total_areas = []
ref_atom_areas = []
ref_total_areas = []
distances = np.linspace(0, (big_atom.radius + little_atom.radius), num=20)
for distance in distances:
_LOGGER.debug("Distance = %g", distance)
little_atom.position = np.array(3 * [distance / np.sqrt(3)])
xyz_path = Path(tmp_path) / f"spheres-{distance}.xyz"
sas = SolventAccessibleSurface(
[big_atom, little_atom], probe_radius, 300, xyz_path=xyz_path
)
test = np.array([sas.atom_surface_area(0), sas.atom_surface_area(1)])
test_total_areas.append(test.sum())
test_atom_areas.append(test)
ref = np.array(
two_sphere_area(big_atom.radius, little_atom.radius, distance)
)
ref_total_areas.append(ref.sum())
ref_atom_areas.append(ref)
test_atom_areas = np.array(test_atom_areas)
test_total_areas = np.array(test_total_areas)
ref_atom_areas = np.array(ref_atom_areas)
ref_total_areas = np.array(ref_total_areas)
rel_difference = np.absolute(
np.divide(test_atom_areas - ref_atom_areas, np.sum(ref_atom_areas))
)
errors = []
if np.any(rel_difference > atom_tolerance):
ref_series = pd.Series(index=distances, data=ref_total_areas)
ref_series.index.name = "Dist"
ref_df = | pd.DataFrame(index=distances, data=ref_atom_areas) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
}
)
df = DataFrame(
{
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
}
)
for idx, b in enumerate([1, 2, 3]):
df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64", "right")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_setitem_extension_types(self, obj, dtype):
# GH: 34832
expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)})
df = DataFrame({"idx": [1, 2, 3]})
df["obj"] = obj
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"ea_name",
[
dtype.name
for dtype in ea_registry.dtypes
# property would require instantiation
if not isinstance(dtype.name, property)
]
# mypy doesn't allow adding lists of different types
# https://github.com/python/mypy/issues/5492
+ ["datetime64[ns, UTC]", "period[D]"], # type: ignore[list-item]
)
def test_setitem_with_ea_name(self, ea_name):
# GH 38386
result = DataFrame([0])
result[ea_name] = [1]
expected = DataFrame({0: [0], ea_name: [1]})
tm.assert_frame_equal(result, expected)
def test_setitem_dt64_ndarray_with_NaT_and_diff_time_units(self):
# GH#7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = Series(data_ns).to_frame()
result["new"] = data_ns
expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = DataFrame({0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into a not-yet-existing column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
assert df[unit].dtype == np.dtype("M8[ns]")
assert (df[unit].values == ex_vals).all()
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_existing_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into an already-existing dt64 column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view("M8[ns]")
# We overwrite existing dt64 column with new, non-nano dt64 vals
df["dates"] = vals
assert (df["dates"].values == ex_vals).all()
def test_setitem_dt64tz(self, timezone_frame):
df = timezone_frame
idx = df["B"].rename("foo")
# setitem
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# assert that A & C are not sharing the same base (e.g. they
# are copies)
v1 = df._mgr.arrays[1]
v2 = df._mgr.arrays[2]
tm.assert_extension_array_equal(v1, v2)
v1base = v1._data.base
v2base = v2._data.base
assert v1base is None or (id(v1base) != id(v2base))
# with nan
df2 = df.copy()
df2.iloc[1, 1] = NaT
df2.iloc[1, 2] = NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(df2.dtypes, df.dtypes)
def test_setitem_periodindex(self):
rng = period_range("1/1/2000", periods=5, name="index")
df = DataFrame(np.random.randn(5, 3), index=rng)
df["Index"] = rng
rs = Index(df["Index"])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == "Index"
assert rng.name == "index"
rs = df.reset_index().set_index("index")
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_setitem_complete_column_with_array(self):
# GH#37954
df = DataFrame({"a": ["one", "two", "three"], "b": [1, 2, 3]})
arr = np.array([[1, 1], [3, 1], [5, 1]])
df[["c", "d"]] = arr
expected = DataFrame(
{
"a": ["one", "two", "three"],
"b": [1, 2, 3],
"c": [1, 3, 5],
"d": [1, 1, 1],
}
)
expected["c"] = expected["c"].astype(arr.dtype)
expected["d"] = expected["d"].astype(arr.dtype)
assert expected["c"].dtype == arr.dtype
assert expected["d"].dtype == arr.dtype
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
def test_setitem_bool_with_numeric_index(self, dtype):
# GH#36319
cols = Index([1, 2, 3], dtype=dtype)
df = DataFrame(np.random.randn(3, 3), columns=cols)
df[False] = ["a", "b", "c"]
expected_cols = Index([1, 2, 3, False], dtype=object)
if dtype == "f8":
expected_cols = Index([1.0, 2.0, 3.0, False], dtype=object)
tm.assert_index_equal(df.columns, expected_cols)
@pytest.mark.parametrize("indexer", ["B", ["B"]])
def test_setitem_frame_length_0_str_key(self, indexer):
# GH#38831
df = DataFrame(columns=["A", "B"])
other = DataFrame({"B": [1, 2]})
df[indexer] = other
expected = DataFrame({"A": [np.nan] * 2, "B": [1, 2]})
expected["A"] = expected["A"].astype("object")
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns(self, using_array_manager):
# GH#15695
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
df.loc[0, "A"] = (0, 3)
df.loc[:, "B"] = (1, 4)
df["C"] = (2, 5)
expected = DataFrame(
[
[0, 1, 2, 3, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
],
dtype="object",
)
if using_array_manager:
# setitem replaces column so changes dtype
expected.columns = cols
expected["C"] = expected["C"].astype("int64")
# TODO(ArrayManager) .loc still overwrites
expected["B"] = expected["B"].astype("int64")
else:
# set these with unique columns to be extra-unambiguous
expected[2] = expected[2].astype(np.int64)
expected[5] = expected[5].astype(np.int64)
expected.columns = cols
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns_size_mismatch(self):
# GH#39510
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
with pytest.raises(ValueError, match="Columns must be same length as key"):
df[["A"]] = (0, 3, 5)
df2 = df.iloc[:, :3] # unique columns
with pytest.raises(ValueError, match="Columns must be same length as key"):
df2[["A"]] = (0, 3, 5)
@pytest.mark.parametrize("cols", [["a", "b", "c"], ["a", "a", "a"]])
def test_setitem_df_wrong_column_number(self, cols):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=cols)
rhs = DataFrame([[10, 11]], columns=["d", "e"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df["a"] = rhs
def test_setitem_listlike_indexer_duplicate_columns(self):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
df[["a", "b"]] = rhs
expected = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
tm.assert_frame_equal(df, expected)
df[["c", "b"]] = rhs
expected = DataFrame([[10, 11, 12, 10]], columns=["a", "b", "b", "c"])
tm.assert_frame_equal(df, expected)
def test_setitem_listlike_indexer_duplicate_columns_not_equal_length(self):
# GH#39403
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11]], columns=["a", "b"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df[["a", "b"]] = rhs
def test_setitem_intervals(self):
df = DataFrame({"A": range(10)})
ser = cut(df["A"], 5)
assert isinstance(ser.cat.categories, IntervalIndex)
# B & D end up as Categoricals
# the remainder are converted to in-line objects
# containing an IntervalIndex.values
df["B"] = ser
df["C"] = np.array(ser)
df["D"] = ser.values
df["E"] = np.array(ser.values)
df["F"] = ser.astype(object)
assert is_categorical_dtype(df["B"].dtype)
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"].dtype)
assert is_interval_dtype(df["D"].cat.categories)
# These go through the Series constructor and so get inferred back
# to IntervalDtype
assert is_interval_dtype(df["C"])
assert is_interval_dtype(df["E"])
# But the Series constructor doesn't do inference on Series objects,
# so setting df["F"] doesn't get cast back to IntervalDtype
assert is_object_dtype(df["F"])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B))
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
tm.assert_index_equal(c(df.C), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df["B"], df["B"])
tm.assert_series_equal(df["B"], df["D"], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df["C"], df["C"])
tm.assert_series_equal(df["C"], df["E"], check_names=False)
def test_setitem_categorical(self):
# GH#35369
df = DataFrame({"h": Series(list("mn")).astype("category")})
df.h = df.h.cat.reorder_categories(["n", "m"])
expected = DataFrame(
{"h": Categorical(["m", "n"]).reorder_categories(["n", "m"])}
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_empty_listlike(self):
# GH#17101
index = Index([], name="idx")
result = DataFrame(columns=["A"], index=index)
result["A"] = []
expected = DataFrame(columns=["A"], index=index)
tm.assert_index_equal(result.index, expected.index)
@pytest.mark.parametrize(
"cols, values, expected",
[
(["C", "D", "D", "a"], [1, 2, 3, 4], 4), # with duplicates
(["D", "C", "D", "a"], [1, 2, 3, 4], 4), # mixed order
(["C", "B", "B", "a"], [1, 2, 3, 4], 4), # other duplicate cols
(["C", "B", "a"], [1, 2, 3], 3), # no duplicates
(["B", "C", "a"], [3, 2, 1], 1), # alphabetical order
(["C", "a", "B"], [3, 2, 1], 2), # in the middle
],
)
def test_setitem_same_column(self, cols, values, expected):
# GH#23239
df = DataFrame([values], columns=cols)
df["a"] = df["a"]
result = df["a"].values[0]
assert result == expected
def test_setitem_multi_index(self):
# GH#7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ["jim", "joe", "jolie"], ["first", "last"], ["left", "center", "right"]
cols = MultiIndex.from_product(it)
index = date_range("20141006", periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df["jim"] = df["jolie"].loc[i, ::-1]
tm.assert_frame_equal(df["jim"], df["jolie"])
np.random.shuffle(j)
df[("joe", "first")] = df[("jolie", "last")].loc[i, j]
tm.assert_frame_equal(df[("joe", "first")], df[("jolie", "last")])
np.random.shuffle(j)
df[("joe", "last")] = df[("jolie", "first")].loc[i, j]
tm.assert_frame_equal(df[("joe", "last")], df[("jolie", "first")])
@pytest.mark.parametrize(
"columns,box,expected",
[
(
["A", "B", "C", "D"],
7,
DataFrame(
[[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "D"],
[7, 8],
DataFrame(
[[1, 2, 7, 8], [3, 4, 7, 8], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "B", "C"],
np.array([7, 8, 9], dtype=np.int64),
DataFrame([[7, 8, 9], [7, 8, 9], [7, 8, 9]], columns=["A", "B", "C"]),
),
(
["B", "C", "D"],
[[7, 8, 9], [10, 11, 12], [13, 14, 15]],
DataFrame(
[[1, 7, 8, 9], [3, 10, 11, 12], [5, 13, 14, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "A", "D"],
np.array([[7, 8, 9], [10, 11, 12], [13, 14, 15]], dtype=np.int64),
DataFrame(
[[8, 2, 7, 9], [11, 4, 10, 12], [14, 6, 13, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "C"],
| DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]) | pandas.DataFrame |
import warnings
from copy import deepcopy
from datetime import datetime, timedelta
from typing import Callable, Dict, List, Optional, Union
import dateutil
import pandas as pd
from dateutil import parser as dateparser
from github.Commit import Commit
from github.CommitStats import CommitStats
from github.GitAuthor import GitAuthor
from github.GitCommit import GitCommit
from github.GitRelease import GitRelease
from github.Issue import Issue
from github.NamedUser import NamedUser
from github.Repository import Repository
from github.Stargazer import Stargazer
from projectreport.analyzer.ts.base import TimeSeriesAnalysis
from projectreport.analyzer.ts.types import DictList
from projectreport.tools.monkey_patch_github import (
NoMorePagesAllowedException,
monkey_patch_github_obj_for_throttling,
)
from projectreport.version import (
add_major_minor_patch_changed_to_df,
add_major_minor_patch_to_df,
)
class GithubAnalysis(TimeSeriesAnalysis):
analysis_attrs = ["repo"]
def __init__(self, repo: Repository, auto_throttle: bool = True):
self.repo = deepcopy(repo)
self.auto_throttle = auto_throttle
if self.auto_throttle:
monkey_patch_github_obj_for_throttling(self.repo)
@property
def event_functions(self) -> Dict[str, Callable[[Repository], DictList]]:
funcs: Dict[str, Callable[[Repository], DictList]] = dict(
commits=commit_stats_from_repo,
issues=issue_stats_from_repo,
stars=stars_from_repo,
releases=releases_from_repo,
)
return funcs
@property
def count_functions(self) -> Dict[str, Callable[[DictList, str], DictList]]:
funcs: Dict[str, Callable[[DictList, str], DictList]] = dict(
commits=commit_loc_counts_from_commit_events,
issues=issue_counts_from_issue_events,
stars=star_counts_from_star_events,
releases=release_counts_from_release_events,
)
return funcs
def commit_stats_from_repo(repo: Repository, author_stats: bool = True) -> DictList:
all_data = []
commit: Commit
try:
for commit in repo.get_commits():
stats: CommitStats = commit.stats
author: Optional[Union[NamedUser, GitAuthor]] = _get_author_from_commit(
commit
)
committer: Optional[
Union[NamedUser, GitAuthor]
] = _get_committer_from_commit(commit)
data_dict = dict(
sha=commit.sha,
last_modified=dateparser.parse(commit.last_modified)
if commit.last_modified is not None
else None,
additions=stats.additions,
deletions=stats.deletions,
url=commit.html_url,
)
if author_stats:
if author is not None:
data_dict.update(_get_data_from_named_user_or_git_author(author))
if committer is not None:
data_dict.update(
_get_data_from_named_user_or_git_author(
committer, is_committer=True
)
)
all_data.append(data_dict)
except NoMorePagesAllowedException:
warnings.warn(
f"Could not collect full history for {repo.name} commits as Github "
f"limits the amount of history than can be pulled"
)
return all_data # type: ignore
def commit_loc_counts_from_commit_events(
commits: DictList, freq: str = "d"
) -> DictList:
event_df = pd.DataFrame(commits)
event_df["net"] = event_df["additions"] - event_df["deletions"]
event_df["change"] = event_df["additions"] + event_df["deletions"]
start = _get_end_of_period(event_df["last_modified"].min(), freq)
end = event_df["last_modified"].max()
dates = | pd.date_range(start=start, end=end, freq=freq) | pandas.date_range |
import numpy as np
import pytest
from pandas import DataFrame
import pandas._testing as tm
from pandas.io.excel import ExcelWriter
from pandas.io.formats.excel import ExcelFormatter
@pytest.mark.parametrize(
"engine",
[
pytest.param(
"xlwt",
marks=pytest.mark.xfail(
reason="xlwt does not support openpyxl-compatible style dicts"
),
),
"xlsxwriter",
"openpyxl",
],
)
def test_styler_to_excel(engine):
def style(df):
# TODO: RGB colors not supported in xlwt
return DataFrame(
[
["font-weight: bold", "", ""],
["", "color: blue", ""],
["", "", "text-decoration: underline"],
["border-style: solid", "", ""],
["", "font-style: italic", ""],
["", "", "text-align: right"],
["background-color: red", "", ""],
["number-format: 0%", "", ""],
["", "", ""],
["", "", ""],
["", "", ""],
],
index=df.index,
columns=df.columns,
)
def assert_equal_style(cell1, cell2, engine):
if engine in ["xlsxwriter", "openpyxl"]:
pytest.xfail(
reason=(f"GH25351: failing on some attribute comparisons in {engine}")
)
# TODO: should find a better way to check equality
assert cell1.alignment.__dict__ == cell2.alignment.__dict__
assert cell1.border.__dict__ == cell2.border.__dict__
assert cell1.fill.__dict__ == cell2.fill.__dict__
assert cell1.font.__dict__ == cell2.font.__dict__
assert cell1.number_format == cell2.number_format
assert cell1.protection.__dict__ == cell2.protection.__dict__
def custom_converter(css):
# use bold iff there is custom style attached to the cell
if css.strip(" \n;"):
return {"font": {"bold": True}}
return {}
pytest.importorskip("jinja2")
pytest.importorskip(engine)
# Prepare spreadsheets
df = DataFrame(np.random.randn(11, 3))
with tm.ensure_clean(".xlsx" if engine != "xlwt" else ".xls") as path:
with ExcelWriter(path, engine=engine) as writer:
df.to_excel(writer, sheet_name="frame")
df.style.to_excel(writer, sheet_name="unstyled")
styled = df.style.apply(style, axis=None)
styled.to_excel(writer, sheet_name="styled")
| ExcelFormatter(styled, style_converter=custom_converter) | pandas.io.formats.excel.ExcelFormatter |
"""
The texthero.nlp module supports common NLP tasks such as named_entities, noun_chunks, ... on Pandas Series and DataFrame.
"""
import spacy
import pandas as pd
from texthero._types import TextSeries, InputSeries
@InputSeries(TextSeries)
def named_entities(s: TextSeries, package="spacy") -> pd.Series:
"""
Return named-entities.
Return a Pandas Series where each row contains a list of tuples
with information about the named entities in the row's document.
Tuple: (`entity'name`, `entity'label`, `starting character`,
`ending character`)
Under the hood, `named_entities` makes use of `Spacy name entity
recognition <https://spacy.io/usage/linguistic-features#named-entities>`_
List of labels:
- `PERSON`: People, including fictional.
- `NORP`: Nationalities or religious or political groups.
- `FAC`: Buildings, airports, highways, bridges, etc.
- `ORG` : Companies, agencies, institutions, etc.
- `GPE`: Countries, cities, states.
- `LOC`: Non-GPE locations, mountain ranges, bodies of water.
- `PRODUCT`: Objects, vehicles, foods, etc. (Not services.)
- `EVENT`: Named hurricanes, battles, wars, sports events, etc.
- `WORK_OF_ART`: Titles of books, songs, etc.
- `LAW`: Named documents made into laws.
- `LANGUAGE`: Any named language.
- `DATE`: Absolute or relative dates or periods.
- `TIME`: Times smaller than a day.
    - `PERCENT`: Percentage, including "%".
- `MONEY`: Monetary values, including unit.
- `QUANTITY`: Measurements, as of weight or distance.
- `ORDINAL`: “first”, “second”, etc.
- `CARDINAL`: Numerals that do not fall under another type.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series("Yesterday I was in NY with <NAME>")
>>> hero.named_entities(s)[0] # doctest: +NORMALIZE_WHITESPACE
[('Yesterday', 'DATE', 0, 9), ('NY', 'GPE', 19, 21),
('<NAME>', 'PERSON', 27, 41)]
"""
entities = []
nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser"])
# nlp.pipe is now 'ner'
for doc in nlp.pipe(s.astype("unicode").values, batch_size=32):
entities.append(
[(ent.text, ent.label_, ent.start_char, ent.end_char) for ent in doc.ents]
)
return pd.Series(entities, index=s.index)
@InputSeries(TextSeries)
def noun_chunks(s: TextSeries) -> pd.Series:
"""
Return noun chunks (noun phrases).
Return a Pandas Series where each row contains a tuple that has information
regarding the noun chunk.
Tuple: (`chunk'text`, `chunk'label`, `starting index`, `ending index`)
    Noun chunks or noun phrases are phrases that have a noun at their head or
    nucleus, i.e., they contain the noun and other words that describe that
    noun. A detailed explanation on noun chunks:
    https://en.wikipedia.org/wiki/Noun_phrase. Internally `noun_chunks` makes
    use of Spacy's dependency parsing:
    https://spacy.io/usage/linguistic-features#dependency-parse
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series("The spotted puppy is sleeping.")
>>> hero.noun_chunks(s)
0 [(The spotted puppy, NP, 0, 17)]
dtype: object
"""
noun_chunks = []
nlp = spacy.load("en_core_web_sm", disable=["ner"])
# nlp.pipe is now "tagger", "parser"
for doc in nlp.pipe(s.astype("unicode").values, batch_size=32):
noun_chunks.append(
[
(chunk.text, chunk.label_, chunk.start_char, chunk.end_char)
for chunk in doc.noun_chunks
]
)
return pd.Series(noun_chunks, index=s.index)
@InputSeries(TextSeries)
def count_sentences(s: TextSeries) -> pd.Series:
"""
Count the number of sentences per cell in a Pandas Series.
Return a new Pandas Series with the number of sentences per cell.
This makes use of the SpaCy `sentencizer
<https://spacy.io/api/sentencizer>`_
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(
... ["Yesterday I was in NY with <NAME>. Great story...",
... "This is the F.B.I.! What? Open up!"])
>>> hero.count_sentences(s)
0 2
1 3
dtype: int64
"""
number_of_sentences = []
nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser", "ner"])
nlp.add_pipe(nlp.create_pipe("sentencizer")) # Pipe is only "sentencizer"
for doc in nlp.pipe(s.values, batch_size=32):
sentences = len(list(doc.sents))
number_of_sentences.append(sentences)
return | pd.Series(number_of_sentences, index=s.index) | pandas.Series |
# Copyright 2021 AI Singapore. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, List, Dict
import math
import pandas as pd
import dash
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from rarity.app import app
from rarity.data_loader import CSVDataLoader, DataframeLoader
from rarity.interpreters.structured_data import IntLossClusterer
from rarity.visualizers import loss_clusters as viz_clusters
from rarity.visualizers import shared_viz_component as viz_shared
from rarity.utils import style_configs
from rarity.utils.common_functions import (is_active_trace, is_reset, is_regression, is_classification,
detected_legend_filtration, detected_single_xaxis, detected_single_yaxis,
detected_bimodal, get_min_max_offset, get_min_max_cluster, get_effective_xaxis_cluster,
get_adjusted_dfs_based_on_legend_filtration, conditional_sliced_df, insert_index_col,
dataframe_prep_on_model_count_by_yaxis_slice, new_dataframe_prep_based_on_effective_index)
def fig_plot_offset_clusters_reg(data_loader: Union[CSVDataLoader, DataframeLoader], num_cluster: int):
'''
    For use in regression tasks only.
    Outputs the collated info packs used to display the final graph objects by cluster group, along with the calculated silhouette scores
Arguments:
data_loader (:class:`~rarity.data_loader.CSVDataLoader` or :class:`~rarity.data_loader.DataframeLoader`):
Class object from data_loader module
num_cluster (int):
            Number of clusters to form
Returns:
Compact outputs consist of the followings
- df (:obj:`~pd.DataFrame`): dataframes for overview visualization need with offset values included
- fig_obj_cluster (:obj:`~plotly.graph_objects.Figure`): figure displaying violin plot outlining cluster groups by offset values
- ls_cluster_score (:obj:`List[str]`): list of silhouette scores, indication of clustering quality
- fig_obj_elbow (:obj:`~plotly.graph_objects.Figure`): figure displaying line plot outlining the change in sum of squared distances \
along the cluster range
'''
df, ls_cluster_score, ls_cluster_range, ls_ssd = IntLossClusterer(data_loader).xform(num_cluster, None, 'All')
models = data_loader.get_model_list()
analysis_type = data_loader.get_analysis_type()
fig_obj_cluster = viz_clusters.plot_offset_clusters(df, analysis_type)
fig_obj_elbow = viz_clusters.plot_optimum_cluster_via_elbow_method(ls_cluster_range, ls_ssd, models)
return df, fig_obj_cluster, ls_cluster_score, fig_obj_elbow
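# Hedged usage sketch (constructor arguments omitted because they depend on the
# data_loader module's API; see CSVDataLoader / DataframeLoader):
#     data_loader = CSVDataLoader(...)  # configured for a regression task
#     df, fig_cluster, cluster_scores, fig_elbow = fig_plot_offset_clusters_reg(
#         data_loader, num_cluster=4)
#     fig_cluster.show()  # violin plot of offset values by cluster group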
def fig_plot_logloss_clusters_cls(data_loader: Union[CSVDataLoader, DataframeLoader],
num_cluster: int,
log_func: math.log = math.log,
specific_dataset: str = 'All'):
'''
    For use in classification tasks only.
    Outputs the collated info packs used to display the final graph objects by cluster group, along with the calculated silhouette scores
Arguments:
data_loader (:class:`~rarity.data_loader.CSVDataLoader` or :class:`~rarity.data_loader.DataframeLoader`):
Class object from data_loader module
num_cluster (int):
            Number of clusters to form
        log_func (:obj:`math.log`):
            Logarithm function used to calculate the log-loss between yTrue and yPred
        specific_dataset (str):
            Defaults to 'All', meaning all miss-predicted labels are included. Other options depend on the class labels present in the dataset
Returns:
Compact outputs consist of the followings
- ls_dfs_viz (:obj:`List[~pd.DataFrame]`): dataframes for overview visualization need with offset values included
- fig_obj_cluster (:obj:`~plotly.graph_objects.Figure`): figure displaying violin plot outlining cluster groups by offset values
- ls_cluster_score (:obj:`List[str]`): list of silhouette scores, indication of clustering quality
- fig_obj_elbow (:obj:`~plotly.graph_objects.Figure`): figure displaying line plot outlining the change in sum of squared distances \
along the cluster range
- ls_class_labels (:obj:`List[str]`): list of all class labels
- ls_class_labels_misspred (:obj:`List[str]`): list of class labels with minimum of 1 miss-prediction
- df_features (:obj:`~pandas.DataFrame`): dataframe storing all features used in dataset
'''
compact_outputs = IntLossClusterer(data_loader).xform(num_cluster, log_func, specific_dataset)
ls_dfs_viz, ls_class_labels, ls_class_labels_misspred = compact_outputs[0], compact_outputs[1], compact_outputs[2]
ls_cluster_score, ls_cluster_range, ls_ssd = compact_outputs[3], compact_outputs[4], compact_outputs[5]
df_features = data_loader.get_features()
analysis_type = data_loader.get_analysis_type()
models = data_loader.get_model_list()
fig_obj_cluster = viz_clusters.plot_logloss_clusters(ls_dfs_viz, analysis_type)
fig_obj_elbow = viz_clusters.plot_optimum_cluster_via_elbow_method(ls_cluster_range, ls_ssd, models)
return ls_dfs_viz, fig_obj_cluster, ls_cluster_score, fig_obj_elbow, ls_class_labels, ls_class_labels_misspred, df_features
def table_with_relayout_datapoints(data: dash_table.DataTable, customized_cols: List[str], header: Dict, exp_format: str):
'''
Create table outlining dataframe content
Arguments:
data (:obj:`~dash_table.DataTable`):
dictionary like format storing dataframe info under 'record' key
customized_cols (:obj:`List[str]`):
list of customized column names
header (:obj:`Dict`):
dictionary format storing the style info for table header
exp_format (str):
text info indicating the export format
Returns:
:obj:`~dash_table.DataTable`:
table object outlining the dataframe content with specific styles
'''
tab_obj = viz_shared.reponsive_table_to_filtered_datapoints(data, customized_cols, header, exp_format)
return tab_obj
def convert_cluster_relayout_data_to_df_reg(relayout_data: Dict, df: pd.DataFrame, models: List[str]):
'''
    For use in regression tasks only.
    Convert the raw relayout selection range returned by the user's interaction with the plotly graph into a dataframe fit for visualization
Arguments:
relayout_data (:obj:`Dict`):
dictionary like data containing selection range indices returned from plotly graph
df (:obj:`~pandas.DataFrame`):
dataframe tap-out from interpreters pipeline
models (:obj:`List[str]`):
model names defined by user during spin-up of Tenjin app
Returns:
:obj:`~pandas.DataFrame`:
dataframe fit for the responsive table-graph filtering
'''
if detected_single_xaxis(relayout_data):
x_cluster = get_effective_xaxis_cluster(relayout_data)
df_filtered_x = df[df[f'cluster_{models[0]}'] == x_cluster]
if detected_bimodal(models):
df_filtered_x_m2 = df[df[f'cluster_{models[1]}'] == x_cluster]
df_filtered_x = pd.concat([df_filtered_x, df_filtered_x_m2]).drop_duplicates()
y_start_idx, y_stop_idx = get_min_max_offset(df_filtered_x, models)
df_final = dataframe_prep_on_model_count_by_yaxis_slice(df_filtered_x, models, y_start_idx, y_stop_idx)
elif detected_single_yaxis(relayout_data):
y_start_idx = relayout_data['yaxis.range[0]']
y_stop_idx = relayout_data['yaxis.range[1]']
df_filtered_y = dataframe_prep_on_model_count_by_yaxis_slice(df, models, y_start_idx, y_stop_idx)
x_start_idx, x_stop_idx = get_min_max_cluster(df_filtered_y, models, y_start_idx, y_stop_idx)
x_start_idx = x_start_idx if x_start_idx >= 1 else 1
x_stop_idx = x_stop_idx if x_stop_idx <= 8 else 8
condition_min_cluster = df_filtered_y[f'cluster_{models[0]}'] >= x_start_idx
condition_max_cluster = df_filtered_y[f'cluster_{models[0]}'] <= x_stop_idx
df_final = conditional_sliced_df(df_filtered_y, condition_min_cluster, condition_max_cluster)
if detected_bimodal(models):
condition_min_cluster_m2 = df_filtered_y[f'cluster_{models[1]}'] >= x_start_idx
condition_max_cluster_m2 = df_filtered_y[f'cluster_{models[1]}'] <= x_stop_idx
df_final_m2 = conditional_sliced_df(df_filtered_y, condition_min_cluster_m2, condition_max_cluster_m2)
df_final = pd.concat([df_final, df_final_m2]).drop_duplicates()
else: # a complete range is provided by user (with proper x-y coordinates)
x_cluster = get_effective_xaxis_cluster(relayout_data)
y_start_idx = relayout_data['yaxis.range[0]']
y_stop_idx = relayout_data['yaxis.range[1]']
df_filtered = df[df[f'cluster_{models[0]}'] == x_cluster]
if detected_bimodal(models):
df_filtered_m2 = df[df[f'cluster_{models[1]}'] == x_cluster]
            df_filtered = pd.concat([df_filtered, df_filtered_m2]).drop_duplicates()
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 16 15:33:07 2018
@author: ysye
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.preprocessing import minmax_scale
from math import log
from sklearn.metrics import roc_curve, auc
from scipy import stats
from matplotlib.colors import ListedColormap
from scipy.spatial import distance
#os.chdir('E:/Users/yusen/Project/Project3/Python code/CICRLET_package/src/CIRCLET')
from . import CIRCLET_DEFINE
from . import CIRCLET_CORE
bcolors_3=['#EF4F50','#587FBF','#CCCCCC']
bcolors_6=['#587FBF','#3FA667','#EF4F50','#FFAAA3','#414C50','#D3D3D3']
bcolors_12=['#CC1B62','#FBBC00','#0E8934','#AC1120','#EA7B00','#007AB7',
'#9A35B4','#804E1F' ,'#BEAB81','#D32414','#75AB09','#004084']
def change_index(passed_qc_sc_DF_cond,soft_add,software,UBI=['1CDU', '1CD_G1', '1CD_eS', '1CD_mS', '1CD_lS_G2']):
"""
    Measure how frequently the experimentally determined single-cell labels change along the inferred time-series ordering.
"""
#read order of single cell
if (software=='wishbone') | (software=='CIRCLET'):
phenotime=pd.read_table(soft_add,header=None,index_col=0)
phenotime.columns=['Pseudotime']
phenotime['cond']=passed_qc_sc_DF_cond
ordIndex=phenotime.sort_values(by='Pseudotime')
        cond_order=[cond for cond in ordIndex['cond'] if cond in UBI]
elif software=='multi-metric':
passed_qc_sc_DF=pd.read_table(soft_add,header=0,index_col=0)
phenotime=passed_qc_sc_DF[['ord','cond']]
ordIndex=phenotime.sort_values(by='ord')
cond_order=[cond for cond in ordIndex['cond'] if cond in UBI]
#generate penalty table
penal_table=np.ones((len(UBI),len(UBI)))
for loc in range(len(UBI)):
penal_table[loc,loc]=0
#if loc==0:
# penal_table[loc,1]=0
# penal_table[loc,2]=0
penal_table=(np.triu(penal_table)+np.triu(penal_table).T)
penalty_sum=0
sc_number=len(cond_order)
for k,cond in enumerate(cond_order):
phase1=UBI.index(cond)
phase2=UBI.index(cond_order[(k+1)%sc_number])
penalty_sum+=penal_table[phase1,phase2]
change_score=1-(penalty_sum-4)/(sc_number-4)
return change_score
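# Worked sketch of the scoring idea used in change_index (toy labels, not real data):
# count how often consecutive cells in the inferred circular ordering switch phase,
# subtract the switches that are unavoidable even for a perfect ordering (one per phase
# boundary; 4 in the original code), and rescale so a perfectly sorted series scores 1.
def _example_change_score(cond_order=('G1', 'G1', 'S', 'S', 'G2', 'G2'), phases=('G1', 'S', 'G2')):
    switches = 0
    for k, cond in enumerate(cond_order):
        nxt = cond_order[(k + 1) % len(cond_order)]
        switches += 0 if phases.index(cond) == phases.index(nxt) else 1
    unavoidable = len(phases)  # a circular, perfectly sorted series still crosses each phase boundary once
    return 1 - (switches - unavoidable) / (len(cond_order) - unavoidable)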
def evaluate_continue_change(con_distri_features,soft_add,software):
#roc_auc_DF=evaluation_ranks(passed_qc_sc_DF_cond,soft_add,software,UBI=UBIs[1:5])
#plot_evaluate_heat(passed_qc_sc_DF_RO,soft_add,con_distri_features,software,UBIs)
if software=='multi-metric':
passed_qc_sc_DF=pd.read_table(soft_add,header=0,index_col=0)
phenotime=passed_qc_sc_DF[['ord']]
elif (software=='wishbone') | (software=='CIRCLET'):
phenotime=pd.read_table(soft_add,header=None,index_col=0)
phenotime.columns=['Pseudotime']
ordIndex=phenotime.sort_values(by='Pseudotime')
old_sc_name=ordIndex.index[-1]
sc_name=ordIndex.index[0]
corr_list=list()
for sc_name in ordIndex.index:
x=con_distri_features.loc[old_sc_name]
y=con_distri_features.loc[sc_name]
old_sc_name=sc_name
#temp=stats.pearsonr(x,y)[0]
#temp=distance.cosine(x,y)
#temp=np.abs(distance.cosine(x,y)-1)
temp=np.abs(distance.correlation(x,y)-1)
corr_list.append(temp)
evaluation_value=np.mean(corr_list)
#print(evaluation_value)
return evaluation_value
def computing_AUC(Rank_list):
"""
    Computing AUC
"""
y_true=Rank_list['bench']
y_score=np.max(Rank_list['Pseudotime'])-Rank_list['Pseudotime']
fpr,tpr,threshold = roc_curve(y_true,y_score)
roc_auc = auc(fpr, tpr)
if roc_auc<0.5:
roc_auc=1-roc_auc
#plt.plot(fpr,tpr)
return roc_auc
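# Hedged example with toy data (not a real ranking): cells from the reference phase
# ('bench' == 1) receive earlier pseudotimes, so the resulting AUC is well above 0.5.
def _example_computing_AUC():
    toy = pd.DataFrame({'bench': [1, 1, 1, 0, 0, 0],
                        'Pseudotime': [0.1, 0.2, 0.3, 0.7, 0.8, 0.9]})
    return computing_AUC(toy)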
#soft_con_distri_Res_add=soft_add
def evaluation_ranks(passed_qc_sc_DF_cond,soft_con_distri_Res_add,software,UBI,key='not'):
"""
Calculate the AUC curve values according to the order of the rankings
between the two UBI pairs to obtain the distribution of AUC values.
"""
#UsingBatchIDs=['1CDU', '1CDX1', '1CDX2', '1CDX3', '1CDX4', '1CDES']
#UBIs=['1CDU', '1CD_G1', '1CD_eS', '1CD_mS', '1CD_lS_G2', 'NoSort']
if software=='multi-metric':
        passed_qc_sc_DF=pd.read_table(soft_con_distri_Res_add,header=0,index_col=0)
import pandas as pd
from os import listdir
filenames = listdir("/home/debian/datalake/sirene/2020-08/rna/")
globdf = pd.DataFrame(columns=['id', 'id_ex', 'siret', 'rup_mi', 'gestion', 'date_creat', 'date_decla',
'date_publi', 'date_disso', 'nature', 'groupement', 'titre',
'titre_court', 'objet', 'objet_social1', 'objet_social2',
'adrs_complement', 'adrs_numvoie', 'adrs_repetition', 'adrs_typevoie',
'adrs_libvoie', 'adrs_distrib', 'adrs_codeinsee', 'adrs_codepostal',
'adrs_libcommune', 'adrg_declarant', 'adrg_complemid',
'adrg_complemgeo', 'adrg_libvoie', 'adrg_distrib', 'adrg_codepostal',
'adrg_achemine', 'adrg_pays', 'dir_civilite', 'siteweb', 'publiweb',
'observation', 'position', 'maj_time', 'mois_crea', 'mois_ferm',
'objet_social_parent_id', 'objet_social_lib', 'objet_social_parent_lib',
'reg', 'dep'])
dfnom = pd.read_csv("/home/debian/projects/postgres-playground/db/utils/rna_nomenclature_objet_social.csv",dtype=str)
dfnom2 = dfnom[dfnom['objet_social_parent_id'] == dfnom['objet_social_id']]
dfnom2 = dfnom2.rename(columns={'objet_social_lib':'objet_social_parent_lib'})
dfnom2 = dfnom2.drop(columns=['objet_social_id'])
dfnom = dfnom[dfnom['objet_social_parent_id'] != dfnom['objet_social_id']]
dfnom = pd.merge(dfnom,dfnom2,on='objet_social_parent_id',how='left')
dfnom = dfnom.rename(columns={'objet_social_id':'objet_social1'})
dfnom =dfnom.drop_duplicates(subset={"objet_social1"})
dfcom = pd.read_csv("/home/debian/projects/postgres-playground/db/utils/communes-2019.csv",dtype=str)
dfcom = dfcom[['com','reg','dep']]
dfcom = dfcom.rename(columns={'com':'adrs_codeinsee'})
dfcom = dfcom.drop_duplicates(subset={'adrs_codeinsee'})
for filename in filenames:
print(filename)
    df = pd.read_csv("/home/debian/datalake/sirene/2020-08/rna/rna_waldec_20200801_dpt_75.csv",sep=";",encoding="Latin-1",dtype=str)
'''
Regression test for #226.
'''
import os
import pandas as pd
import pytest
from parsl.app.app import bash_app, python_app
from parsl.tests.configs.local_threads import config
local_config = config
class Foo(object):
def __init__(self, x):
self.x = x
def __eq__(self, value):
raise NotImplementedError
bar = Foo(1)
@python_app
def get_foo_x(a, b=bar, c=None):
return b.x
data = pd.DataFrame({'x': [None, 2, [3]]})
#Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def initialize_parameters_deep(layer_dims, n):
"""
This function takes the numbers of layers to be used to build our model as
    input and outputs a dictionary containing weights and biases as parameters
to be learned during training
The number in the layer_dims corresponds to number of neurons in
corresponding layer
@params
Input to this function is layer dimensions
layer_dims = List contains number of neurons in one respective layer
and [len(layer_dims) - 1] gives L Layer Neural Network
Returns:
parameters = Dictionary containing parameters "W1", "b1", . . ., "WL", "bL"
where Wl = Weight Matrix of shape (layer_dims[l-1],layer_dims[l])
bl = Bias Vector of shape (1,layer_dims[l])
"""
# layers_dims = [250, 128, 128, 5] # 3-layer model
np.random.seed(3)
parameters = {}
L = len(layer_dims) # Number of layers in the network
for l in range(1, L): # It starts with 1 hence till len(layer_dims)
        # Initialize weights randomly with sqrt(n / fan_in) scaling to break symmetry between units
parameters['W' + str(l)] = np.random.randn(layer_dims[l-1],layer_dims[l])*np.sqrt(n / layer_dims[l-1])
# Initialize bias vector with zeros
parameters['b' + str(l)] = np.zeros((1,layer_dims[l]))
# Making sure the shape is correct
assert(parameters['W' + str(l)].shape == (layer_dims[l-1], layer_dims[l]))
assert(parameters['b' + str(l)].shape == (1,layer_dims[l]))
# parameters = {"W [key]": npnp.random.randn(layer_dims[l-1],layer_dims[l]) [value]}
return parameters
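# Hedged usage sketch: a hypothetical 250-128-5 network (the layer sizes are assumptions).
# With n = 2 the multiplier above becomes sqrt(2 / fan_in), i.e. He-style scaling.
def _example_initialize_parameters():
    params = initialize_parameters_deep([250, 128, 5], n=2)
    assert params['W1'].shape == (250, 128)
    assert params['b1'].shape == (1, 128)
    assert params['W2'].shape == (128, 5)
    return params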
# Activation functions and their derivaties:
def sigmoid(Z):
"""
This function takes the forward matrix Z (Output of the linear layer) as the
input and applies element-wise Sigmoid activation
@params
Z = numpy array of any shape
Returns:
A = Output of sigmoid(Z), same shape as Z, for the last layer this A is the
output value from our model
cache = Z is cached, this is useful during backpropagation
"""
A = 1/(1+np.exp(-Z)) # Using numpy apply sigmoid to Z
cache = Z # Cache the matrix Z
return A, cache
def relu(Z):
"""
This function takes the forward matrix Z as the input and applies element
wise Relu activation
@params
Z = Output of the linear layer, of any shape
Returns:
A = Post-activation parameter, of the same shape as Z
cache = Z is cached, this is useful during backpropagation
"""
A = np.maximum(0,Z) # Element-wise maximum of array elements
# Making sure shape of A is same as shape of Z
assert(A.shape == Z.shape)
cache = Z # Cache the matrix Z
return A, cache
def relu_backward(dA, cache):
"""
This function implements the backward propagation for a single Relu unit
@params
dA = post-activation gradient, of any shape
cache = Retrieve cached Z for computing backward propagation efficiently
Returns:
dZ = Gradient of the cost with respect to Z
"""
#Z = cache
dZ = np.array(dA) # Just converting dz to a correct object.
#print(dZ.all()==dA.all())
#print(dZ.shape, Z.shape)
# When z <= 0, you set dz to 0 as well, as relu sets negative values to 0
dZ[cache <= 0] = 0
# Making sure shape of dZ is same as shape of Z
assert (dZ.shape == cache.shape)
return dZ
def sigmoid_backward(dA, cache):
"""
This function implements the backward propagation for a single Sigmoid unit
@params
dA = post-activation gradient, of any shape
cache = Retrieve cached Z for computing backward propagation efficiently
Returns:
dZ = Gradient of the cost with respect to Z
"""
Z = cache
s = 1/(1+np.exp(-Z)) # Using numpy apply Sigmoid to Z
dZ = dA * s * (1-s) # This is derivatie of Sigmoid function
# Making sure shape of dZ is same as shape of Z
assert (dZ.shape == Z.shape)
return dZ
# Softmax
def softmax(Z):
"""
This fucntion caculates the softmax values element wise.
Here I've implemented a stable softmax function
@params
Z = Output of the linear layer, of any shape
Returns:
Elementwise exponential values of the matriz Z
"""
exp_val = np.exp(Z - np.max(Z, axis=1,keepdims=True))
softmax_vals = exp_val/np.sum(exp_val,axis=1,keepdims=True)
return softmax_vals
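# Quick sanity sketch (toy logits; shapes are assumptions): every row of the stable
# softmax sums to 1, and very large logits do not overflow because the row max is
# subtracted before exponentiation.
def _example_softmax():
    logits = np.array([[1.0, 2.0, 3.0], [1000.0, 1000.0, 1000.0]])
    probs = softmax(logits)
    assert np.allclose(probs.sum(axis=1), 1.0)
    return probs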
def softmax_loss(Z,y,act_cache):
"""
This function takes the forward matrix Z as the input and applies element
wise Softmax activation. It even calculates the cross entropy loss and
derivative of cross entropy loss function
@params
Z = Output of the linear layer, of any shape
Y = Ground Truth/ True "label" vector (containing classes 0 and 1)
shape = (number of examples, 1)
Returns:
log_loss = Returns cross entropy loss: −∑ylog(probs)
dZ = Gradient of the cost with respect to Z
"""
# Forward Pass
# Here we'll implement a stable softmax
m = y.shape[0]
cache = Z
A = softmax(Z)
#Z,_ = act_cache
A_back = softmax(act_cache)
y = y.flatten()
log_loss = np.sum(-np.log(A[range(m), y]))/m
# Backward Pass
dZ = A_back.copy()
dZ[range(m), y] -= 1
dZ /= m
#dZ = (A - y)/m
assert(A.shape == Z.shape)
assert (dZ.shape == Z.shape)
return A, cache, log_loss, dZ
def linear_forward(A, W, b):
"""
    This function implements the forward propagation equation Z = A.dot(W) + b
@params
A = Activations from previous layer (or input data),
shape = (number of examples, size of previous layer)
W = Weight matrix of shape (size of previous layer,size of current layer)
b = Bias vector of shape (1, size of the current layer)
Returns:
Z = The input of the activation function, also called pre-activation
parameter, shape = (number of examples, size of current layer)
cache = Tuple containing "A", "W" and "b";
stored for computing the backward pass efficiently
"""
# print(A.shape, W.shape)
Z = A.dot(W) + b # Here b gets broadcasted
#print("Debug",Z.shape,A.shape[0],W.shape[1])
# Making sure shape of Z = (number of examples, size of current layer)
assert(Z.shape == (A.shape[0], W.shape[1]))
cache = (A, W, b) # Cache all the three params
return Z, cache
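# Shape sketch (toy sizes are assumptions): 4 examples with 3 features feeding a layer
# of 5 units, so Z = A.dot(W) + b comes out as (number of examples, size of current layer).
def _example_linear_forward():
    A_demo = np.random.randn(4, 3)
    W_demo = np.random.randn(3, 5)
    b_demo = np.zeros((1, 5))
    Z_demo, cache_demo = linear_forward(A_demo, W_demo, b_demo)
    assert Z_demo.shape == (4, 5)
    return Z_demo, cache_demo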
def linear_activation_forward(A_prev, W, b, y,keep_prob,predict_result,activation):
"""
This function implements forward propagation LINEAR -> ACTIVATION layer
@params
A_prev = Activations from previous layer (or input data),
shape = (number of examples, size of previous layer)
W = Weight matrix of shape (size of previous layer,size of current layer)
b = Bias vector of shape (1, size of the current layer)
Y = Ground Truth/ True "label" vector (containing classes 0 and 1)
shape = (number of examples, 1)
keep_prob = Percentage of neurons to be kept active
predict_result = False while training, True when predicting the ground truth
                     values (True only when ground truth values are not present)
Must be kept False if you have ground truth values
while predicting
activation = The activation to be used in this layer,
stored as a text string: "sigmoid" or "relu"
Returns:
When activation is Sigmoid:
A = The output of the activation function, also called the post-activation
value
cache = Tuple containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
When activation is Softmax and Y is present during training and prediction:
A = The output of the activation function, also called the post-activation
value
cache = Tuple containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
    log_loss = Cross Entropy loss
dZ = Derivative of cross entropy softmax
When activation is Softmax and Y is not present during prediction:
Z = The input of the activation function, also called pre-activation
parameter, shape = (number of examples, size of current layer)
"""
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache"
Z, linear_cache = linear_forward(A_prev, W, b)
A, activate_cache = sigmoid(Z)
D = np.ones((A.shape[0],A.shape[1]))
A = np.multiply(A,D)
activation_cache = (activate_cache,D)
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache"
Z, linear_cache = linear_forward(A_prev, W, b)
A, activate_cache = relu(Z)
D = np.random.rand(A.shape[0],A.shape[1])
#print("Relu Function ",(A.shape, D.shape))
D = (D < keep_prob).astype(int)
#print("Relu D", D.shape)
A = np.multiply(A,D)
A /= keep_prob
activation_cache = (activate_cache,D)
#print("Relu Activation cache", len(activation_cache))
elif activation == "softmax":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache"
Z, linear_cache = linear_forward(A_prev, W, b)
#print("Z values",Z)
if predict_result: return Z
A, activate_cache, log_loss, dZ = softmax_loss(Z,y,Z.copy())
D = np.ones((A.shape[0],A.shape[1]))
#print("Softmax D", D.shape)
A = np.multiply(A,D)
activation_cache = (activate_cache,D)
#print("Softmax Activation cache", len(activation_cache))
#print("A values", A)
# Making sure shape of A = (number of examples, size of current layer)
assert (A.shape == (A_prev.shape[0],W.shape[1]))
cache = (linear_cache, activation_cache)
#print(cache)
if activation=="softmax":
return A, cache,log_loss,dZ
else:
return A, cache
def L_model_forward(X, parameters, y,Output_classes,keep_prob,predict_result,activation_type):
#print(y.shape)
"""
This function implements forward propagation as following:
[LINEAR->RELU]*(L-1) -> LINEAR -> SIGMOID computation
So we apply Relu to all the hidden layers and Sigmoid to the output layer
@params
X = Data, numpy array of shape (number of examples, number of features)
parameters = Output of initialize_parameters_deep() function
Y = Ground Truth/ True "label" vector (containing classes 0 and 1)
shape = (number of examples, 1)
keep_prob = Percentage of neurons to be kept active
predict_result = False while training, True when predicting the ground truth
                     values (True only when ground truth values are not present)
Must be kept False if you have ground truth values
while predicting
activation_type = The activation to be used in this layer,
stored as a text string: "bianry" or "multiclass"
Returns:
When activation is Binary:
    AL = last post-activation value, also referred to as the prediction from the model
caches = list of caches containing:
every cache of linear_activation_forward() function
(there are L-1 of them, indexed from 0 to L-1)
    When activation is Multiclass and Y is present during training and prediction:
A = The output of the activation function, also called the post-activation
value
cache = Tuple containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
log_loss = Cross Entropy loss
dZ = Derivative of cross entropy softmax
When activation is Multiclass and Y is not present during prediction:
Z = The input of the activation function, also called pre-activation
parameter, shape = (number of examples, size of current layer)
"""
#print(np.unique(y).shape[0])
caches = []
A = X
L = len(parameters) // 2 # Number of layers in the neural network
# Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
for l in range(1, L):
A_prev = A
# For hidden layers use Relu activation
A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)],y,keep_prob, predict_result,activation='relu')
#print("Relu A",A.shape)
caches.append(cache)
# Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
# For output layer use Sigmoid activation
if activation_type == "binary":
AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)],y, keep_prob,predict_result,activation='sigmoid')
caches.append(cache)
# Making sure shape of AL = (number of examples, 1)
assert(AL.shape == (X.shape[0],1))
# Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
# For output layer use Sigmoid activation
elif activation_type == "multiclass":
if not predict_result:
AL, cache, log_loss, dZ = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)],y,keep_prob,predict_result,activation='softmax')
else:
Z = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)],y,keep_prob,predict_result,activation='softmax')
return Z
#print("AL",AL.shape)
caches.append(cache)
# Making sure shape of AL = (number of examples, number of classes)
assert(AL.shape == (X.shape[0],Output_classes))
#print("Softmax A", AL.shape)
if activation_type=="multiclass":
return AL, caches, log_loss, dZ
else:
return AL, caches
def compute_cost(AL, Y, parameters, lambd, log_loss, reg_type, activation_type):
"""
When activation is Sigmoid:
This function implements the Binary Cross-Entropy Cost along with l1/l2
regularization
For l1:
J = -(1/m)*(ylog(predictions)+(1−y)log(1−predictions)) + (λ/2*m)∑absolute(W)
For l2:
J = -(1/m)*(ylog(predictions)+(1−y)log(1−predictions)) + (λ/2*m)∑(W**2)
When activation is Softmax:
This function implements the Cross-Entropy Softmax Cost along with L2
regularization
For l1:
J = -(1/m)*(∑ylog(predictions)) + (λ/2*m)∑absolute(W)
For l2:
J = -(1/m)*(ylog(predictions)+(1−y)log(1−predictions)) + (λ/2*m)∑(W**2)
@params
AL = Probability vector corresponding to our label predictions
shape = (number of examples, 1)
Y = Ground Truth/ True "label" vector (containing classes 0 and 1)
shape = (number of examples, 1)
parameters = Dictionary containing parameters as follwoing:
parameters["W" + str(l)] = Wl
parameters["b" + str(l)] = bl
lambd = Regularization parameter, int
reg_type = Type of regularization to use "l1" or "l2"
activation_type = The activation to be used in this layer,
stored as a text string: "bianry" or "multiclass"
Returns:
cost = Binary or Softmax Cross-Entropy Cost with l1/l2 Regularizaion
"""
m = Y.shape[0] # Number of training examples
if activation_type=="binary":
# Compute loss from aL and y
cross_entropy_cost = -(1/m)*(np.dot(np.log(AL).T,Y) + np.dot(np.log(1-AL).T,(1-Y)))
#print(cost)
elif activation_type=="multiclass":
cross_entropy_cost = log_loss
reg_cost = []
W = 0
L = len(parameters) // 2 # number of layers in the neural network
if reg_type=="l2":
for l in range(1, L+1):
W = parameters["W" + str(l)]
reg_cost.append(lambd*1./(2*m)*np.sum(W**2))
elif reg_type=="l1":
for l in range(1, L+1):
W = parameters["W" + str(l)]
reg_cost.append(lambd*np.sum(abs(W)))
    cross_entropy_cost = np.squeeze(cross_entropy_cost) # To make sure cost is a scalar (e.g. this turns [[cost]] into cost)
assert(cross_entropy_cost.shape == ())
cost = cross_entropy_cost + np.sum(reg_cost)
#print("Cost",(cost,log_loss))
return cost
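# Hedged worked example (toy values, single weight matrix): binary cross-entropy on two
# predictions plus an L2 penalty of lambd/(2*m) * sum(W**2); log_loss is unused on the
# binary path so None is passed.
def _example_compute_cost():
    AL_demo = np.array([[0.9], [0.2]])
    Y_demo = np.array([[1.0], [0.0]])
    params_demo = {'W1': np.array([[1.0, -1.0]]), 'b1': np.zeros((1, 2))}
    return compute_cost(AL_demo, Y_demo, params_demo, lambd=0.1,
                        log_loss=None, reg_type='l2', activation_type='binary')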
def linear_backward(dZ,l_cache,keep_prob,lambd,reg_type):
"""
This function implements the linear portion of backward propagation for a
single layer (layer l)
@params
dZ = Gradient of the cost with respect to the linear output of current
layer l, shape = (number of examples, size of current layer)
cache = Tuple of values (A_prev, W, b) coming from the forward propagation
in the current layer
keep_prob = Percentage of neurons to be kept active
lambd = Regularization parameter, int
reg_type = Type of regularization to use "l1" or "l2"
Returns:
dA_prev = Gradient of the cost with respect to the activation of the
previous layer l-1,
same shape as A_prev(number of examples, size of previous layer)
dW = Gradient of the cost with respect to W of current layer l,
same shape as W(size of previous layer,size of current layer)
db = Gradient of the cost with respect to b of current layer l,
same shape as b(1,size of current layer)
"""
if reg_type=="l2":
A_prev, W, b = l_cache
#print("1 Softmax, 2 Relu W", W.shape)
#print("Backward A_prev for cache",A_prev.shape)
m = A_prev.shape[0] # Number of training examples
dW = (1/m)*np.dot(A_prev.T,dZ) + (1/m)*lambd*W # Derivative wrt Weights
db = (1/m)*np.sum(dZ, axis=0, keepdims=True) # Derivative wrt Bias
dA_prev = np.dot(dZ,W.T)
elif reg_type=="l1":
A_prev, W, b = l_cache
m = A_prev.shape[0] # Number of training examples
#if W.any()>0:
        dW_pos = (W > 0)*lambd # add +lambd to the gradient wherever weights are positive
        dW_neg = (W < 0)*-lambd # add -lambd to the gradient wherever weights are negative
dW = (1/m)*np.dot(A_prev.T,dZ) + (dW_pos + dW_neg)
db = (1/m)*np.sum(dZ, axis=0, keepdims=True) # Derivative wrt Bias
dA_prev = np.dot(dZ,W.T)
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
assert (db.shape == b.shape)
return dA_prev, dW, db
def linear_activation_backward(dA, cache,keep_prob, lambd, y, reg_type,first_layer, activation):
"""
This function implements backward propagation for LINEAR -> ACTIVATION layer
@params
dA = post-activation gradient for current layer l
cache = tuple of values (linear_cache, activation_cache)
we store for computing backward propagation efficiently
keep_prob = Percentage of neurons to be kept active
lambd = Regularization parameter, int
Y = Ground Truth/ True "label" vector (containing classes 0 and 1)
shape = (number of examples, 1)
reg_type = Type of regularization to use "l1" or "l2"
first_layer = True only for first layer i.e. the input layer. It is True
because while unpacking the tuple cache it has only "Two" values
"linear" and "activation" cache, cached durinng forward pass.
For other layers it is False as it has to unpack "Four"values
of "linear" and "activation" cache, from current and next
layer during backward class (current and previous layer in
terms of forward pass)
activation = the activation to be used in this layer,
stored as a text string: "sigmoid" or "relu"
Returns:
dA_prev = Gradient of the cost with respect to the activation of the
previous layer l-1,
same shape as A_prev(number of examples, size of previous layer)
dW = Gradient of the cost with respect to W of current layer l,
same shape as W(size of previous layer,size of current layer)
db = Gradient of the cost with respect to b of current layer l,
same shape as b(1,size of current layer)
"""
if activation == "relu":
if not first_layer:
# Unpacking Four Tuple Values from Cache
curr_l_a_cache, next_l_a_cache = cache
curr_linear_cache, curr_activation_cache = curr_l_a_cache
next_linear_cache, next_activation_cache = next_l_a_cache
Z,_ = curr_activation_cache
_,D = next_activation_cache
dZ = relu_backward(dA,Z)
dA_prev, dW, db = linear_backward(dZ, curr_linear_cache,keep_prob,lambd,reg_type)
dA_prev = np.multiply(dA_prev,D)
dA_prev /= keep_prob
else: #Unpacking Two Tuple Values from Cache
curr_linear_cache, curr_activation_cache = cache
Z,_ = curr_activation_cache
dZ = relu_backward(dA,Z)
dA_prev, dW, db = linear_backward(dZ, curr_linear_cache,keep_prob,lambd,reg_type)
elif activation == "sigmoid":
# Unpacking Four Tuple Values from Cache
curr_l_a_cache, next_l_a_cache = cache
curr_linear_cache, curr_activation_cache = curr_l_a_cache
next_linear_cache, next_activation_cache = next_l_a_cache
Z,_ = curr_activation_cache
_,D = next_activation_cache
#print("D",D.shape)
dZ = sigmoid_backward(dA,Z)
#print("dZ shape",(dZ.shape,D.shape))
dA_prev, dW, db = linear_backward(dZ, curr_linear_cache,keep_prob,lambd,reg_type)
dA_prev = np.multiply(dA_prev,D)
dA_prev /= keep_prob
#Z,_ = activation_cache
#dZ = sigmoid_backward(dA,Z)
#dA_prev, dW, db = linear_backward(dZ, linear_cache,activation_cache,keep_prob,lambd,reg_type)
elif activation == "softmax":
# Unpacking Four Tuple Values from Cache
curr_l_a_cache, next_l_a_cache = cache
curr_linear_cache, curr_activation_cache = curr_l_a_cache
next_linear_cache, next_activation_cache = next_l_a_cache
Z,_ = curr_activation_cache
_,D = next_activation_cache
#print("D",D.shape)
_,_,_,dZ = softmax_loss(dA, y, Z)
#print("dZ shape",(dZ.shape,D.shape))
dA_prev, dW, db = linear_backward(dZ, curr_linear_cache,keep_prob,lambd,reg_type)
dA_prev = np.multiply(dA_prev,D)
dA_prev /= keep_prob
#print("Softmax dA", dA_prev.shape)
return dA_prev, dW, db
def L_model_backward(AL, Y, caches, keep_prob, lambd,reg_type, activation_type):
"""
This function implements the backward propagation as following:
[LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group
@params
AL = probability vector, output of the L_model_forward function
Y = Ground Truth/ True "label" vector (containing classes 0 and 1)
shape = (number of examples, 1)
caches = list of caches containing:
every cache of linear_activation_forward function with "relu"
(it's caches[l], for l in range(L-1) i.e l = 0...L-2)
the cache of linear_activation_forward function with "sigmoid"
(it's caches[L-1])
keep_prob = Percentage of neurons to be kept active
lambd = Regularization parameter, int
reg_type = Type of regularization to use "l1" or "l2"
activation_type = The activation to be used in this layer,
stored as a text string: "bianry" or "multiclass"
Returns:
grads = Dictionary with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l+1)] = ...
grads["db" + str(l+1)] = ...
"""
grads = {}
L = len(caches) # the number of layers
#print(L)
m = AL.shape[0] # Number of training examples
# Initializing the backpropagation
# Derivative of Binary Cross Entropy function
if activation_type=="binary":
Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
# Lth layer (SIGMOID -> LINEAR) gradients.
# Inputs: "dAL, current_cache". Outputs: "grads["dAL-1"], grads["dWL"], grads["dbL"]
        current_cache = (caches[L-1],caches[L-2]) # Grabbing the correct dropout mask of the previous layer (wrt forward pass)
grads["dA" + str(L-1)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, keep_prob,lambd,Y,reg_type,first_layer=False,activation = "sigmoid")
elif activation_type=="multiclass":
#Y = Y.reshape(AL.shape)
#curr_cache = caches[L-2]
        current_cache = (caches[L-1],caches[L-2]) # Grabbing the correct dropout mask of the previous layer (wrt forward pass)
#print("Softmax CC",len(current_cache))
grads["dA" + str(L-1)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(AL, current_cache, keep_prob,lambd, Y,reg_type,first_layer=False,activation = "softmax")
#print("Softmax_grad",grads["dA"+str(L-1)])
# Loop from l=L-2 to l=0
for l in reversed(range(L-1)):
#print("l",l) #l = 1,0
# lth layer: (RELU -> LINEAR) gradients
# Inputs: "grads["dA" + str(l + 1)], current_cache". Outputs: "grads["dA" + str(l)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)]
if l != 0:
first_layer = False
            current_cache = (caches[l], caches[l-1]) # Grabbing the correct dropout mask of the previous layer (wrt forward pass)
#print("Relu CC",len(current_cache))
elif l==0:
first_layer = True
            current_cache = caches[l] # No dropout is applied to the first/input layer
dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 1)], current_cache,keep_prob, lambd, Y,reg_type,first_layer,activation = "relu")
grads["dA" + str(l)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
#print(grads)
return grads
def initialize_adam(parameters) :
"""
This function Initializes v and s as two python dictionaries with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters
@param
parameters = Dictionary containing parameters as follwoing:
parameters["W" + str(l)] = Wl
parameters["b" + str(l)] = bl
Returns:
v = Dictionary that will contain the exponentially weighted average of the gradient
v["dW" + str(l)] = ...
v["db" + str(l)] = ...
s = Dictionary that will contain the exponentially weighted average of the squared gradient
s["dW" + str(l)] = ...
s["db" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
s = {}
# Initialize v, s. Input: "parameters". Outputs: "v, s".
for l in range(L):
v["dW" + str(l+1)] = np.zeros(parameters['W' + str(l+1)].shape)
v["db" + str(l+1)] = np.zeros(parameters['b' + str(l+1)].shape)
s["dW" + str(l+1)] = np.zeros(parameters['W' + str(l+1)].shape)
s["db" + str(l+1)] = np.zeros(parameters['b' + str(l+1)].shape)
return v, s
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
"""
This function updates our model parameters using Adam
@params
parameters = Dictionary containing our parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads = Dictionary containing our gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v = Adam variable, moving average of the first gradient, python dictionary
s = Adam variable, moving average of the squared gradient, python dictionary
learning_rate = the learning rate, scalar.
beta1 = Exponential decay hyperparameter for the first moment estimates
beta2 = Exponential decay hyperparameter for the second moment estimates
epsilon = hyperparameter preventing division by zero in Adam updates
Returns:
parameters = Dictionary containing our updated parameters
v = Adam variable, moving average of the first gradient, python dictionary
s = Adam variable, moving average of the squared gradient, python dictionary
"""
L = len(parameters) // 2 # number of layers in the neural networks
v_corrected = {} # Initializing first moment estimate, python dictionary
s_corrected = {} # Initializing second moment estimate, python dictionary
# Perform Adam update on all parameters
for l in range(L):
# Moving average of the gradients. Inputs: "v, grads, beta1". Output: "v".
v["dW" + str(l+1)] = beta1 * v['dW' + str(l+1)] + (1 - beta1) * grads['dW' + str(l+1)]
v["db" + str(l+1)] = beta1 * v['db' + str(l+1)] + (1 - beta1) * grads['db' + str(l+1)]
# Compute bias-corrected first moment estimate. Inputs: "v, beta1, t". Output: "v_corrected".
v_corrected["dW" + str(l+1)] = v['dW' + str(l+1)] / float(1 - beta1**t)
v_corrected["db" + str(l+1)] = v['db' + str(l+1)] / float(1 - beta1**t)
# Moving average of the squared gradients. Inputs: "s, grads, beta2". Output: "s".
s["dW" + str(l+1)] = beta2 * s['dW' + str(l+1)] + (1 - beta2) * (grads['dW' + str(l+1)]**2)
s["db" + str(l+1)] = beta2 * s['db' + str(l+1)] + (1 - beta2) * (grads['db' + str(l+1)]**2)
### END CODE HERE ###
# Compute bias-corrected second raw moment estimate. Inputs: "s, beta2, t". Output: "s_corrected".
s_corrected["dW" + str(l+1)] = s["dW" + str(l+1)] / float(1 - beta2**t)
s_corrected["db" + str(l+1)] = s["db" + str(l+1)] / float(1 - beta2**t)
# Update parameters. Inputs: "parameters, learning_rate, v_corrected, s_corrected, epsilon". Output: "parameters".
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * v_corrected["dW" + str(l+1)] / (np.sqrt(s_corrected["dW" + str(l+1)]) + epsilon)
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * v_corrected["db" + str(l+1)] / (np.sqrt(s_corrected["db" + str(l+1)]) + epsilon)
return parameters, v, s
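# Hedged single-step sketch (toy one-layer parameters): initialize the Adam moments,
# apply one update with t = 1, and check the weights actually moved away from their
# starting values.
def _example_adam_step():
    params_demo = {'W1': np.ones((2, 3)), 'b1': np.zeros((1, 3))}
    grads_demo = {'dW1': 0.1 * np.ones((2, 3)), 'db1': 0.1 * np.ones((1, 3))}
    v_demo, s_demo = initialize_adam(params_demo)
    params_demo, v_demo, s_demo = update_parameters_with_adam(
        params_demo, grads_demo, v_demo, s_demo, t=1, learning_rate=0.01)
    assert not np.allclose(params_demo['W1'], 1.0)
    return params_demo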
def random_mini_batches(X, Y, mini_batch_size):
"""
This function creates a list of random minibatches from (X, Y)
@params
X = Data, numpy array of shape (number of examples, number of features)
Y = Ground Truth/ True "label" vector (containing classes 0 and 1)
shape = (number of examples, 1)
mini_batch_size = size of the mini-batches (suggested to use powers of 2)
Returns:
mini_batches = list of synchronous (mini_batch_X, mini_batch_Y)
"""
np.random.seed(0)
m = X.shape[0] # Number of training examples
mini_batches = [] # List to return synchronous minibatches
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[permutation,:]
#print("S_X",shuffled_X.shape)
shuffled_Y = Y[permutation].reshape((m,1))
#print("S_Y",shuffled_Y.shape)
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = (m//mini_batch_size) # number of mini-batches of size mini_batch_size in our partitioning
for k in range(num_complete_minibatches):
mini_batch_X = shuffled_X[k*mini_batch_size : (k+1)*mini_batch_size,:]
#print("M_X",mini_batch_X.shape)
mini_batch_Y = shuffled_Y[k*mini_batch_size : (k+1)*mini_batch_size,:]
mini_batch = (mini_batch_X, mini_batch_Y) # Tuple for synchronous minibatches
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[num_complete_minibatches*mini_batch_size :,: ]
mini_batch_Y = shuffled_Y[num_complete_minibatches*mini_batch_size :,: ]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
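# Hedged sketch (toy data): 10 examples with a mini-batch size of 4 yield two full
# mini-batches plus one final, smaller batch of 2 examples.
def _example_random_mini_batches():
    X_demo = np.random.randn(10, 3)
    Y_demo = np.random.randint(0, 2, size=(10, 1))
    batches = random_mini_batches(X_demo, Y_demo, mini_batch_size=4)
    assert len(batches) == 3
    assert batches[-1][0].shape[0] == 2
    return batches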
def predict(X, parameters,y,Output_classes, keep_prob,predict_result,activation_type, flags):
"""
This function is used to predict the results of a L-layer neural network
@ params
X = Data, numpy array of shape (number of examples, number of features)
Y = Ground Truth/ True "label" vector (containing classes 0 and 1)
shape = (number of examples, 1)
parameters = Parameters of trained model returned by L_layer_model function
keep_prob = Percentage of neurons to be kept active
predict_result = False while training, True when predicting the ground truth
values (False only when ground truth values are present)
Must be kept False if you have ground truth values
while predicting
activation_type = The activation to be used in this layer,
stored as a text string: "bianry" or "multiclass"
flags = During prediction sometime we have grounnd truth values and
sometime we have to predict ground truth values using learned
parameters during training.
so flags is "y_is_present" or "predict_y"
Returns:
Predictions for the given dataset X
"""
m = X.shape[0] # Number of training examples in Dataset
n = len(parameters) // 2 # Number of layers in the neural network
if activation_type=="multiclass":
if flags=="y_is_present":
# Forward propagation
AL, _, _, _ = L_model_forward(X, parameters, y,Output_classes,keep_prob, predict_result,activation_type)
elif flags == "predict_y":
Z = L_model_forward(X, parameters, y,Output_classes,keep_prob, predict_result,activation_type)
AL = softmax(Z) # Apply stable Softmax
predicted_class = np.argmax(AL, axis=1) # Prediction
elif activation_type=="binary":
p = np.zeros((m,1))
#Forward Propagation
probas, _ = L_model_forward(X, parameters,y,Output_classes,keep_prob, predict_result,activation_type)
for i in range(probas.shape[0]):
# As per sigmoid, values greater than 0.5 are categorized as 1
            # and values less than or equal to 0.5 are categorized as 0
if probas[i] > 0.5:
p[i] = 1
else:
p[i] = 0
if flags == "y_is_present" and activation_type=="multiclass":
#acc = np.sum((predicted_class == y)/m)*100
#print("Accuracy:%.2f%%" % acc)
#print('Accuracy: {0}%'.format(100*np.mean(predicted_class == y)))
return predicted_class
elif flags == "y_is_present" and activation_type=="binary":
y = y.reshape(p.shape)
acc = np.sum((p == y)/m)*100
print("Accuracy:%.2f%%" % acc)
return p
if flags == "predict_y" and activation_type=="multiclass":
ret = np.column_stack((y, predicted_class)).astype(int)
# Saving the Predictions as Multiclass_Predictions.csv
pd.DataFrame(ret).to_csv("Multiclass_Predictions.csv", sep = ",", header = ["Id", "label"], index = False)
return predicted_class
elif flags == "predict_y" and activation_type=="binary":
ret = np.column_stack((y, p)).astype(int)
# Saving the Predictions as Binary_Predictions.csv
        pd.DataFrame(ret).to_csv("Binary_Predictions.csv", sep = ",", header = ["Id", "label"], index = False)
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
from helpers import (
austin_listings,
zc_link,
apply_clustering,
rating_clustering,
review_columns,
)
app = dash.Dash(__name__)
server = app.server
app.config.suppress_callback_exceptions = True
# CONSTANTS
grouped = austin_listings.groupby("zipcode").size()
mapbox_token = "<KEY>"
geo_colors = [
"#8dd3c7",
"#ffd15f",
"#bebada",
"#fb8072",
"#80b1d3",
"#fdb462",
"#b3de69",
"#fccde5",
"#d9d9d9",
"#bc80bd",
"#ccebc5",
]
bar_coloway = [
"#fa4f56",
"#8dd3c7",
"#ffffb3",
"#bebada",
"#80b1d3",
"#fdb462",
"#b3de69",
"#fccde5",
"#d9d9d9",
"#bc80bd",
"#ccebc5",
"#ffed6f",
]
intro_text = """
**About this app**
This app applies spatial clustering and regionalization analysis to discover the [dataset of AirBnb listings in the
city of Austin](http://insideairbnb.com/get-the-data.html). Models are created using [pysal](https://pysal.readthedocs.io/en/latest/)
and scikit-learn.
Select the type of model from radioitem, click on the button to run clustering and visualize output regions geographically on the map, computing may take seconds to finish. Click
on regions on the map to update the number of airbnb listings from your highlighted group.
"""
listing_txt = """
out of *{}* total listings
""".format(
grouped.sum()
)
INIT_THRESHOLD_VAL = 5
def header_section():
return html.Div(
[
html.Img(src=app.get_asset_url("dash-logo.png"), className="logo"),
html.H4("Spatial Clustering"),
],
className="header__title",
)
def make_base_map():
# Scattermapbox with geojson layer, plot all listings on mapbox
customdata = list(
zip(
austin_listings["host_name"],
austin_listings["name"],
austin_listings["host_since"],
austin_listings["price"],
austin_listings["accommodates"],
austin_listings["availability_365"],
round(austin_listings["availability_365"] / 365 * 100, 1),
)
)
mapbox_figure = dict(
type="scattermapbox",
lat=austin_listings["latitude"],
lon=austin_listings["longitude"],
marker=dict(size=7, opacity=0.7, color="#550100"),
customdata=customdata,
name="Listings",
hovertemplate="<b>Host: %{customdata[0]}</b><br><br>"
"<b>%{customdata[1]}</b><br>"
"<b>Host Since: </b>%{customdata[2]}<br>"
"<b>Price: </b>%{customdata[3]} / night<br>"
"<b>Person to accommodate: </b>%{customdata[4]}<br>"
"<b>Yearly Availability: </b>%{customdata[5]} days/year (%{customdata[6]} %)",
)
layout = dict(
mapbox=dict(
style="streets",
uirevision=True,
accesstoken=mapbox_token,
zoom=9,
center=dict(
lon=austin_listings["longitude"].mean(),
lat=austin_listings["latitude"].mean(),
),
),
shapes=[
{
"type": "rect",
"xref": "paper",
"yref": "paper",
"x0": 0,
"y0": 0,
"x1": 1,
"y1": 1,
"line": {"width": 1, "color": "#B0BEC5"},
}
],
margin=dict(l=10, t=10, b=10, r=10),
height=900,
showlegend=True,
hovermode="closest",
)
figure = {"data": [mapbox_figure], "layout": layout}
return figure
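# Hedged wiring sketch: the figure dict returned above can be handed straight to a
# dcc.Graph component; the component id below is an assumption, not the app's real id.
def _example_base_map_graph():
    return dcc.Graph(id='example-base-map', figure=make_base_map())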
def make_map_with_clustering(sel_ind, c_type, stored_data):
"""
Update layers on map from clustering regions.
:param sel_ind: lasso-select index from map.selectedData.
:param c_type: cluster type.
:param stored_data: datastore from computing.
:return: Plotly figure object.
"""
# Group based on zipcode
figure = make_base_map()
# Decrease opacity of scatter
figure["data"][0]["marker"].update(opacity=0.02)
figure["layout"].update(
dragmode="lasso"
) # clickmode doesn't work but drag will select scatters
db = pd.DataFrame()
if c_type == "ht-cluster":
db = pd.read_json(stored_data["ht"]["data"])
elif c_type == "rating-cluster":
        db, p_val = pd.read_json(stored_data["rt"]["data"])
import pandas as pd
bs = pd.read_csv('/home/federico/basf-dev/app/py/beneficios.csv')
df = pd.DataFrame()
for row, (k,v) in enumerate(bs.groupby(["Categoría"])):
df = df.append(v.drop_duplicates("Subcategoría"))
cols_to_del = [x for x in df.columns if x != 'Categoría' and x != 'Subcategoría']
df = df.drop(cols_to_del, 1)
df.to_csv('/home/federico/subcat_gen.csv', index = False, header = False)
bs = pd.read_csv('/home/federico/basf-dev/app/py/beneficios.csv')
from datetime import datetime
from datetime import timedelta
import io
import os
from sqlalchemy import create_engine
import pandas as pd
import requests
def init_sql_conn():
user = "root"
host = os.getenv('MYSQL_HOST')
db = "covid"
engine = create_engine(f'mysql://{user}@{host}/{db}')
conn = engine.connect()
return conn
def scrape_from(conn, start_date, end_date=datetime.now().date()):
columns_to_drop = ["FIPS", "Lat", "Long_", "Latitude", "Longitude", "Combined_Key"]
column_rename = {
"Last Update": "report_date",
"Last_Update": "report_date",
"City": "city",
"Admin2": "city",
"Province/State": "province_state",
"Province_State": "province_state",
"Country/Region": "country",
"Country_Region": "country",
"Confirmed": "confirmed",
"Deaths": "deaths",
"Recovered": "recovered",
"Active": "active",
}
url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/"
table = "covid"
columns = ["report_date", "city", "province_state", "country", "confirmed", "deaths", "recovered", "active"]
df_all = pd.DataFrame(columns=columns)
curr_date = start_date
while curr_date <= end_date:
# pull in the csv file from url and convert to dataframe
date = datetime.strftime(curr_date, "%m-%d-%Y")
full_url = url+date+".csv"
resp = requests.get(full_url)
if resp.status_code != 200:
print(f'Got status code {resp.status_code} for {date}')
curr_date += timedelta(days=1)
continue
df = pd.read_csv(io.StringIO(resp.content.decode('utf-8')))
# drop columns that we're not interested in
for c in columns_to_drop:
if c in df.columns:
df = df.drop(c, axis=1)
# standardize the column names
df = df.rename(columns=column_rename)
# adds columns that may not be present due to file schema changes
for c in columns:
if c not in df.columns:
df[c] = 0
# try to convert report_date to a datetime
try:
df["report_date"] = | pd.to_datetime(df["report_date"]) | pandas.to_datetime |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from stockstats import StockDataFrame
import warnings
import traceback
warnings.filterwarnings('ignore')
import argparse
import re
import sys, os
sys.path.append(os.getcwd())
import os
import requests
from requests.exceptions import ConnectionError
import bs4
from bs4 import BeautifulSoup
from fastnumbers import isfloat
from fastnumbers import fast_float
from multiprocessing.dummy import Pool as ThreadPool
import more_itertools
from random import shuffle
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import json
import seaborn as sns
sns.set_style('whitegrid')
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib as mplt
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import explained_variance_score
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
import matplotlib.dates as mdates
import seaborn as sns
import math
import gc
import ipaddress
from urllib.parse import urlparse
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from data_science_utils import dataframe as df_utils
from data_science_utils import models as model_utils
from data_science_utils.dataframe import column as column_utils
from data_science_utils.models.IdentityScaler import IdentityScaler as IdentityScaler
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix,classification_report
import lightgbm as lgb
np.set_printoptions(threshold=np.nan)
import pickle
from xgboost import XGBClassifier
import xgboost as xgb
from sklearn.metrics import accuracy_score
import missingno as msno
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
import datetime
from scipy import signal
import matplotlib.pyplot as plt
from datetime import timedelta
from sklearn import linear_model
from sklearn.metrics import roc_auc_score
from IPython.display import display, HTML
import warnings
warnings.filterwarnings('ignore')
from data_science_utils.misc import ffloat
from data_science_utils.misc import is_dataframe
from data_science_utils.misc import ffloat_list
from data_science_utils.misc import remove_multiple_spaces
from datetime import date, timedelta
def prev_weekday(adate):
if adate.weekday() <=4:
return adate
adate -= timedelta(days=1)
while adate.weekday() > 4: # Mon-Fri are 0-4
adate -= timedelta(days=1)
return adate
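# Worked sketch: a Saturday (2018-08-18) rolls back to Friday 2018-08-17, while a
# weekday such as Wednesday 2018-08-15 is returned unchanged.
def _example_prev_weekday():
    assert prev_weekday(date(2018, 8, 18)) == date(2018, 8, 17)
    assert prev_weekday(date(2018, 8, 15)) == date(2018, 8, 15)
    return True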
def get_ci(p,t,r):
return np.abs(np.fv(r/100,t,0,p))
def get_cumulative_amounts(p,t,r):
psum = p
for i in range(1,t):
psum = psum + get_ci(p,i,r)
return psum
def get_year_when_cumulative_profit_over_pe(pe,cpg):
if np.isnan(pe) or np.isnan(cpg):
return np.inf
for i in range(1,int(np.ceil(pe))):
if get_cumulative_amounts(1,i,cpg)>=pe:
return i
return int(np.ceil(pe))
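# Hedged worked example: with a P/E of 20 and cumulative profit growth of 15% a year,
# compounded earnings sum to (1.15**t - 1) / 0.15, which first exceeds 20 at t = 10,
# so the helper above returns year 10 for these (assumed) inputs.
def _example_payback_year():
    return get_year_when_cumulative_profit_over_pe(pe=20, cpg=15)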
def get_children(html_content):
return [item for item in html_content.children if type(item)==bs4.element.Tag or len(str(item).replace("\n","").strip())>0]
def get_portfolio(mfid):
url = "https://www.moneycontrol.com/india/mutualfunds/mfinfo/portfolio_holdings/" + mfid
page_response = requests.get(url, timeout=240)
page_content = BeautifulSoup(page_response.content, "html.parser")
portfolio_table = page_content.find('table', attrs={'class': 'tblporhd'})
fund_name = page_content.find('h1').text
return portfolio_table, fund_name
def get_table(portfolio_table):
portfolio_elems = get_children(portfolio_table)
table_data = list()
for row in portfolio_elems:
row_data = list()
row_elems = get_children(row)
for elem in row_elems:
text = elem.text.strip().replace("\n", "")
if len(text) == 0:
continue
elem_descriptor = {'text': text}
elem_children = get_children(elem)
if len(elem_children) > 0:
if elem_children[0].has_attr('href'):
elem_href = elem_children[0]['href']
elem_descriptor['href'] = elem_href
row_data.append(elem_descriptor)
table_data.append(row_data)
return table_data
def get_table_simple(portfolio_table, is_table_tag=True):
portfolio_elems = portfolio_table.find_all('tr') if is_table_tag else get_children(portfolio_table)
table_data = list()
for row in portfolio_elems:
row_data = list()
row_elems = get_children(row)
for elem in row_elems:
text = elem.text.strip().replace("\n", "")
text = remove_multiple_spaces(text)
if len(text) == 0:
continue
row_data.append(text)
table_data.append(row_data)
return table_data
def get_inner_texts_as_array(elem, filter_empty=True):
children = get_children(elem)
tarr = [child.text.strip().replace("\n", "") for child in children]
if filter_empty:
tarr = list(filter(lambda x: x is not None and len(x) > 0, tarr))
return tarr
def get_shareholding_pattern(shareholding_url):
page_response = requests.get(shareholding_url, timeout=240)
page_content = BeautifulSoup(page_response.content, "html.parser")
tables = page_content.find_all('table')
if len(tables) < 3:
return {}
table_content = page_content.find_all('table')[2]
rows = table_content.find_all('tr')
all_tds = page_content.find_all('td')
idx = list(map(lambda x: x.text, all_tds)).index("Total (A)+(B)+(C)")
promoters = get_inner_texts_as_array(
list(filter(lambda x: "Total shareholding of Promoter and Promoter Group (A)" in x.text, rows))[0],
filter_empty=False)
public = get_inner_texts_as_array(list(filter(lambda x: "Total Public shareholding (B)" in x.text, rows))[0],
filter_empty=False)
all_shares = get_inner_texts_as_array(
list(filter(lambda x: "Total (A)+(B)+(C)" in x.text, page_content.find_all('tr')))[0], filter_empty=False)
promoters_pledging = ffloat(promoters[7])
promoters = ffloat(promoters[5])
public = ffloat(public[5])
total_shares_count = ffloat(all_tds[idx + 2].text)
total_pledging = ffloat(all_tds[idx + 7].text)
return {"promoters": promoters, "public": public, "promoters_pledging": promoters_pledging,
"total_shares_count": total_shares_count, "total_pledging": total_pledging}
def get_fundholding_pattern(fundholding_url):
# Funds holding it or not Y
# Total funds holding currently N
# percent held by funds
# buys last quarter
# sells last quarter
# no change last quarter
# Total change in fund holding by money
# Total change in fund holding by percent shares
page_response = requests.get(fundholding_url, timeout=240)
page_content = BeautifulSoup(page_response.content, "html.parser")
results = {}
top_tab = page_content.text
# print(top_tab)
if "Not held by Mutual Funds in the last 6 quarters" in top_tab:
results['mf_holding'] = True
else:
results['mf_holding'] = False
bought = np.nan
sold = np.nan
hold = np.nan
if not results['mf_holding']:
bl = top_tab.split("Bought by")
if len(bl) == 2:
bought = ffloat(bl[1].strip().split(" ")[0])
sl = top_tab.split("Sold by")
if len(sl) == 2:
sold = ffloat(sl[1].strip().split(" ")[0])
hl = top_tab.split("No change in")
if len(hl) == 2:
hold = ffloat(hl[1].strip().split(" ")[0])
results['mf_bought'] = bought
results['mf_sold'] = sold
results['mf_hold'] = hold
six_quarter = page_content.find('div', attrs={'id': 'div_0'}).find('table', attrs={'class': 'tblfund2'}).find_all('tr')[-1]
six_quarter = ffloat_list(get_inner_texts_as_array(six_quarter)[1:])
results['mf_share_count'] = six_quarter[0]
results['mf_share_count_last_quarter_change'] = six_quarter[0] - six_quarter[1]
results['mf_six_quarter_share_count'] = six_quarter
return results
def get_ratios(url):
page_response = requests.get(url, timeout=240)
page_content = BeautifulSoup(page_response.content, "html.parser")
table_content = page_content.find_all('table', attrs={'class': 'table4'})[-1]
if "Data Not Available" in table_content.text:
return {}
dates_html = get_children(get_children(get_children(table_content)[0])[1])[1]
dates = get_inner_texts_as_array(dates_html)
ratios_htmls = get_children(get_children(get_children(get_children(table_content)[0])[1])[2])[1:]
rows = list(map(get_inner_texts_as_array, ratios_htmls))
ratios = {}
ratios['dates'] = dates
for row in rows:
if len(row) > 1:
ratios[row[0]] = ffloat_list(row[1:])
needed_keys = [('dates', 'ratios_dates'),
('Diluted EPS (Rs.)', 'ratios_diluted_eps'),
('Revenue from Operations/Share (Rs.)', 'ratios_revenue_per_share'),
('PBT/Share (Rs.)', 'ratios_pbt_per_share'),
('PBT Margin (%)', 'ratios_pbt_margin_per_share'),
('Total Debt/Equity (X)', 'ratios_de'),
('Asset Turnover Ratio (%)', 'ratios_asset_turnover_ratio'),
('Current Ratio (X)', 'ratios_cr'),
('EV/EBITDA (X)', 'ratios_ev_by_ebitda'),
('Price/BV (X)', 'ratios_pb'),
('MarketCap/Net Operating Revenue (X)','mcap/revenue'),
('Price/Net Operating Revenue','price/revenue')]
ratios = {your_key[1]: ratios[your_key[0]] if your_key[0] in ratios else [] for your_key in needed_keys}
return ratios
def get_min_and_three_year_from_screener(table):
min_value = np.inf
three_year_value = np.inf
for row in table:
if len(row)==2:
if row[0]=='3 Years:':
three_year_value = ffloat(row[1].replace('%',''))
cur_value = ffloat(row[1].replace('%',''))
min_value = min(min_value,cur_value)
return min_value,three_year_value
def get_quarterly_results(quarterly_results_table):
qrt = get_table_simple(quarterly_results_table)
qres = {}
qres['dates'] = qrt[0]
qres['sales'] = ffloat_list(qrt[1][1:])
qres['operating_profit'] = ffloat_list(qrt[3][1:])
qres['opm_percent'] = ffloat_list(qrt[4][1:])
qres['interest'] = ffloat_list(qrt[7][1:])
qres['pbt'] = ffloat_list(qrt[8][1:])
return qres
def get_annual_results(annual_results):
if annual_results is None:
return {}
qrt = get_table_simple(annual_results)
qres = {}
qres['dates'] = qrt[0]
qres['sales'] = ffloat_list(qrt[1][1:])
qres['operating_profit'] = ffloat_list(qrt[3][1:])
qres['opm_percent'] = ffloat_list(qrt[4][1:])
qres['interest'] = ffloat_list(qrt[6][1:])
qres['pbt'] = ffloat_list(qrt[8][1:])
qres['eps'] = ffloat_list(qrt[11][1:])
return qres
def get_balance_sheet(balance_sheet):
if balance_sheet is None:
return {}
qrt = get_table_simple(balance_sheet)
qres = {}
qres['dates'] = qrt[0]
qres['borrowings'] = ffloat_list(qrt[3][1:])
qres['fixed_assets'] = ffloat_list(qrt[6][1:])
qres['total_assets'] = ffloat_list(qrt[10][1:])
return qres
def get_cash_flows(cash_flows):
if cash_flows is None:
return {}
qrt = get_table_simple(cash_flows)
qres = {}
qres['dates'] = qrt[0]
qres['net_cash_flow'] = ffloat_list(qrt[4][1:])
return qres
def get_past_prices(sc_id):
bse_url = "https://www.moneycontrol.com/tech_charts/bse/his/%s.csv" % sc_id
nse_url = "https://www.moneycontrol.com/tech_charts/nse/his/%s.csv" % sc_id
past_prices_nse = pd.read_csv(nse_url, header=None, names=['open', 'high', 'low', 'close', 'volume', 1, 2, 3, 4])[
['open', 'high', 'low', 'close', 'volume']]
past_prices_nse.index = pd.to_datetime(past_prices_nse.index)
past_prices_bse = pd.read_csv(bse_url, header=None, names=['open', 'high', 'low', 'close', 'volume', 1, 2, 3, 4])[
['open', 'high', 'low', 'close', 'volume']]
past_prices_bse.index = pd.to_datetime(past_prices_bse.index)
ly = None
two_year_ago = None
three_year_ago = None
five_year_ago = None
past_prices = past_prices_bse
for i in range(12):
try:
if ly is None:
ly_t = pd.to_datetime(past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(364 + i, unit='d'))
ly = past_prices.loc[[ly_t]]
if two_year_ago is None:
two_year_ago_t = pd.to_datetime(
past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(730 + i, unit='d'))
two_year_ago = past_prices.loc[[two_year_ago_t]]
if three_year_ago is None:
three_year_ago_t = pd.to_datetime(
past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(1095 + i, unit='d'))
three_year_ago = past_prices.loc[[three_year_ago_t]]
if five_year_ago is None:
five_year_ago_t = pd.to_datetime(
past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(1825 + i, unit='d'))
five_year_ago = past_prices.loc[[five_year_ago_t]]
except Exception as e:
pass
past_prices = past_prices_nse
for i in range(12):
try:
if ly is None:
ly_t = pd.to_datetime(past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(364 + i, unit='d'))
ly = past_prices.loc[[ly_t]]
if two_year_ago is None:
two_year_ago_t = pd.to_datetime(
past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(730 + i, unit='d'))
two_year_ago = past_prices.loc[[two_year_ago_t]]
if three_year_ago is None:
three_year_ago_t = pd.to_datetime(
past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(1095 + i, unit='d'))
three_year_ago = past_prices.loc[[three_year_ago_t]]
if five_year_ago is None:
five_year_ago_t = pd.to_datetime(
past_prices.iloc[-1:].index.values[0] - | pd.to_timedelta(1825 + i, unit='d') | pandas.to_timedelta |
#!/usr/bin/env python3
import os
import sys
import re
import pandas as pd, geopandas as gpd
import numpy as np
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
from multiprocessing import Pool
from os.path import isfile, join
import shutil
import warnings
from pathlib import Path
import time
warnings.simplefilter(action='ignore', category=FutureWarning)
import rasterio
from rasterio import features as riofeatures
from rasterio import plot as rioplot
from shapely.geometry import Polygon
"""
Plot Rating Curves and Compare to USGS Gages
Parameters
----------
fim_dir : str
Directory containing FIM output folders.
output_dir : str
Directory containing rating curve plots and tables.
usgs_gages_filename : str
File name of USGS rating curves.
nwm_flow_dir : str
Directory containing NWM recurrence flows files.
number_of_jobs : str
Number of jobs.
stat_groups : str
string of columns to group eval metrics.
"""
def check_file_age(file):
'''
Checks if file exists, determines the file age, and recommends
updating if older than 1 month.
Returns
-------
    str describing the file age (None if the file does not exist).
'''
file = Path(file)
if file.is_file():
modification_time = file.stat().st_mtime
current_time = time.time()
file_age_days = (current_time - modification_time)/86400
if file_age_days > 30:
check = f'{file.name} is {int(file_age_days)} days old, consider updating.\nUpdate with rating_curve_get_usgs_curves.py'
else:
check = f'{file.name} is {int(file_age_days)} days old.'
return check
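# Minimal usage sketch for check_file_age (illustrative only; the path below is a
# hypothetical placeholder, not a real project file).
def _example_check_file_age():
    usgs_gages_filename = '/data/inputs/usgs_gages/usgs_rating_curves.csv'  # assumed path
    print(check_file_age(usgs_gages_filename))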
# recurr_intervals = ['recurr_1_5_cms.csv','recurr_5_0_cms.csv','recurr_10_0_cms.csv']
def generate_rating_curve_metrics(args):
elev_table_filename = args[0]
branches_folder = args[1]
usgs_gages_filename = args[2]
usgs_recurr_stats_filename = args[3]
nwm_recurr_data_filename = args[4]
rc_comparison_plot_filename = args[5]
nwm_flow_dir = args[6]
catfim_flows_filename = args[7]
huc = args[8]
alt_plot = args[9]
elev_table = pd.read_csv(elev_table_filename,dtype={'location_id': object, 'feature_id':object,'HydroID':object, 'levpa_id':object})
elev_table.dropna(subset=['location_id'], inplace=True)
usgs_gages = pd.read_csv(usgs_gages_filename,dtype={'location_id': object, 'feature_id':object})
# Aggregate FIM4 hydroTables
hydrotable = pd.DataFrame()
for branch in elev_table.levpa_id.unique():
branch_elev_table = elev_table.loc[elev_table.levpa_id == branch].copy()
branch_hydrotable = pd.read_csv(join(branches_folder, str(branch), f'hydroTable_{branch}.csv'),dtype={'HydroID':object,'feature_id':object})
# Only pull SRC for hydroids that are in this branch
branch_hydrotable = branch_hydrotable.loc[branch_hydrotable.HydroID.isin(branch_elev_table.HydroID)]
branch_hydrotable.drop(columns=['order_'], inplace=True)
# Join SRC with elevation data
branch_elev_table.rename(columns={'feature_id':'fim_feature_id'}, inplace=True)
branch_hydrotable = branch_hydrotable.merge(branch_elev_table, on="HydroID")
# Append to full rating curve dataframe
if hydrotable.empty:
hydrotable = branch_hydrotable
else:
hydrotable = hydrotable.append(branch_hydrotable)
# Join rating curves with elevation data
#elev_table.rename(columns={'feature_id':'fim_feature_id'}, inplace=True)
#hydrotable = hydrotable.merge(elev_table, on="HydroID")
relevant_gages = list(hydrotable.location_id.unique())
usgs_gages = usgs_gages[usgs_gages['location_id'].isin(relevant_gages)]
usgs_gages = usgs_gages.reset_index(drop=True)
if len(usgs_gages) > 0:
# Adjust rating curve to elevation
hydrotable['elevation_ft'] = (hydrotable.stage + hydrotable.dem_adj_elevation) * 3.28084 # convert from m to ft
# hydrotable['raw_elevation_ft'] = (hydrotable.stage + hydrotable.dem_elevation) * 3.28084 # convert from m to ft
hydrotable['discharge_cfs'] = hydrotable.discharge_cms * 35.3147
usgs_gages = usgs_gages.rename(columns={"flow": "discharge_cfs", "elevation_navd88": "elevation_ft"})
hydrotable['source'] = "FIM"
usgs_gages['source'] = "USGS"
limited_hydrotable = hydrotable.filter(items=['location_id','elevation_ft','discharge_cfs','source', 'HydroID', 'levpa_id', 'dem_adj_elevation'])
select_usgs_gages = usgs_gages.filter(items=['location_id', 'elevation_ft', 'discharge_cfs','source'])
if 'default_discharge_cms' in hydrotable.columns: # check if both "FIM" and "FIM_default" SRCs are available
hydrotable['default_discharge_cfs'] = hydrotable.default_discharge_cms * 35.3147
limited_hydrotable_default = hydrotable.filter(items=['location_id','elevation_ft', 'default_discharge_cfs'])
limited_hydrotable_default['discharge_cfs'] = limited_hydrotable_default.default_discharge_cfs
limited_hydrotable_default['source'] = "FIM_default"
rating_curves = limited_hydrotable.append(select_usgs_gages)
rating_curves = rating_curves.append(limited_hydrotable_default)
else:
rating_curves = limited_hydrotable.append(select_usgs_gages)
# Add stream order
stream_orders = hydrotable.filter(items=['location_id','order_']).drop_duplicates()
rating_curves = rating_curves.merge(stream_orders, on='location_id')
rating_curves['order_'] = rating_curves['order_'].astype('int')
# NWM recurr intervals
recurr_intervals = ("2","5","10","25","50","100")
recurr_dfs = []
for interval in recurr_intervals:
recurr_file = join(nwm_flow_dir, 'nwm21_17C_recurr_{}_0_cms.csv'.format(interval))
df = pd.read_csv(recurr_file, dtype={'feature_id': str})
# Update column names
df = df.rename(columns={"discharge": interval})
recurr_dfs.append(df)
# Merge NWM recurr intervals into a single layer
nwm_recurr_intervals_all = reduce(lambda x,y: pd.merge(x,y, on='feature_id', how='outer'), recurr_dfs)
nwm_recurr_intervals_all = pd.melt(nwm_recurr_intervals_all, id_vars=['feature_id'], value_vars=recurr_intervals, var_name='recurr_interval', value_name='discharge_cms')
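        # Illustration of the reshape above (numbers are made up): a wide row such as
        #   feature_id   2       5       10
        #   101          300.0   450.0   520.0
        # becomes long/tidy rows
        #   feature_id   recurr_interval   discharge_cms
        #   101          2                 300.0
        #   101          5                 450.0
        #   101          10                520.0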
# Append catfim data (already set up in format similar to nwm_recurr_intervals_all)
cat_fim = pd.read_csv(catfim_flows_filename, dtype={'feature_id':str})
nwm_recurr_intervals_all = nwm_recurr_intervals_all.append(cat_fim)
# Convert discharge to cfs and filter
nwm_recurr_intervals_all['discharge_cfs'] = nwm_recurr_intervals_all.discharge_cms * 35.3147
nwm_recurr_intervals_all = nwm_recurr_intervals_all.filter(items=['discharge_cfs', 'recurr_interval','feature_id']).drop_duplicates()
# Identify unique gages
usgs_crosswalk = hydrotable.filter(items=['location_id', 'feature_id']).drop_duplicates()
usgs_crosswalk.dropna(subset=['location_id'], inplace=True)
nwm_recurr_data_table = pd.DataFrame()
usgs_recurr_data = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Landscape Evaporative Response Index
This script will download all or update existing LERI files used in the app. It
uses the same FTP server as EDDI.
Production notes:
- LERI does not usually cover the full grid and only dates back to
2000, so maybe there would be space to experiment with a
different resolution?
- Also, LERI is not available for the same time periods as EDDI, SPI,
and SPEI. The monthly values are available for 1, 3, 7 and 12
month-windows.
- The 1- and 3-month files come out every month, the 7-month files only
once per year (January), and the 12-month files twice per year
(September and December). Not sure why this is, and it would throw
the time-period selection system off. Perhaps we start with just
the 1- and 3-month LERIs then brainstorm how to fit the others in.
- Also, these are netcdf files, so the process will be a blend of
Get_EDDI.py and Get_WWDT.py.
- I am sharing the temp folder with EDDI, so don't run the two at the
same time (Get_LERI and Get_EDDI).
Created on Mon Mar 18 09:47:33 2019
@author: User
"""
import datetime as dt
import ftplib
from glob import glob
from netCDF4 import Dataset
import numpy as np
import os
from osgeo import gdal
import pandas as pd
import sys
from tqdm import tqdm
import xarray as xr
if sys.platform == 'win32':
sys.path.insert(0, 'C:/Users/User/github/Ubuntu-Practice-Machine')
os.chdir('C:/Users/User/github/Ubuntu-Practice-Machine')
data_path = 'f:/'
elif 'travis' in os.getcwd():
os.chdir('/home/travis/github/Ubuntu-Practice-Machine')
data_path = ''
else:
sys.path.insert(0, '/root/Sync/Ubuntu-Practice-Machine')
os.chdir('/root/Sync/Ubuntu-Practice-Machine')
data_path = '/root/Sync'
from functions import toNetCDF, toNetCDFAlbers, toNetCDFPercentile, isInt
# These make output logs too noisy to see what happened
gdal.PushErrorHandler('CPLQuietErrorHandler')
os.environ['GDAL_PAM_ENABLED'] = 'NO'
# There are often missing epsg codes in the gcs.csv file, but proj4 works
proj = ('+proj=aea +lat_1=20 +lat_2=60 +lat_0=40 +lon_0=-96 +x_0=0 +y_0=0 ' +
'+ellps=GRS80 +datum=NAD83 +units=m no_defs')
# Get resolution from file call
try:
res = float(sys.argv[1])
except:
res = 0.25
# In[] Data Source and target directories
ftp_path = 'ftp://ftp.cdc.noaa.gov/Projects/LERI/CONUS_archive/data/'
temp_folder = os.path.join(data_path, 'data/droughtindices/netcdfs/leri')
pc_folder = os.path.join(data_path, 'data/droughtindices/netcdfs/percentiles')
if not os.path.exists(temp_folder):
os.makedirs(temp_folder)
if not os.path.exists(pc_folder):
os.makedirs(pc_folder)
# In[] Index Options
indices = ['leri1', 'leri3']
# In[] Define scraping routine
def getLERI(scale, date, temp_folder):
'''
The date in the file name always uses the first day of the month.
'''
year = date.year
month = date.month
file_name = 'LERI_{:02d}mn_{}{:02d}01.nc'.format(scale, year, month)
local_file = os.path.join(temp_folder, 'leri.nc')
with open(local_file, 'wb') as dst:
ftp.retrbinary('RETR %s' % file_name, dst.write)
return local_file
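# Usage sketch for getLERI (illustrative only): it assumes the module-level `ftp`
# connection opened further below has already changed into the LERI data directory,
# which is how the update loop uses it. The date is a placeholder.
def _example_get_leri():
    example_date = dt.datetime(2019, 3, 1)  # hypothetical month to fetch
    return getLERI(scale=1, date=example_date, temp_folder=temp_folder)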
# In[] Today's date, month, and year
todays_date = dt.datetime.today()
today = np.datetime64(todays_date)
print("##")
print("#####")
print("############")
print("#######################")
print("#######################################")
print("####################################################")
print("\nRunning Get_LERI.py using a " + str(res) + " degree resolution:\n")
print(str(today) + '\n')
# In[] Get time series of currently available values
# Connect to FTP
ftp = ftplib.FTP('ftp.cdc.noaa.gov', 'anonymous', '<EMAIL>')
for index in indices:
ftp.cwd('/Projects/LERI/CONUS_archive/data/')
print('\n' + index)
original_path = os.path.join(data_path, "data/droughtindices/netcdfs/",
index + ".nc")
percentile_path = os.path.join(data_path,
"data/droughtindices/netcdfs/percentiles",
index + '.nc')
albers_path = os.path.join(data_path, "data/droughtindices/netcdfs/albers",
index + '.nc')
scale = index[-2:]
scale = int("".join([s for s in scale if isInt(s)]))
# Delete existing contents of temporary folder
temps = glob(os.path.join(temp_folder, "*"))
for t in temps:
os.remove(t)
####### If we are only missing some dates #################################
if os.path.exists(original_path):
with xr.open_dataset(original_path) as data:
dates = pd.DatetimeIndex(data.time.data)
data.close()
# Extract dates
d1 = dates[0]
d2 = dates[-1]
# Get a list of the dates already in the netcdf file
existing_dates = | pd.date_range(d1, d2, freq="M") | pandas.date_range |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 19 15:43:27 2019
@author: mikeriess
"""
import pandas as pd
import numpy as np
def InitialFormatting(df, maxcases, dateformat):
import pandas as pd
#Work on a subset:
casestoload = df["id"].unique().tolist()[0:maxcases]
df = df.loc[df["id"].isin(casestoload)]
# find cases to drop due to length
print("Cases before dropping len=1:",len(casestoload),"cases",len(df),"rows")
# Make function to apply to groups
def func(sub):
out = None
keepid = min(sub.id)
if len(sub) > 1:
out = keepid
return out
# Make list of cases above length 1
df_grp = df.groupby('id').apply(func)
#Remove NaNs from the list
keepers = df_grp.values
keepers = [i for i in keepers if i]
# Drop cases with only one event:
df = df.loc[df["id"].isin(keepers)]
print("Cases after dropping len=1:",len(keepers),"cases",len(df),"rows")
    #Sort the dataframe by time afterwards
df['parsed_date'] = pd.to_datetime(df.time, format = dateformat, exact = True)
##########################################################################
print("Sorting by id, date (chronological order)")
#generate new ID column:
df = df.assign(id=(df['id']).astype('category').cat.codes)
df["id"] = df.id.astype('int32')
# Ensure ID starts at 1
if min(df.id) == 0:
df.id = df.id +1
    # Sort the DF based on caseid, and the date of the event
df = df.sort_values(['id',"parsed_date"], ascending=[True, True])
df = df.drop("parsed_date",axis=1)
return df
def GetFileInfo(df):
print("Number of cases in log:",len(df["id"].unique()))
import numpy as np
import pandas as pd
#Get the maximal trace length, for determining prefix length
max_length = np.max(df['id'].value_counts())
print("longest trace is:",max_length)
#Look at the time format:
print("Time format:",df["time"].loc[0])
print("Std. format: %Y-%m-%d %H:%M:%S")
print(df.head())
return max_length
def MakeSplitCriterion(df, trainsize=0.8, mode="event"):
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import time as tm
def datetime_range(start=None, end=None):
span = end - start
for i in range(span.days + 1):
yield start + timedelta(days=i)
#Parse date
df["time_parsed"] = pd.to_datetime(df["time"])
#Get min max dates:
earliest_date = min(df["time_parsed"])
lastest_date = max(df["time_parsed"])
#Find the date to divide on:
dates = list(datetime_range(start=earliest_date, end=lastest_date))
n_dates = len(dates)
splitpoint = n_dates*trainsize
splitpoint = int(np.round(splitpoint,decimals=0))
dividing_date = dates[splitpoint]
dividing_date = dividing_date
print("=======================================")
print("Log starts at:",earliest_date)
print("Last event starts at:",lastest_date)
print("Train-test split happens at:",dividing_date)
print("=======================================")
if mode=="event":
"""
Here we simply divide by date of the event,
and disregard that a case could be in both train and test set
this way
"""
df["trainset"] = df["time_parsed"] < dividing_date
df["trainset"].value_counts()
split_criterion = df[["id","trainset"]]
split_criterion = split_criterion.rename(columns={'id':'caseid',
'trainset':'trainset'}, inplace=False)
split_criterion = split_criterion.reset_index(drop=True)
split_criterion = split_criterion.drop_duplicates(subset="caseid",keep="first")
print(len(split_criterion["caseid"].unique().tolist()))
print(len(split_criterion))
print(np.sum(df["trainset"]*1))
print("=======================================")
if mode=="case":
"""
Here we remove all cases that are in both train and test set
"""
# For every case, verify if it has both True & False events
# If it has, drop that case ID
# And remember to print it
df["trainset"] = df["time_parsed"] < dividing_date
df["trainset"].value_counts()
split_criterion = df[["id","trainset"]]
split_criterion = split_criterion.rename(columns={'id':'caseid',
'trainset':'trainset'}, inplace=False)
split_criterion = split_criterion.reset_index(drop=True)
#Groupby and get count of every unique value per case id
validation = pd.DataFrame(split_criterion.groupby('caseid').trainset.nunique())
validation["caseid"] = validation.index
#If a caseid has both true and false within it (count == 2),
#it should be dropped.
print("=======================================")
print("Dropping cases that have events in both train + testsets:")
print("=======================================")
print("Cases before dropping:",len(validation["trainset"]))
validation["keep"] = validation["trainset"] == 1
validation = validation.loc[validation["keep"]==True]
print("Cases after dropping:",len(validation["trainset"]))
#list of caseids to keep
ids_keep = validation["caseid"]
#drop those thet should not be kept
print("Total events before:",len(split_criterion))
split_criterion = split_criterion.loc[split_criterion["caseid"].isin(ids_keep)]
print("Total events after:",len(split_criterion))
split_criterion = split_criterion.drop_duplicates(subset="caseid",keep="first")
print("=======================================")
print(len(split_criterion))
print(np.sum(split_criterion["trainset"]*1))
return split_criterion
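# Minimal usage sketch for MakeSplitCriterion (illustrative only): a tiny two-case log
# with timestamps in the default "%Y-%m-%d %H:%M:%S" format. The returned frame maps
# each caseid to a boolean trainset flag.
def _example_make_split_criterion():
    toy = pd.DataFrame({"id": [1, 1, 2, 2],
                        "time": ["2019-01-01 10:00:00", "2019-01-02 10:00:00",
                                 "2019-06-01 10:00:00", "2019-06-02 10:00:00"],
                        "event": ["a", "b", "a", "b"]})
    return MakeSplitCriterion(toy, trainsize=0.8, mode="case")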
def GenerateTrainData(df,
category_cols=[],
numeric_cols=[],
dateformat = "%Y-%m-%d %H:%M:%S",
droplastev=True,
drop_end_target=True,
get_activity_target=True,
get_case_features = True,
dummify_time_features = True,
max_prefix_length = 2,
window_position="last_k"):
#Make copy of df
data = df
#Subset only relevant variables
df = df[["id","time","event"]+category_cols+numeric_cols]
import time as tm
from datetime import datetime
import pandas as pd
import time
# Make new case ids: ##############################
cases = data["id"].unique().tolist()
newcaseids = list(range(0,len(cases)))
dictdf = pd.DataFrame([cases,newcaseids]).T
dictdf.columns =["id","newid"]
newdata = pd.merge(left=data,right=dictdf,on="id")
    newdata = newdata.rename(columns={'id': 'dropme',
                                      'newid': 'id'},
                             inplace=False).drop("dropme", axis=1)
# List all cases by their new id:
cases = data["id"].unique().tolist()
# Make new event ids: ##############################
evids = []
for i in cases:
subset = data.loc[data["id"] == i]
evids = evids + list(range(0,len(subset)))
    evids = [x + 1 for x in evids]  # shift event ids so they start at 1
#set the new eventids
data["eventid"] = evids
#make a counter to keep status
num_cases = len(cases)
# Generate features case by case
for i in cases:
#iteration = iteration +1
print("case:",i, "of",num_cases)
#Look only at one caseid at a time
subset = data.loc[data["id"] == i]
subset.index = subset.eventid
"""
#######################################################################
PREFIX:
#######################################################################
"""
index1 = 0
        #determine whether to start at the beginning or the end of the trace
if window_position == "last_k":
#if trace is smaller than desired prefix, just pick the full trace
if max_prefix_length > len(subset):
start = 1 #0
stop = len(subset) - index1 #
            #If the max prefix len is smaller than the actual trace,
#take the K last events (sliding window approach)
if max_prefix_length < len(subset):
start = len(subset) - max_prefix_length
stop = len(subset) - index1
#If max prefix is identical to trace len, start from one
if max_prefix_length == len(subset):
start = 1 #0
stop = len(subset) - index1
if window_position == "first_k":
#if trace is smaller than desired prefix len, just pick the full trace
if max_prefix_length > len(subset):
start = 1 #0
stop = len(subset) - index1 #
            #If the max prefix len is smaller than the actual trace,
#take the K FIRST events (sliding window approach)
if max_prefix_length < len(subset):
start = 1
stop = max_prefix_length - index1
#If max prefix is identical to trace len, start from one
if max_prefix_length == len(subset):
start = 1 #0
stop = len(subset) - index1
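        # Worked example (illustrative): if len(subset) == 10 and max_prefix_length == 3,
        # "last_k" yields start = 7, stop = 10 while "first_k" yields start = 1, stop = 3.
        # The .loc slice below is inclusive of both endpoints.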
print("start",start,"stop",stop)
#Prefix capability: Subset k last events from trace
subset = subset.loc[start:stop]
#Make sure the data to be dummified also follows prefix convention
if i == 1:
datasub = subset
print("len subset:",len(subset))
print("len datasub:",len(datasub))
if i > 1:
datasub = | pd.concat([datasub, subset],axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
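        # Splits the csv into num_tasks contiguous row ranges and reads them in parallel
        # threads: the first task keeps the header, later tasks skip past their offset and
        # read header-less chunks whose columns are re-attached from the first result.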
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
        # we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
            # check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
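# header=2 skips the first two physical lines and uses the third as the
# header row, equivalent to reading the file without those lines.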
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
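# header=[0, 1] builds a column MultiIndex from the first two rows; the
# 'malformed' variants below exercise how partially labelled header rows are
# reconciled with index_col.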
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
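# converters maps a column name (or position) to a callable applied to each
# raw string before type inference; here it turns '1521,1541'-style decimals
# into floats, e.g. float('1521,1541'.replace(',', '.')).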
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows=4 drops the first four source lines (comments included); header=1
# then takes the second remaining line ('A,B,C') as the header row
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
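# parse_dates={'nominal': [1, 2]} concatenates columns 1 and 2 and parses the
# combined string into a single datetime column named 'nominal'.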
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
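# With usecols, an integer index_col refers to a position within the
# selected columns, not within the original file.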
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
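# A column that starts as integers but later contains floats should be
# coerced to float64 across the parser's internal chunks without a warning.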
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
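# usecols restricts parsing to the given columns, by position or by name; in
# the cases below, names is expected to line up with the selected subset.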
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
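# IDs wider than int64 should be kept as strings (object dtype); forcing the
# conversion with converters={'ID': np.int64} is expected to overflow.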
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
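# sep=None makes the python engine sniff the delimiter from the data, which
# should detect the '|' separator here.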
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
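# read_fwf accepts either explicit (start, end) colspecs or a list of column
# widths; both parameterizations below should produce the same frame as the
# comma-separated reference.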
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen; this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
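# A None boundary in a colspec is open-ended: (3, None) reads to the end of
# the line and (None, 3) starts at the beginning of the line.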
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# a warning message is printed when engine == 'c' for now, so capture stdout
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
class TestFwfColspaceSniffing(tm.TestCase):
def test_full_file(self):
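# With neither colspecs nor widths given, read_fwf should infer the column
# boundaries from whitespace and match the explicitly specified colspecs.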
# File with all values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 <NAME> 9315.45 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 <NAME> 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65 5000.00 2/5/2007
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 5/25/1985
761 <NAME>-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = '''
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00
761 <NAME> 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r'''
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~<NAME>
33+++122.33\\\bar.........<NAME>
++44~~~~12.01 baz~~<NAME>
~~55 11+++foo++++<NAME>-Smith
..66++++++.03~~~bar <NAME>
'''.strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
raise nose.SkipTest(
'Bytes-related test - only needs to work on Python 3')
test = '''
שלום שלום
ום שלל
של ום
'''.strip('\r\n')
expected = pd.read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)], header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(BytesIO(test.encode('utf8')),
header=None, encoding='utf8'))
class CParserTests(ParserTests):
""" base class for CParser Testsing """
def test_buffer_overflow(self):
# GH9205
# test certain malformed input files that cause buffer overflows in
# tokenizer.c
malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer
malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer
malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer
for malf in (malfw, malfs, malfl):
try:
df = self.read_table(StringIO(malf))
except Exception as cperr:
self.assertIn(
'Buffer overflow caught - possible malformed input file.', str(cperr))
def test_buffer_rd_bytes(self):
# GH 12098
# src->buffer can be freed twice leading to a segfault if a corrupt
# gzip file is read with read_csv and the buffer is filled more than
# once before gzip throws an exception
data = '\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09' \
'\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0' \
'\xA6\x4D' + '\x55' * 267 + \
'\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00' \
'\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO'
for i in range(100):
try:
_ = self.read_csv(StringIO(data),
compression='gzip',
delim_whitespace=True)
except Exception as e:
pass
class TestCParserHighMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_table(*args, **kwds)
def test_compact_ints(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_parse_dates_empty_string(self):
# #2263
s = StringIO("Date, test\n2012-01-01, 1\n,2")
result = self.read_csv(s, parse_dates=["Date"], na_filter=False)
self.assertTrue(result['Date'].isnull()[1])
def test_usecols(self):
raise nose.SkipTest(
"Usecols is not supported in C High Memory engine.")
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
# check with delim_whitespace=True
df = self.read_csv(StringIO(data.replace(',', ' ')), comment='#',
delim_whitespace=True)
tm.assert_almost_equal(df.values, expected)
# check with custom line terminator
df = self.read_csv(StringIO(data.replace('\n', '*')), comment='#',
lineterminator='*')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_skiprows_lineterminator(self):
# GH #9079
data = '\n'.join(['SMOSMANIA ThetaProbe-ML2X ',
'2007/01/01 01:00 0.2140 U M ',
'2007/01/01 02:00 0.2141 M O ',
'2007/01/01 04:00 0.2142 D M '])
expected = pd.DataFrame([['2007/01/01', '01:00', 0.2140, 'U', 'M'],
['2007/01/01', '02:00', 0.2141, 'M', 'O'],
['2007/01/01', '04:00', 0.2142, 'D', 'M']],
columns=['date', 'time', 'var', 'flag',
'oflag'])
# test with the three default lineterminators LF, CR and CRLF
df = self.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r\n')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n"
expected = pd.DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# this should ignore six lines including lines with trailing
# whitespace and blank lines. issues 8661, 8679
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# test skipping set of rows after a row with trailing spaces, issue
# #8983
expected = pd.DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
# empty frame
# GH12048
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_dtype_and_names_error(self):
# GH 8833
# passing both dtype and names resulting in an error reporting issue
data = """
1.0 1
2.0 2
3.0 3
"""
# base cases
result = self.read_csv(StringIO(data), sep='\s+', header=None)
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), sep='\s+',
header=None, names=['a', 'b'])
expected = DataFrame(
[[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
# fallback casting
result = self.read_csv(StringIO(
data), sep='\s+', header=None, names=['a', 'b'], dtype={'a': np.int32})
expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.int32)
tm.assert_frame_equal(result, expected)
data = """
1.0 1
nan 2
3.0 3
"""
# fallback casting, but not castable
with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'):
self.read_csv(StringIO(data), sep='\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
class TestCParserLowMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_table(*args, **kwds)
def test_compact_ints(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
def test_compact_ints_as_recarray(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_precise_conversion(self):
# GH #8002
tm._skip_if_32bit()
from decimal import Decimal
normal_errors = []
precise_errors = []
for num in np.linspace(1., 2., num=500): # test numbers between 1 and 2
text = 'a\n{0:.25}'.format(num) # 25 decimal digits of precision
normal_val = float(self.read_csv(StringIO(text))['a'][0])
precise_val = float(self.read_csv(
StringIO(text), float_precision='high')['a'][0])
roundtrip_val = float(self.read_csv(
StringIO(text), float_precision='round_trip')['a'][0])
actual_val = Decimal(text[2:])
def error(val):
return abs(Decimal('{0:.100}'.format(val)) - actual_val)
normal_errors.append(error(normal_val))
precise_errors.append(error(precise_val))
# round-trip should match float()
self.assertEqual(roundtrip_val, float(text[2:]))
self.assertTrue(sum(precise_errors) <= sum(normal_errors))
self.assertTrue(max(precise_errors) <= max(normal_errors))
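# Illustrative sketch (not part of the upstream test suite): how the float_precision
# option of read_csv changes the parsing of a long decimal string. Only pandas and the
# standard library are assumed; the number below is an arbitrary example.
import pandas as pd
from io import StringIO
text = 'a\n1.2345678901234567890123456'
default_val = pd.read_csv(StringIO(text))['a'][0]
high_val = pd.read_csv(StringIO(text), float_precision='high')['a'][0]
roundtrip_val = pd.read_csv(StringIO(text), float_precision='round_trip')['a'][0]
# per the test above, the round_trip parser is expected to match Python's float() exactly
print(default_val, high_val, roundtrip_val, float(text[2:]))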
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'object')
def test_pass_dtype_as_recarray(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'},
as_recarray=True)
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'S1')
def test_empty_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
expected = DataFrame({'one': np.empty(0, dtype='u1'),
'two': np.empty(0, dtype=np.object)})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_index_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), index_col=['one'],
dtype={'one': 'u1', 1: 'f'})
expected = DataFrame({'two': np.empty(0, dtype='f')},
index=Index([], dtype='u1', name='one'))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_multiindex_pass_dtype(self):
data = 'one,two,three'
result = self.read_csv(StringIO(data), index_col=['one', 'two'],
dtype={'one': 'u1', 1: 'f8'})
exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'), np.empty(0, dtype='O')],
names=['one', 'two'])
expected = DataFrame(
{'three': np.empty(0, dtype=np.object)}, index=exp_idx)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={
'one': 'u1', 'one.1': 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(
StringIO(data), mangle_dupe_cols=False, dtype={'one': 'u1'})
expected = pd.concat([Series([], name='one', dtype='u1')] * 2, axis=1)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_indexes(self):
### FIXME in GH9424
raise nose.SkipTest(
"GH 9424; known failure read_csv with duplicate columns")
data = 'one,one'
result = self.read_csv(
StringIO(data), mangle_dupe_cols=False, dtype={0: 'u1', 1: 'f'})
expected = pd.concat([Series([], name='one', dtype='u1'),
Series([], name='one', dtype='f')], axis=1)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_usecols_dtypes(self):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(0, 1, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
result2 = self.read_csv(StringIO(data), usecols=(0, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
self.assertTrue((result.dtypes == [object, np.int, np.float]).all())
self.assertTrue((result2.dtypes == [object, np.float]).all())
def test_usecols_implicit_index_col(self):
# #2654
data = 'a,b,c\n4,apple,bat,5.7\n8,orange,cow,10'
result = self.read_csv(StringIO(data), usecols=['a', 'b'])
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_whitespace(self):
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
result = self.read_csv(StringIO(data), delim_whitespace=True,
usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(self):
# #2733
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
df = self.read_csv(StringIO(data), sep='\s+', usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(df, expected)
def test_pure_python_failover(self):
data = "a,b,c\n1,2,3#ignore this!\n4,5,6#ignorethistoo"
result = self.read_csv(StringIO(data), comment='#')
expected = DataFrame({'a': [1, 4], 'b': [2, 5], 'c': [3, 6]})
tm.assert_frame_equal(result, expected)
def test_decompression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='gzip')
tm.assert_frame_equal(result, expected)
result = self.read_csv(open(path, 'rb'), compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='bz2')
tm.assert_frame_equal(result, expected)
# result = self.read_csv(open(path, 'rb'), compression='bz2')
# tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
with open(path, 'rb') as fin:
if compat.PY3:
result = self.read_csv(fin, compression='bz2')
tm.assert_frame_equal(result, expected)
else:
self.assertRaises(ValueError, self.read_csv,
fin, compression='bz2')
def test_decompression_regex_sep(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
# GH 6607
# Test currently only valid with the python engine because of
# regex sep. Temporarily copied to TestPythonParser.
# Here test for ValueError when passing regex sep:
with tm.assertRaisesRegexp(ValueError, 'regex sep'): # XXX
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
# GH 6607
with tm.assertRaisesRegexp(ValueError, 'regex sep'): # XXX
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_memory_map(self):
# it works!
result = self.read_csv(self.csv1, memory_map=True)
def test_disable_bool_parsing(self):
# #2090
data = """A,B,C
Yes,No,Yes
No,Yes,Yes
Yes,,Yes
No,No,No"""
result = read_csv(StringIO(data), dtype=object)
self.assertTrue((result.dtypes == object).all())
result = read_csv(StringIO(data), dtype=object, na_filter=False)
self.assertEqual(result['B'][2], '')
def test_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
df2 = self.read_csv(StringIO(data), sep=';', decimal=',')
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_custom_lineterminator(self):
data = 'a,b,c~1,2,3~4,5,6'
result = self.read_csv(StringIO(data), lineterminator='~')
expected = self.read_csv(StringIO(data.replace('~', '\n')))
tm.assert_frame_equal(result, expected)
data2 = data.replace('~', '~~')
result = self.assertRaises(ValueError, read_csv, StringIO(data2),
lineterminator='~~')
def test_raise_on_passed_int_dtype_with_nas(self):
# #2631
data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
self.assertRaises(Exception, read_csv, StringIO(data), sep=",",
skipinitialspace=True,
dtype={'DOY': np.int64})
def test_na_trailing_columns(self):
data = """Date,Currenncy,Symbol,Type,Units,UnitPrice,Cost,Tax
2012-03-14,USD,AAPL,BUY,1000
2012-05-12,USD,SBUX,SELL,500"""
result = self.read_csv(StringIO(data))
self.assertEqual(result['Date'][1], '2012-05-12')
self.assertTrue(result['UnitPrice'].isnull().all())
def test_parse_ragged_csv(self):
data = """1,2,3
1,2,3,4
1,2,3,4,5
1,2
1,2,3,4"""
nice_data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""
result = self.read_csv(StringIO(data), header=None,
names=['a', 'b', 'c', 'd', 'e'])
expected = self.read_csv(StringIO(nice_data), header=None,
names=['a', 'b', 'c', 'd', 'e'])
tm.assert_frame_equal(result, expected)
# too many columns, cause segfault if not careful
data = "1,2\n3,4,5"
result = self.read_csv(StringIO(data), header=None,
names=lrange(50))
expected = self.read_csv(StringIO(data), header=None,
names=lrange(3)).reindex(columns=lrange(50))
tm.assert_frame_equal(result, expected)
def test_tokenize_CR_with_quoting(self):
# #3453, this doesn't work with Python parser for some reason
data = ' a,b,c\r"a,b","e,d","f,f"'
result = self.read_csv(StringIO(data), header=None)
expected = self.read_csv(StringIO(data.replace('\r', '\n')),
header=None)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data))
expected = self.read_csv(StringIO(data.replace('\r', '\n')))
tm.assert_frame_equal(result, expected)
def test_raise_on_no_columns(self):
# single newline
data = "\n"
self.assertRaises(ValueError, self.read_csv, StringIO(data))
# test with more than a single newline
data = "\n\n\n"
self.assertRaises(ValueError, self.read_csv, StringIO(data))
def test_warn_if_chunks_have_mismatched_type(self):
# Issue #3866 If chunks are different types and can't
# be coerced using numerical types, then issue warning.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(DtypeWarning):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_invalid_c_parser_opts_with_not_c_parser(self):
from pandas.io.parsers import _c_parser_defaults as c_defaults
data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""
engines = 'python', 'python-fwf'
for default in c_defaults:
for engine in engines:
kwargs = {default: object()}
with tm.assertRaisesRegexp(ValueError,
'The %r option is not supported '
'with the %r engine' % (default,
engine)):
read_csv(StringIO(data), engine=engine, **kwargs)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with C-unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_raise_on_sep_with_delim_whitespace(self):
# GH 6607
data = 'a b c\n1 2 3'
with tm.assertRaisesRegexp(ValueError, 'you can only specify one'):
self.read_table(StringIO(data), sep='\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
data = """\
MyColumn
a
b
a
b"""
for arg in [True, False]:
with tm.assertRaises(TypeError):
pd.read_csv(StringIO(data), header=arg)
with tm.assertRaises(TypeError):
pd.read_table(StringIO(data), header=arg)
with tm.assertRaises(TypeError):
pd.read_fwf(StringIO(data), header=arg)
def test_multithread_stringio_read_csv(self):
# GH 11786
max_row_range = 10000
num_files = 100
bytes_to_df = [
'\n'.join(
['%d,%d,%d' % (i, i, i) for i in range(max_row_range)]
).encode() for j in range(num_files)]
files = [BytesIO(b) for b in bytes_to_df]
# Read all files in many threads
pool = ThreadPool(8)
results = pool.map(pd.read_csv, files)
first_result = results[0]
for result in results:
tm.assert_frame_equal(first_result, result)
def test_multithread_path_multipart_read_csv(self):
# GH 11786
num_tasks = 4
file_name = '__threadpool_reader__.csv'
num_rows = 100000
df = self.construct_dataframe(num_rows)
with tm.ensure_clean(file_name) as path:
df.to_csv(path)
final_dataframe = self.generate_multithread_dataframe(path,
num_rows,
num_tasks)
tm.assert_frame_equal(df, final_dataframe)
class TestMiscellaneous(tm.TestCase):
# for tests that don't fit into any of the other classes, e.g. those that
# compare results for different engines or test the behavior when 'engine'
# is not passed
def test_compare_whitespace_regex(self):
# GH 6607
data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
result_c = pd.read_table(StringIO(data), sep='\s+', engine='c')
result_py = pd.read_table(StringIO(data), sep='\s+', engine='python')
print(result_c)
tm.assert_frame_equal(result_c, result_py)
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C-unsupported options with python-unsupported option
# (options will be ignored on fallback, raise)
with tm.assertRaisesRegexp(ValueError, 'Falling back'):
pd.read_table(StringIO(data), sep=None,
delim_whitespace=False, dtype={'a': float})
with tm.assertRaisesRegexp(ValueError, 'Falling back'):
pd.read_table(StringIO(data), sep='\s', dtype={'a': float})
with tm.assertRaisesRegexp(ValueError, 'Falling back'):
pd.read_table(StringIO(data), skip_footer=1, dtype={'a': float})
# specify C-unsupported options without python-unsupported options
with tm.assert_produces_warning(parsers.ParserWarning):
pd.read_table(StringIO(data), sep=None, delim_whitespace=False)
with tm.assert_produces_warning(parsers.ParserWarning):
pd.read_table(StringIO(data), sep='\s')
with tm.assert_produces_warning(parsers.ParserWarning):
pd.read_table(StringIO(data), skip_footer=1)
class TestParseSQL(tm.TestCase):
def test_convert_sql_column_floats(self):
arr = np.array([1.5, None, 3, 4.2], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_strings(self):
arr = np.array(['1.5', None, '3', '4.2'], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array(['1.5', np.nan, '3', '4.2'], dtype=object)
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_unicode(self):
arr = np.array([u('1.5'), None, u('3'), u('4.2')],
dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([u('1.5'), np.nan, u('3'), u('4.2')],
dtype=object)
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_ints(self):
arr = np.array([1, 2, 3, 4], dtype='O')
arr2 = np.array([1, 2, 3, 4], dtype='i4').astype('O')
result = lib.convert_sql_column(arr)
result2 = lib.convert_sql_column(arr2)
expected = np.array([1, 2, 3, 4], dtype='i8')
assert_same_values_and_dtype(result, expected)
assert_same_values_and_dtype(result2, expected)
arr = np.array([1, 2, 3, None, 4], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_longs(self):
arr = np.array([long(1), | long(2) | pandas.compat.long |
#!/usr/bin/env python
# coding: utf-8
# In[850]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import cross_val_score,KFold
from sklearn.neighbors import KNeighborsRegressor
# In[851]:
###DATA FRAMES###
# Supplied Dataset BAH #
power_df = pd.DataFrame.from_csv("energy_db.csv")
# GDP - World Bank Group #
gdp_df = pd.DataFrame.from_csv("gdp_power.csv")
# Population - World Bank Group #
population_df = pd.DataFrame.from_csv("population.csv")
# Hydropower Status Report - International Hydropower Association #
hydro_df = pd.DataFrame.from_csv("hydro_power.csv")
# Freedom in the World 2018 - Freedomhouse.org #
freedom_df = pd.DataFrame.from_csv("freedom.csv")
# Natural Disaster Probability - World Risk Report 2016 #
disasters_df = pd.DataFrame.from_csv("NaturalDisasterChance.csv")
# Nuclear Power Statistics - World Nuclear Association #
nuclear_df = pd.DataFrame.from_csv("Third World Nuclear Totals.csv")
# Wind Energy Statistics - World Energy Council #
wind_df = pd.DataFrame.from_csv("Wind energy - Wind energy.csv")
# Solar Energy Statistics - World Energy Council / World Bank Group #
solar_df = pd.DataFrame.from_csv("solar_potential - hydro_power.csv")
# Class Attributes for Training Data #
classattr_df = pd.DataFrame.from_csv("TrainClassAtributes.csv")
# In[852]:
# Remove irrelevant values #
power_df = power_df.drop(columns=["gppd_idnr", "owner", "source", "url", "geolocation_source", "year_of_capacity_data", "generation_gwh_2013", "generation_gwh_2014", "generation_gwh_2015", "generation_gwh_2016"])
# In[853]:
# Dataframe Merge #
# In[854]:
power_df = pd.merge(power_df, gdp_df, on=["country"])
# In[855]:
power_df = pd.merge(power_df, population_df, on=["country"])
# In[856]:
power_df = pd.merge(power_df, hydro_df, on=["country_long"])
# In[857]:
power_df = pd.merge(power_df, freedom_df, on=["country_long"])
# In[858]:
power_df = | pd.merge(power_df, disasters_df, on=["country_long"]) | pandas.merge |
import pandas as pd
from pandas import Series, DataFrame, Panel
import numpy as np
from datetime import datetime, date, time
import matplotlib.pyplot as plt
import matplotlib as mpl
t = pd.read_csv('ParkingCashlessDenorm.csv', header=None)
u = t.rename(columns= {0:"amount_paid",
1:"paid_duration_mins",
2:"start_date",
3:"start_day",
4:"end_date",
5:"end_day",
6:"start_time",
7:"end_time",
8:"DesignationType",
9:"Hours_of_Control",
10:"Tariff",
11:"Max_Stay",
12:"Spaces",
13:"Street",
14:"x_coordinate",
15:"y_coordinate",
16:"latitude",
17:"longitude"})
v = u[(u.Street == 'Devonshire Place') &
(pd.to_datetime(u.start_date) == pd.to_datetime('2013-02-28 00:00:00'))]
v['start_date'] = pd.to_datetime(v['start_date'])
v['end_date'] = pd.to_datetime(v['end_date'])
# make a datetime for the selected day at midnight
ts_now = | pd.to_datetime("2013-02-28 00:00:00") | pandas.to_datetime |
import pandas as pd
import numpy as np
from multiprocessing import Pool
import tqdm
import sys
import gzip as gz
from tango.prepare import init_sqlite_taxdb
def translate_taxids_to_names(res_df, reportranks, name_dict):
"""
Takes a pandas dataframe with ranks as columns, contigs as rows and taxids as values, and translates the taxids
to names column by column using a taxid->name dictionary.
Parameters
----------
res_df: pandas.DataFrame
Results with taxids
reportranks: list
List of taxonomic ranks to report results for
name_dict: dictionary
Dictionary mapping taxids -> names
Returns
-------
res: pandas.DataFrame
Dataframe with names instead of taxids
"""
res = {}
for rank in reportranks:
res[rank] = [name_dict[taxid] for taxid in res_df.loc[:,rank]]
res = pd.DataFrame(res)
res.index = res_df.index
res = res.loc[:, reportranks]
return res
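# Minimal usage sketch (illustrative only): translate a small result frame of taxids into
# names with a hand-made taxid->name dictionary. Assumes translate_taxids_to_names from
# above is in scope; taxids/names mirror the make_lineage_df docstring example further down.
import pandas as pd
example_res = pd.DataFrame({"phylum": [1224, 1239], "genus": [48736, 1578]},
index=["contig1", "contig2"])
example_names = {1224: "Proteobacteria", 1239: "Firmicutes",
48736: "Ralstonia", 1578: "Lactobacillus"}
translated = translate_taxids_to_names(example_res, ["phylum", "genus"], example_names)
# 'translated' keeps the contig index and the requested rank order, but holds names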
def get_thresholds(df, top=10):
"""
Here bit-score thresholds are calculated per query and returned in a dictionary.
The pandas DataFrame is first sorted by bitscore (high to low) and grouped by query; for the first entry
per query the top% of the best hit is calculated and the result converted to a dictionary.
Parameters
----------
df: pandas.DataFrame
DataFrame of diamond results
top: int
Percentage range of top bitscore
Returns
-------
thresholds: dict
Dictionary with queries as keys and bitscore thresholds as values
"""
thresholds = (df.sort_values("bitscore", ascending=False).groupby(level=0).first().bitscore * (
(100 - top)) / 100).to_dict()
return thresholds
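# Illustrative sketch: with top=10 each query keeps hits scoring within 10% of its best
# bitscore. Assumes get_thresholds from above is in scope; the values are made up.
import pandas as pd
example_df = pd.DataFrame({"bitscore": [100.0, 95.0, 50.0, 200.0]},
index=["query1", "query1", "query1", "query2"])
example_thresholds = get_thresholds(example_df, top=10)
# expected: {'query1': 90.0, 'query2': 180.0}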
def get_rank_thresholds(ranks, thresholds):
"""
Constructs dictionary of rank-specific thresholds
Parameters
----------
ranks: list
Taxonomic ranks to assign
thresholds: list
Thresholds for taxonomic ranks
Returns
-------
Dictionary of thresholds
"""
t_len, r_len = len(thresholds), len(ranks)
if t_len != r_len:
sys.exit("ERROR: Number of taxonomic ranks ({}) and number of thresholds ({}) differ\n".format(r_len, t_len))
return dict(zip(ranks, thresholds))
def add_names(x, taxid, ncbi_taxa):
"""
This function translates taxonomy ids to names. It operates per-row in the lineage dataframe.
Parameters
----------
x: pandas.DataFrame
DataFrame of one taxid and its taxonomic ranks
taxid: int
Taxid being evaluated
ncbi_taxa: ete3.ncbi_taxonomy.ncbiquery.NCBITaxa
The ete3 sqlite database connection
Returns
-------
The original DataFrame merged with the taxa names
"""
# Get a names dictionary for all taxids in the row
names = ncbi_taxa.get_taxid_translator(list(x.loc[taxid].values) + [taxid])
n = {}
# Iterate ranks
for rank in list(x.columns):
# Get taxid for the current rank
t = x.loc[taxid, rank]
# If taxid is negative it means that there is no classified taxonomy at this rank
# Instead we get the last known name in the hierarchy. We can then use the negative values to translate into
# the name with the "Unclassified." prefix.
# If the name is 'root' we just use 'Unclassified'
if t < 0:
known_name = names[-t]
if known_name == "root":
name = "Unclassified"
else:
name = known_name
# If taxid is positive we just use the name from the dictionary
else:
name = names[t]
# Add name to a dictionary with keys in the form of {rank}.name
n["{}.name".format(rank)] = name
name_df = pd.DataFrame(n, index=[taxid])
return pd.merge(x, name_df, left_index=True, right_index=True)
def propagate_lower(x, taxid, ranks):
"""
Shift known ranks down through the taxonomic hierarchy.
Parameters
----------
x: pandas.DataFrame
DataFrame of one taxid and its taxonomic ranks
taxid: int
Taxid being evaluated
ranks: list
Ranks used for assigning
Returns
-------
pandas.DataFrame updated with missing ranks
Some proteins in the database may map to a taxonomic rank above the lowest taxonomic rank that we are trying to
assign. For instance, if we use the ranks 'superkingdom phylum genus species' and a protein maps to a taxid at
rank phylum then we want to add the taxonomic information at the genus and species levels. This is done here by
adding the negative taxid of the lowest known rank to the lower ranks.
Example:
In the Uniref90 database the entry 'E1GVX1' maps to taxonomy id 838 (rank: genus, name: Prevotella).
When creating the lineage for taxid 838 we add '-838' to rank species.
"""
rev_ranks = [ranks[x] for x in list(range(len(ranks) - 1, -1, -1))]
missing = {}
known = taxid
for rank in rev_ranks[0:]:
if rank not in x.columns:
missing[rank] = -known
else:
known = x.loc[taxid, rank]
return pd.merge(x, pd.DataFrame(missing, index=[taxid]), left_index=True, right_index=True)
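# Illustrative sketch mirroring the docstring example: taxid 838 (genus Prevotella) has no
# species-level assignment, so the species column receives -838. Assumes propagate_lower
# from above is in scope; phylum 976 is used to complete the minimal lineage row.
import pandas as pd
example_x = pd.DataFrame({"phylum": [976], "genus": [838]}, index=[838])
example_filled = propagate_lower(example_x, 838, ["phylum", "genus", "species"])
# expected: example_filled.loc[838, "species"] == -838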
def get_lca(r, assignranks, reportranks):
"""
Assign lowest common ancestor from a set of taxids.
Parameters
----------
r: pandas.DataFrame
Results for a single query, extracted from the main diamond results file
assignranks: list
Taxonomic ranks to assign taxonomy for
reportranks: list
Taxonomic ranks to report taxonomy for
Returns
-------
a tuple of dictionaries with ranks as keys and taxa names/ids as values
This function takes a query-slice of the diamond results after filtering by score (and rank-threshold if tango mode
is 'rank_lca' or 'rank_vote'). It then iterates through each rank in reverse order and checks how many unique taxids
are found at that rank. If there is only one taxid at a rank, that taxid (and the ranks above it) is returned as the LCA.
"""
query = r.index.unique()[0]
# Reverse ranks for iterating
rev_ranks = [assignranks[x] for x in list(range(len(assignranks) - 1, -1, -1))]
# Iterate through the assignranks
for rank in rev_ranks:
higher_ranks = reportranks[0:reportranks.index(rank) + 1]
higher_rank_names = ["{}.name".format(x) for x in higher_ranks]
# Count number of taxa at rank
c = r.groupby(rank).count()
# If there's only one taxa then we have found the LCA
if len(c) == 1:
if len(r) == 1:
lca_taxids = r.loc[query, higher_ranks].values
else:
lca_taxids = r.loc[query, higher_ranks].values[0]
return dict(zip(higher_ranks, lca_taxids))
return {}
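# Illustrative sketch: two hits for one query that disagree at genus level but share a
# phylum collapse to an LCA at phylum. Assumes get_lca from above is in scope; the taxids
# (phylum 1224, genera 48736 and 286) are arbitrary examples.
import pandas as pd
example_hits = pd.DataFrame({"phylum": [1224, 1224], "genus": [48736, 286]},
index=["query1", "query1"])
example_lca = get_lca(example_hits, ["phylum", "genus"], ["phylum", "genus"])
# expected: {'phylum': 1224}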
def parse_with_rank_thresholds(r, assignranks, reportranks, rank_thresholds, mode, vote_threshold):
"""Assigns taxonomy using rank_specific thresholds
The ranks used to assign taxonomy are iterated in reverse (e.g. species, genus, phylum).
At each rank, results are filtered by the corresponding rank threshold;
if no hits remain after filtering, the next rank is evaluated.
Then, if mode=='rank_lca', a lowest common ancestor is calculated from all remaining taxids.
However, if mode=='rank_vote', taxids are counted among the remaining hits and all results matching taxids
that occur more than vote_threshold are used to determine the lowest common ancestor.
If a taxonomy can be assigned at a rank, it is returned directly. If no taxonomy can be assigned at any of the
ranks, empty results are returned.
Parameters
----------
r: pandas.DataFrame
Dataframe slice for a query
assignranks: list
Taxonomic ranks used to assign taxonomy
reportranks: list
Taxonomic ranks at which taxonomy is reported
rank_thresholds: dict
Dictionary of rank_specific thresholds
mode: str
'rank_lca' or 'rank_vote'
vote_threshold: float
Cutoff used to filter out common taxids
Returns
-------
tuple
Dictionaries with taxonomy names and taxonomy ids at each rank
"""
# Start from lowest rank
rev_ranks = [assignranks[x] for x in list(range(len(assignranks) - 1, -1, -1))]
for rank in rev_ranks:
# Make sure that LCA is not set below current rank
allowed_ranks = assignranks[0:assignranks.index(rank) + 1]
# Get rank threshold
threshold = rank_thresholds[rank]
# Filter results by rank threshold
try:
_r = r.loc[r.pident >= threshold]
except KeyError:
continue
if len(_r) == 0:
continue
lca_taxids = {}
# After filtering, either calculate lca from all filtered taxids
if mode == "rank_lca":
lca_taxids = get_lca(_r, allowed_ranks, reportranks)
# Or at each rank, get most common taxid
elif mode == "rank_vote":
vote = get_rank_vote(_r, rank, vote_threshold)
if len(vote) > 0:
lca_taxids = get_lca(vote, allowed_ranks, reportranks)
if len(lca_taxids.keys()) > 0:
return lca_taxids
return {}
def get_rank_vote(r, rank, vote_threshold=0.5):
"""
Filter results based on fraction of taxa
Parameters
----------
r: pandas.DataFrame
Results for a single query, after filtering with bitscore and rank-specific thresholds
rank: str
Current rank being investigated
vote_threshold: float
Required fraction of hits from a single taxa in order to keep taxa
Returns
-------
Filtered dataframe only containing taxa that meet vote_threshold
Here taxa are counted among all hits remaining for a query after filtering using bitscore and rank-specific
thresholds. Taxa are counted at a certain rank and counts are normalized. Hits belonging to taxa above
vote_threshold are kept while others are filtered out.
"""
# Create dataframe for unique taxids filtered at this rank threshold
taxid_counts = pd.DataFrame(dict.fromkeys(r.staxids.unique(), 1), index=["count"]).T
# Add taxid for rank being investigated
rank_df = r.groupby("staxids").first().reset_index()[[rank, "staxids"]].set_index("staxids")
rank_df = pd.merge(taxid_counts, rank_df, left_index=True, right_index=True)
# Sum counts for current rank
rank_sum = rank_df.groupby(rank).sum()
rank_norm = rank_sum.div(rank_sum.sum())
rank_norm = rank_norm.sort_values("count", ascending=False)
votes = rank_norm.loc[rank_norm["count"] > vote_threshold]
if len(votes) > 0:
return r.loc[r[rank].isin(votes.index)]
return []
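# Illustrative sketch: three hits from three different subject taxids, two of which belong
# to the same genus; with vote_threshold=0.5 only the majority genus survives. Assumes
# get_rank_vote from above is in scope; staxids and genus taxids are arbitrary examples.
import pandas as pd
example_hits = pd.DataFrame({"staxids": [101, 102, 103],
"genus": [48736, 48736, 286]},
index=["query1", "query1", "query1"])
example_vote = get_rank_vote(example_hits, "genus", vote_threshold=0.5)
# expected: only the two rows whose genus is 48736 remain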
def propagate_taxids(res, ranks):
"""
Transfer taxonomy ids to unassigned ranks based on best known taxonomy
Example:
{'species': -1, 'family': -171549, 'genus': -171549, 'order': 171549, 'phylum': 976, 'class': 200643, 'superkingdom': 2}
should become
{'species': -171549, 'family': -171549, 'genus': -171549, 'order': 171549, 'phylum': 976, 'class': 200643, 'superkingdom': 2}
Parameters
----------
res: dict
Dictionary of ranks and taxonomy ids
ranks: list
Ranks to assign taxonomy to
Returns
-------
res: dict
Dictionary with updated taxonomy ids
"""
known = -1
for rank in ranks:
# If not -1 (Unclassified) at rank, store assignment as known
if res[rank] != -1:
known = res[rank]
continue
# If -1 at rank (Unclassified), add the taxid with the '-' prefix
if res[rank] == -1:
res[rank] = -abs(known)
return res
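# Illustrative sketch reproducing the docstring example: unassigned ranks (-1) inherit the
# negative taxid of the closest known assignment. Assumes propagate_taxids is in scope.
example_ranks = ["superkingdom", "phylum", "class", "order", "family", "genus", "species"]
example_res = {"species": -1, "family": -171549, "genus": -171549, "order": 171549,
"phylum": 976, "class": 200643, "superkingdom": 2}
example_propagated = propagate_taxids(example_res, example_ranks)
# expected: example_propagated["species"] == -171549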
def series2df(df):
"""Converts pandas series to pandas dataframe"""
if str(type(df)) == "<class 'pandas.core.series.Series'>":
df = pd.DataFrame(df).T
return df
def read_taxidmap(f, ids):
"""
Reads the protein to taxid map file and stores mappings
Parameters
----------
f: str
Input file with protein_id->taxid map
ids: list
Protein ids to store taxids for
Returns
-------
Dictionary of protein ids to taxid and all unique taxids
"""
taxidmap = dict.fromkeys(ids, -1)
open_function = open
if ".gz" in f:
open_function = gz.open
with open_function(f, 'rt') as fhin:
for line in tqdm.tqdm(fhin, desc="Reading idmap {}".format(f), ncols=100, unit=" lines"):
items = (line.rstrip()).rsplit()
# If file has only two columns, assume taxid in second
if len(items) == 2:
protid, taxid = items
# Otherwise, assume format is same as NCBI protein mapping
else:
protid, taxid = items[0], items[2]
# Add map to dictionary
# We initialize the dictionary with -1 so we make an attempt to add the taxid + 1
# If the protid is not in the dictionary we skip it
try:
taxidmap[protid] += int(taxid) + 1
except KeyError:
continue
except ValueError:
continue
return pd.DataFrame(taxidmap, index=["staxids"]).T, list(set(taxidmap.values()))
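# Illustrative sketch: a minimal two-column protein->taxid map parsed for two protein ids.
# Assumes read_taxidmap from above is in scope (it needs tqdm and pandas); the file
# contents and protein ids are made up.
import os, tempfile
fd, example_path = tempfile.mkstemp(suffix=".tsv")
os.write(fd, b"protA 1224\nprotB 1239\n")
os.close(fd)
example_map, example_taxids = read_taxidmap(example_path, ["protA", "protB"])
# example_map is a one-column DataFrame ('staxids') indexed by protein id
os.remove(example_path)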
def read_df(infile, top=10, e=0.001, input_format="tango", taxidmap=None):
"""
Reads the blast results from file and returns a dictionary with query->results.
Note that the input is assumed to be sorted by bitscore for each query. The first entry for a query is used to set
the score threshold for storing hits for that query. So if a query has a bitscore of 100 and --top 10 is specified
then we only store subsequent hits that have a bitscore of at least (100-0.1*100) = 90.
Tango-formatted output contains an additional taxid column compared to the standard blast format 6:
query1 subject1 93.6 47 3 0 146 6 79 125 8.5e-16 91.3 314295
query1 subject2 100.0 44 0 0 137 6 484 527 2.5e-15 89.7 9347
query2 subject3 53.5 241 84 2 645 7 15 255 1.3e-53 216.9 864142
where the last column is the taxid of the subject.
Otherwise the output may have the typical blast format 6 columns.
Parameters
----------
infile: str
Arguments from argument parser
top: int
Keep results within top% of best bitscore
e: float
Maximum allowed e-value to keep a hit.
input_format: str
Blast format. 'tango' if taxid for each subject is present in blast results, otherwise 'blast'
taxidmap: str
File mapping each subject id to a taxid
Returns
-------
tuple
The function returns a tuple with dictionary of query->results and
unique taxonomy ids (if tango format) or unique subject ids
"""
open_function = open
if ".gz" in infile:
open_function = gz.open
r = {}
taxids = []
queries = {}
with open_function(infile, 'rt') as fhin:
for line in tqdm.tqdm(fhin, desc="Reading {}".format(infile), ncols=100, unit=" lines"):
items = line.rstrip().rsplit()
query, subject, pident, evalue, score = items[0], items[1], float(items[2]), \
float(items[10]), float(items[11])
try:
min_score = queries[query]['min_score']
except KeyError:
min_score = score * ((100 - top) / 100)
queries[query] = {'min_score': min_score}
if score < min_score or evalue > e:
continue
if input_format == "tango" and len(items) > 12:
taxid = items[12]
taxids.append(taxid)
# TODO: Is there a way to skip storing the same taxid from a worse hit for the same query
elif input_format == "blast" and len(items) == 12:
taxid = ""
if not taxidmap:
sys.exit(
"ERROR: Standard blast input detected with no protein -> taxid file specified (--taxidmap).")
else:
continue
# Add results for query to dictionary
try:
r[query] += [[subject, pident, evalue, score, int(taxid)]]
except KeyError:
r[query] = [[subject, pident, evalue, score, int(taxid)]]
# If this is blast format then we return all subject ids found
if input_format == "blast":
ids = list(set([r[key][i][0] for key in list(r.keys()) for i in range(0, len(r[key]))]))
return r, ids
# If this is tango format then return all taxids found
return r, list(set(taxids))
def process_lineages(items):
"""
Looks up lineage information from taxids.
The lineage object is a list of taxonomic ids corresponding to the full lineage of a single taxid.
"""
taxid, ranks, taxdir, dbname, lineage = items
# Read the taxonomy db
ncbi_taxa = init_sqlite_taxdb(taxdir, dbname)
# Get ranks for each taxid in the lineage
lineage_ranks = ncbi_taxa.get_rank(lineage)
x = pd.DataFrame(lineage_ranks, index=["rank"]).T
x = x.loc[x["rank"].isin(ranks)].reset_index().T
x.columns = x.loc["rank"]
x.drop("rank", inplace=True)
x.index = [taxid]
# Add taxids for lower ranks in the hierarchy
x = propagate_lower(x, taxid, ranks)
# Add names for taxids
x = add_names(x, taxid, ncbi_taxa)
return x
def make_name_dict(df, ranks):
"""
Creates a dictionary of taxids to taxonomy names, including Unclassified ranks
Parameters
----------
df: pandas.DataFrame
Lineage dataframe
ranks: list
Ranks to store names information for
Returns
-------
name_dict: dict
Name dictionary mapping taxonomy ids to names
"""
name_dict = {}
for rank in ranks:
name_dict.update(dict(zip(df[rank].values, df["{}.name".format(rank)].values)))
name_dict.update(dict(zip(-abs(df[rank]), "Unclassified." + df["{}.name".format(rank)])))
name_dict[-1] = "Unclassified"
return name_dict
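# Illustrative sketch: the name dictionary maps positive taxids to names and negative
# taxids to 'Unclassified.<last known name>'. Assumes make_name_dict from above is in
# scope; the single lineage row mirrors the make_lineage_df docstring example below.
import pandas as pd
example_lineage = pd.DataFrame({"genus": [48736], "genus.name": ["Ralstonia"]}, index=[859655])
example_names = make_name_dict(example_lineage, ["genus"])
# expected: {48736: 'Ralstonia', -48736: 'Unclassified.Ralstonia', -1: 'Unclassified'}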
def make_lineage_df(taxids, taxdir, dbname, ranks, cpus=1):
"""
Creates a lineage dataframe with full taxonomic information for a list of taxids.
Example:
taxid species phylum genus genus.name phylum.name species.name
859655 305 1224 48736 Ralstonia Proteobacteria Ralstonia solanacearum
387344 1580 1239 1578 Lactobacillus Firmicutes Lactobacillus brevis
358681 1393 1239 55080 Brevibacillus Firmicutes Brevibacillus brevis
Parameters
----------
taxids: list
List of taxonomic ids to obtain information for
taxdir: str
Path to directory holding taxonomic info
dbname: str
Name of ete3 sqlite database within taxdir
ranks: list
Ranks to store information for
cpus: int
Number of cpus to use
Returns
-------
lineage_df: pandas.DataFrame
Data Frame with full taxonomic info
"""
# Read the taxonomy db
ncbi_taxa = init_sqlite_taxdb(taxdir, dbname)
lineages = ncbi_taxa.get_lineage_translator(taxids)
# Store potential missing taxids and warn user
missing_taxids = set([int(x) for x in taxids]).difference(lineages.keys())
# Get possible translations for taxids that have been changed
_, translate_dict = ncbi_taxa._translate_merged(list(set(taxids).difference(lineages.keys())))
rename = {y: x for x, y in translate_dict.items()}
# Update lineages with missing taxids
lineages.update(ncbi_taxa.get_lineage_translator(translate_dict.values()))
items = [[taxid, ranks, taxdir, dbname, lineages[taxid]] for taxid in list(lineages.keys())]
with Pool(processes=cpus) as pool:
res = list(
tqdm.tqdm(pool.imap(process_lineages, items), desc="Making lineages", total=len(items),
unit=" taxids", ncols=100))
lineage_df = pd.concat(res, sort=False)
lineage_df.rename(index=rename, inplace=True)
lineage_df.rename(index=lambda x: int(x), inplace=True)
for rank in ranks:
lineage_df[rank] = pd.to_numeric(lineage_df[rank])
name_dict = make_name_dict(lineage_df, ranks)
if len(missing_taxids) > 0:
sys.stderr.write("#WARNING: Missing taxids found:\n")
sys.stderr.write("#{}\n".format(",".join([str(x) for x in missing_taxids])))
sys.stderr.write("#To fix this, you can try to update the taxonomy database using\n")
sys.stderr.write("#tango download taxonomy --force\n")
return lineage_df.loc[:,lineage_df.dtypes==int], name_dict
def process_queries(args):
"""Receives a query and its results and assigns taxonomy"""
res_taxids = {}
min_rank_threshold = 0
query, res, rank_thresholds, top, reportranks, assignranks, mode, vote_threshold, lineage_df, taxidmap = args
if len(rank_thresholds) > 0 and "rank" in mode:
min_rank_threshold = min([x for x in rank_thresholds.values()])
columns = ['sseqid', 'pident', 'evalue', 'bitscore']
if len(res[0]) == 5:
columns += ['staxids']
# Create pandas dataframe for slice
res_df = pd.DataFrame(res, columns=columns, index=[query] * len(res))
# Add taxidmap if not present in results
if "staxids" not in res_df.columns:
res_df = pd.merge(res_df, taxidmap, left_on="sseqid", right_index=True, how="left")
# Calculate bit score threshold for slice
thresholds = get_thresholds(res_df, top=top)
# Set index
res_df.index.name = "qseqid"
# Merge with lineage df
res_df = pd.merge(res_df, lineage_df, left_on="staxids", right_index=True, how="left")
# Remove potential nan rows created if the blast results have taxids that are missing from lineage_df
res_df = res_df.loc[res_df[reportranks[0]] == res_df[reportranks[0]]]
# Initialize dictionary
res_taxids[query] = dict.fromkeys(reportranks, -1)
# Handle queries that return pandas Series
res_df = res_df.loc[res_df.bitscore >= thresholds[query]]
res_df = series2df(res_df)
lca_taxids = {}
# Parse with rank thresholds or by just filtering by bitscore
if "rank" in mode:
if len(res_df.loc[res_df.pident >= min_rank_threshold]) > 0:
lca_taxids = parse_with_rank_thresholds(res_df, assignranks, reportranks,
rank_thresholds, mode, vote_threshold)
else:
lca_taxids = get_lca(res_df, assignranks, reportranks)
# Update results with lca_taxids
res_taxids[query].update(lca_taxids)
res_taxids[query] = propagate_taxids(res_taxids[query], reportranks)
return res_taxids[query], query
def write_blobout(f, res_taxids, queries, ranks):
"""
Writes output in a format for use with blobtools
Parameters
----------
f: str
Outputfile
res_taxids: list
List of results for queries
queries: list
List of queries
ranks: list
Ranks to write results for
"""
rev_ranks = [ranks[x] for x in list(range(len(ranks) - 1, -1, -1))]
with open(f, 'w') as fhout:
for i, query in enumerate(queries):
d = res_taxids[i]
for rank in rev_ranks:
if rank in d.keys():
taxid = d[rank]
if taxid != -1:
fhout.write("{query}\t{taxid}\t1\tref\n".format(query=query, taxid=abs(taxid)))
break
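# Illustrative sketch: one classified and one unclassified query written in blobtools
# format (query, taxid, score, 'ref'). Assumes write_blobout from above is in scope;
# the assignments are made up and negative taxids have their sign stripped on output.
import os, tempfile
fd, example_out = tempfile.mkstemp(suffix=".tsv")
os.close(fd)
example_assignments = [{"phylum": 1224, "genus": -1224}, {"phylum": -1, "genus": -1}]
write_blobout(example_out, example_assignments, ["query1", "query2"], ["phylum", "genus"])
# query1 is reported at genus level as taxid 1224; query2 has no assignment and is skipped
os.remove(example_out)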
def stage_queries(res, lineage_df, input_format="tango", rank_thresholds=[45, 60, 85], top=10, mode="rank_lca",
vote_threshold=0.5, assignranks=["phylum", "genus", "species"],
reportranks=["superkingdom", "phylum", "class", "order", "family", "genus", "species"],
taxidmap=None):
"""
Parameters
----------
res: dict
Dictionary with queries as keys and a list of hits as values
lineage_df: pandas.DataFrame
Data frame of taxids and taxonomic information
input_format: str
'tango' or 'blast'
rank_thresholds: list
List of thresholds for ranks
top: int
Only evaluate results within <top> percent bitscore of best scoring hit
mode: str
'rank_lca' or 'rank_vote' for rank thresholds usage or 'score' to just filter by bitscore
vote_threshold: float
Cutoff used to filter out common taxids
assignranks: list
Ranks used to assign taxonomy
reportranks: list
Ranks to report taxonomy for (inferred from assignranks)
taxidmap: dict
Dictionary with subject ids as keys and taxids as values
Returns
-------
items: list
List of items to send to multiprocessing
"""
items = []
total_queries = len(res)
for q in tqdm.tqdm(sorted(res.keys()), total=total_queries, unit=" queries", ncols=100, desc="Staging queries"):
# If the diamond output does not have standard tango format we do some work to add this information.
item = [q, res[q], rank_thresholds, top, reportranks, assignranks, mode, vote_threshold]
if input_format == "blast":
# Get all subject ids
s = list(set([res[q][i][0] for i in range(0, len(res[q]))]).intersection(lineage_df.index))
# Get all taxids for this query
q_taxids = taxidmap.loc[s, "staxids"].unique()
item += [lineage_df.loc[q_taxids], taxidmap.loc[s]]
# If diamond output has taxonomy id then directly create the list of results to
# feed into the multiprocessing pool
else:
# Get all taxids for query
q_taxids = list(set([res[q][i][-1] for i in range(0, len(res[q]))]).intersection(lineage_df.index))
item += [lineage_df.loc[q_taxids], None]
items.append(item)
return items
def parse_hits(diamond_results, outfile, taxidout=False, blobout=False, top=10, evalue=0.001, input_format="tango",
taxidmap=False, mode="rank_lca", vote_threshold=0.5, assignranks=["phylum", "genus", "species"],
reportranks=["superkingdom", "phylum", "class", "order", "family", "genus", "species"],
rank_thresholds=[45, 60, 85], taxdir="./taxonomy/", sqlitedb="taxonomy.sqlite", chunksize=1, cpus=1):
"""
This is the main function to handle diamond result files and assign taxonomy to queries.
The function performs the following steps:
1. Checks rank-specific thresholds
2. Reads the diamond results file
3. If required, maps subject ids to taxonomy ids
4. Creates a dataframe of all unique taxonomy ids found for subjects and their taxa names for each rank
5. Stages queries for multiprocessing
6. Processes each query and returns it with assigned taxonomy
7. Writes output to file
Parameters
----------
diamond_results: str
Diamond results file
outfile: str
File to write results to
taxidout: str
If True, write results with taxonomic ids instead of names to file
blobout: str
If True, write output in blobtools format
top: int
Evaluate hits within this bitscore percent range of the best scoring hit
evalue: float
Filter hits with evalue larger than this
input_format: str
'tango' or 'blast' depending on whether the diamond results has subject taxids in the last column or not
taxidmap: str
Path to a file mapping subject ids to taxids (needed if input_format != 'tango')
mode: str
How to assign taxonomy: 'rank_lca' and 'rank_vote' use rank specific thresholds,
'score' only filters by bitscore
vote_threshold: float
When using 'rank_vote' to assign taxonomy, this is the fraction of hits that must have the same taxid to
assign a taxonomy at a rank
assignranks: list
Ranks used to assign taxonomy
reportranks: list
Ranks to report taxonomy for
rank_thresholds: list
Percent identity thresholds for assigning taxonomy
taxdir: str
Path to directory holding taxonomic information files
sqlitedb: str
Name of ete3 sqlite database within taxdir
chunksize: int
The size of chunks for the iterable being submitted to the process pool
cpus: int
The number of worker processes to use
args:
Input arguments from __main__.py
Returns
-------
Return code 0 if function finished without issue
"""
# Set up rank thresholds
if "rank" in mode:
rank_thresholds = get_rank_thresholds(assignranks, rank_thresholds)
# Read diamond results
res, ids = read_df(diamond_results, top, evalue, input_format, taxidmap)
# Read protein -> taxidmap file if specified
taxidmap = | pd.DataFrame() | pandas.DataFrame |
""" ``xrview.handlers`` """
import asyncio
import numpy as np
import pandas as pd
from bokeh.document import without_document_lock
from bokeh.models import ColumnDataSource
from pandas.core.indexes.base import InvalidIndexError
from tornado import gen
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
# TODO this fixes issues with tornado>=5, but it might also be the reason for
# the backed up range update callbacks
# see https://github.com/tornadoweb/tornado/issues/2531
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
class DataHandler(object):
"""
Parameters
----------
data : pandas DataFrame
"""
def __init__(self, data):
self.source = ColumnDataSource(data)
self.source.add(data.index, "index")
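# Illustrative sketch: wrapping a small DataFrame exposes its columns plus an explicit
# 'index' entry on the bokeh ColumnDataSource. Assumes bokeh and pandas are installed and
# DataHandler from above is in scope; the data values are arbitrary.
import pandas as pd
example_df = pd.DataFrame({"y": [0.0, 1.0, 4.0]}, index=[0, 1, 2])
example_handler = DataHandler(example_df)
# example_handler.source.data now contains the 'y' column and the 'index' array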
class InteractiveDataHandler(DataHandler):
def __init__(self, data, context=None, verbose=False):
super(InteractiveDataHandler, self).__init__(data)
self.data = data
self.source_data = self.source.data
self.context = context
self.verbose = verbose
self.selection_bounds = None
self.selection = []
self.pending_update = False
self.update_buffer = None
self.callbacks = {
"update_data": [],
"reset_data": [],
"update_source": [],
}
def get_dict(self):
""" Get data as a dict. """
new_source_data = self.data.to_dict(orient="list")
new_source_data["index"] = self.data.index
for k in list(new_source_data):
if isinstance(k, tuple):
new_source_data["_".join(k)] = new_source_data.pop(k)
return new_source_data
@without_document_lock
@gen.coroutine
def update(self, **kwargs):
""" Update callback for handler. """
self.pending_update = True
self.update_data(**kwargs)
self.update_selection()
if self.context is not None and self.context.doc is not None:
self.context.doc.add_next_tick_callback(self.update_source)
@without_document_lock
@gen.coroutine
def reset(self):
""" Reset data and selection to be displayed. """
self.selection_bounds = None
self.selection = []
for c in self.callbacks["reset_data"]:
c()
if self.context is not None and self.context.doc is not None:
self.context.doc.add_next_tick_callback(self.update_source)
@without_document_lock
@gen.coroutine
def update_data(self, **kwargs):
""" Update data and selection to be displayed. """
self.source_data = self.get_dict()
for c in self.callbacks["update_data"]:
c()
@without_document_lock
@gen.coroutine
def update_selection(self):
""" Update selection. """
if (
self.source.selected is not None
and self.selection_bounds is not None
):
self.selection = list(
np.where(
(self.source_data["index"] >= self.selection_bounds[0])
& (self.source_data["index"] <= self.selection_bounds[1])
)[0]
)
else:
self.selection = []
@gen.coroutine
def update_source(self):
""" Update data and selected.indices of self.source """
if self.verbose:
print("Updating source")
self.source.data = self.source_data
if self.source.selected is not None:
self.source.selected.indices = self.selection
for c in self.callbacks["update_source"]:
c()
self.pending_update = False
if self.update_buffer is not None:
self.context.doc.add_next_tick_callback(self.update_buffer)
self.update_buffer = None
def add_callback(self, method, callback):
""" Add a callback to one of this instance's methods.
Parameters
----------
method : str
The name of the method this callback will be attached to.
callback : callable
The callback function.
"""
if method not in self.callbacks:
raise ValueError("Unrecognized method name: " + str(method))
if callback in self.callbacks[method]:
raise ValueError(
str(callback) + " has already been attached to this instance."
)
self.callbacks[method].append(callback)
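# Illustrative usage sketch (added for clarity, not part of the original module):
# wiring an InteractiveDataHandler to a custom callback. The DataFrame contents
# and the callback body are made-up assumptions for demonstration only.
def _example_interactive_handler():
    df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
    handler = InteractiveDataHandler(df)
    handler.add_callback("update_source", lambda: print("source updated"))
    return handler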
class ResamplingDataHandler(InteractiveDataHandler):
"""
Parameters
----------
data : pandas DataFrame
factor : numeric
lowpass : bool, default False
context : TimeseriesViewer, optional
with_range : bool, default True
"""
def __init__(
self,
data,
factor,
lowpass=False,
context=None,
with_range=True,
verbose=False,
):
self.data = data
self.factor = factor
self.lowpass = lowpass
self.context = context
self.verbose = verbose
if with_range:
self.source_data = self.get_dict_from_range(
self.data.index[0], self.data.index[-1]
)
self.source = ColumnDataSource(self.source_data)
else:
self.source = ColumnDataSource(self.data)
self.source.add(self.data.index, "index")
self.source_data = self.source.data
self.selection_bounds = None
self.selection = []
self.pending_update = False
self.update_buffer = None
self.callbacks = {
"update_data": [],
"reset_data": [],
"update_source": [],
}
@staticmethod
def from_range(data, max_samples, start, end, lowpass):
""" Get sub-sampled pandas DataFrame from index range.
Parameters
----------
data : pandas DataFrame
The data to be sub-sampled
max_samples : numeric
The subsampling factor.
start : numeric
The start of the range to be sub-sampled.
end : numeric
The end of the range to be sub-sampled.
Returns
-------
data_new : pandas DataFrame
A sub-sampled slice of the data.
"""
# handle the case of no data
if data.shape[0] == 0:
return data
if start is None:
start = 0
else:
try:
start = data.index.get_loc(start, method="nearest")
except InvalidIndexError:
# handle non-ordered/non-unique index
start = np.argmin(np.abs(data.index - start))
if end is None:
end = data.shape[0]
else:
try:
end = data.index.get_loc(end, method="nearest") + 1
except InvalidIndexError:
# handle non-ordered/non-unique index
end = np.argmin(np.abs(data.index - end)) + 1
step = int(np.ceil((end - start) / max_samples))
# TODO: handle NaNs at start/end
if step == 0:
# hacky solution for range reset
data_new = pd.concat((data.iloc[:1], data.iloc[-1:]))
else:
data_new = data.iloc[start:end]
if step > 1 and lowpass:
# TODO make this work
from scipy.signal import butter, filtfilt
for c in data_new.columns:
if c != "selected":
coefs = butter(3, 1 / step)
data_new[c] = filtfilt(
coefs[0], coefs[1], data_new.loc[:, c]
)
data_new = data_new.iloc[::step]
# hacky solution for range reset
if start > 0:
                data_new = pd.concat((data.iloc[:1], data_new))
from bs4 import BeautifulSoup
import os
import glob
import pandas as pd
if not os.path.exists("parsed_files"):
os.mkdir("parsed_files")
df = pd.DataFrame()
import os
import pandas as pd
import datetime
from scrape_data.queries import *
from scrape_data.mysql_connect import *
def choose_team(country_name, division):
"""
This function returns a list of the teams per country and will
populate a drop down in the callbacks
:param country_name:
:return: list of team names per country
"""
from datetime import datetime
#connecting to DB
conn = footy_connect()
#grabbing dataframe
df = grab_team_names(conn, division, country_name)
df = create_seasons_list(df)
today_year = datetime.now().year
full = str(today_year-1) + '/' + str(today_year)
df = df.loc[df['dateYear'] == full]
df = df.drop_duplicates(subset='home_team')
conn.close()
return df
def home_vs_away(df, team_name):
"""
Helper function to check out wins loses and draws for a given team.
:param df: dataframe containing information to determine home or
away wins
:return: df
"""
outcome = []
for index, row in df.iterrows():
if row["home_team"] == team_name and row["full_time_results"] == 'H':
outcome.append("win")
elif row['home_team'] == team_name and row['full_time_results'] == 'A':
outcome.append('lose')
elif row['away_team'] == team_name and row['full_time_results'] == 'A':
outcome.append('win')
elif row['away_team'] == team_name and row['full_time_results'] == 'H':
outcome.append('lose')
elif row["away_team_goals"] == row["home_team_goals"]:
outcome.append("draw")
df['outcome'] = outcome
return df
def run_win_pct(team_name, df):
"""
    Function that calculates a team's winning percentage Year over Year (YoY)
Calculation:
Number of wins by the total number of competitions.
Then multiply by 100 = win percentage.
Number of loses by the total number of competitions.
Then multiply by 100 = loss percentage
this function also takes into account the home and away win/loss
percentages.
    :param team_name: Takes in the state of the team_names dropdown
    :param df: dataframe of matches (home/away teams, goals, full-time results and dateYear)
    :return: a dataframe of win/loss/draw percentages for the given team
"""
df['home_team'] = df['home_team'].str.lower()
df['away_team'] = df['away_team'].str.lower()
team_name = team_name.lower()
df_home = df[df['home_team'] == team_name]
df_away = df[df['away_team'] == team_name]
frames = [df_home,df_away]
df_fill = pd.concat(frames)
df = home_vs_away(df_fill, team_name)
home_matches = df[df['home_team'] == team_name]
away_matches = df[df['away_team'] == team_name]
home_matches = home_matches.drop(columns = ['away_team'])
away_matches = away_matches.drop(columns = ['home_team'])
#wins per season
home_team_win = home_matches.groupby(["home_team","dateYear"])["outcome"].apply(
lambda x: x[x.str.contains("win")].count()).reset_index()
away_team_win = away_matches.groupby(['away_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('win')].count()).reset_index()
home_team_loss = home_matches.groupby(['home_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('lose')].count()).reset_index()
away_team_loss = away_matches.groupby(['away_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('lose')].count()).reset_index()
home_team_tie = home_matches.groupby(['home_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('draw')].count()).reset_index()
away_team_tie = away_matches.groupby(['away_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('draw')].count()).reset_index()
#matches played per season
searchFor = ['win','lose','draw']
matches_home = home_matches.groupby(['home_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('|'.join(searchFor))].count()).reset_index()
matches_away = away_matches.groupby(['away_team', 'dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('|'.join(searchFor))].count()).reset_index()
#goals for and against
match_numbers = matches_home.merge(matches_away, how='left', left_on='dateYear', right_on='dateYear')
loss_merge = home_team_loss.merge(away_team_loss, how='left', left_on='dateYear', right_on='dateYear')
tie_merge = home_team_tie.merge(away_team_tie, how='left', left_on='dateYear', right_on='dateYear')
fin = home_team_win.merge(away_team_win, how = 'left', left_on='dateYear', right_on='dateYear')
fin['Total Wins'] = fin['outcome_x'] + fin['outcome_y']
fin['Total Losses'] = loss_merge['outcome_x'] + loss_merge['outcome_y']
fin['Total Draws'] = tie_merge['outcome_x'] + tie_merge['outcome_y']
fin['Total Matches'] = match_numbers['outcome_x'] + match_numbers['outcome_y']
fin['Win PCT'] = (fin['Total Wins'] / fin['Total Matches'] * 100).round(2)
fin['Loss PCT'] = (fin['Total Losses'] / fin['Total Matches'] * 100).round(2)
fin['Draw PCT'] = (fin['Total Draws'] / fin['Total Matches'] * 100).round(2)
#home match percentage
fin['Home Win PCT'] = (home_team_win['outcome'] / matches_home['outcome'] * 100).round(2)
fin['Away Win PCT'] = (away_team_win['outcome'] / matches_away['outcome'] * 100).round(2)
fin['Home Loss PCT'] = (home_team_loss['outcome'] / matches_home['outcome'] * 100).round(2)
fin['Away Loss PCT'] = (away_team_loss['outcome'] / matches_away['outcome'] * 100).round(2)
return fin
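# Illustrative usage sketch (added for clarity, not part of the original module).
# The fixtures below are made up; the column names mirror the schema that
# home_vs_away() and run_win_pct() expect.
def _example_win_pct():
    sample = pd.DataFrame({
        'home_team': ['Alpha FC', 'Alpha FC', 'Beta FC', 'Gamma FC'],
        'away_team': ['Beta FC', 'Gamma FC', 'Alpha FC', 'Alpha FC'],
        'home_team_goals': [2, 1, 0, 1],
        'away_team_goals': [1, 1, 3, 1],
        'full_time_results': ['H', 'D', 'A', 'D'],
        'dateYear': ['2020/2021'] * 4,
    })
    return run_win_pct('Alpha FC', sample)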
def create_seasons_list(df=None, country = None):
"""
    This function breaks down the date of a given match and buckets it into
    a particular season, e.g. a match played on 02/07/2018 is bucketed into
    the 2017/2018 season.
    :param df: dataframe of matches with a 'dates' column
    :param country: optional country name; if given, the data is loaded from the database instead
    :return: a dataframe with an added 'dateYear' season column
"""
if country is not None:
conn = footy_connect()
df = grab_data(conn, country)
real_dates = []
import re
m = '\d+'
for index, row in df.iterrows():
x = re.findall(m, row['dates'])
x = int(x[1])
if x >= 8:
year1 = int(row['dates'][-2:]) + 2000
year2 = year1 + 1
real_dates.append(str(year1) + "/" + str(year2))
elif x <= 5:
year1 = int(row['dates'][-2:]) + 2000
year2 = year1 - 1
real_dates.append(str(year2) + "/" + str(year1))
else:
real_dates.append(0)
df['dateYear'] = real_dates
df = df[df['dateYear'] != 0]
conn.close()
else:
real_dates = []
import re
m = '\d+'
for index, row in df.iterrows():
x = re.findall(m, row['dates'])
x = int(x[1])
if x >= 8:
year1 = int(row['dates'][-2:]) + 2000
year2 = year1 + 1
real_dates.append(str(year1) + "/" + str(year2))
elif x <= 5:
year1 = int(row['dates'][-2:]) + 2000
year2 = year1 - 1
real_dates.append(str(year2) + "/" + str(year1))
else:
real_dates.append(0)
df['dateYear'] = real_dates
df = df[df['dateYear'] != 0]
return df
def table_per_season(df, division, year):
"""
Function that returns a complete dataframe with what you would normally
see as a soccer league table. It contains the matches played, wins, draws,
    losses, goals for/against, the difference, and the overall points a team
    earned throughout the season.
    :param df: dataframe of matches (with a dateYear column from create_seasons_list)
    :param division: an option from the divisions dropdown
    :param year: an option from the seasons dropdown
:return: a dataframe with the information for a specific season
"""
df = df[df['dateYear'] == year]
df = df[df['division'] == division]
#creating empty dataframe
final = pd.DataFrame()
#matches played
mp_home = df.groupby(['home_team'])['id'].count().reset_index()
mp_away = df.groupby(['away_team'])['id'].count().reset_index()
    # results
w_home = df.groupby(['home_team'])['full_time_results'].apply(lambda x: x[x.str.contains('H')].count()).reset_index()
w_away = df.groupby(['away_team'])['full_time_results'].apply(lambda x: x[x.str.contains('A')].count()).reset_index()
l_home = df.groupby(['home_team'])['full_time_results'].apply(lambda x: x[x.str.contains('A')].count()).reset_index()
l_away = df.groupby(['away_team'])['full_time_results'].apply(lambda x: x[x.str.contains('H')].count()).reset_index()
d_home = df.groupby(['home_team'])['full_time_results'].apply(lambda x: x[x.str.contains('D')].count()).reset_index()
d_away = df.groupby(['away_team'])['full_time_results'].apply(lambda x: x[x.str.contains('D')].count()).reset_index()
#gf/ga
gf_home = df.groupby(['home_team'])['home_team_goals'].sum().reset_index()
gf_away = df.groupby(['away_team'])['away_team_goals'].sum().reset_index()
gfh = df.groupby(['home_team'])['away_team_goals'].sum().reset_index()
gfa = df.groupby(['away_team'])['home_team_goals'].sum().reset_index()
#calcs
final['Team'] = mp_home['home_team']
final['MP'] = mp_home['id'] + mp_away['id']
final['W'] = w_home['full_time_results'] + w_away['full_time_results']
final['D'] = d_home['full_time_results'] + d_away['full_time_results']
final["L"] = l_home['full_time_results'] + l_away['full_time_results']
final['GF'] = gf_home['home_team_goals'] + gf_away['away_team_goals']
final['GA'] = gfa['home_team_goals'] + gfh['away_team_goals']
final['+/-'] = final['GF'] - final['GA']
final['PTS'] = (final['W'] * 3) + (final['D'] * 1)
final = final.sort_values(by='PTS', ascending=False)
return final
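# Illustrative usage sketch (added for clarity, not part of the original module).
# The two fixtures are made up; each team appears both home and away so the
# positional group-by alignment inside table_per_season() holds.
def _example_league_table():
    fixtures = pd.DataFrame({
        'id': [1, 2],
        'dateYear': ['2020/2021', '2020/2021'],
        'division': ['D1', 'D1'],
        'home_team': ['Alpha', 'Beta'],
        'away_team': ['Beta', 'Alpha'],
        'home_team_goals': [2, 1],
        'away_team_goals': [0, 1],
        'full_time_results': ['H', 'D'],
    })
    return table_per_season(fixtures, 'D1', '2020/2021')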
def goal_stats(df, team):
"""
This function looks at the % of goals scored home and away
relative to the shots taken in each game by season.
"""
    df_fin = pd.DataFrame()
import numpy as np
import pandas as pd
from trackintel.geogr.distances import check_gdf_planar, calculate_haversine_length
def calculate_modal_split(tpls_in, freq=None, metric="count", per_user=False, norm=False):
"""Calculate the modal split of triplegs
Parameters
----------
tpls_in : GeoDataFrame (as trackintel triplegs)
triplegs require the column `mode`.
freq : str
frequency string passed on as `freq` keyword to the pandas.Grouper class. If `freq=None` the modal split is
calculated on all data. A list of possible
values can be found `here <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset
-aliases>`_.
metric : {'count', 'distance', 'duration'}
Aggregation used to represent the modal split. 'distance' returns in the same unit as the crs. 'duration'
returns values in seconds.
per_user : bool, default: False
If True the modal split is calculated per user
norm : bool, default: False
If True every row of the modal split is normalized to 1
Returns
-------
modal_split : DataFrame
The modal split represented as pandas Dataframe with (optionally) a multi-index. The index can have the
levels: `('user_id', 'timestamp')` and every mode as a column.
Notes
------
`freq='W-MON'` is used for a weekly aggregation that starts on mondays.
If `freq=None` and `per_user=False` are passed the modal split collapses to a single column.
The modal split can be visualized using :func:`trackintel.visualization.modal_split.plot_modal_split`
Examples
--------
>>> triplegs.calculate_modal_split()
>>> tripleg.calculate_modal_split(freq='W-MON', metric='distance')
"""
tpls = tpls_in.copy()
# precalculate distance and duration if required
if metric == "distance":
if_planer_crs = check_gdf_planar(tpls)
if not if_planer_crs:
tpls["distance"] = calculate_haversine_length(tpls)
else:
tpls["distance"] = tpls.length
elif metric == "duration":
tpls["duration"] = tpls["finished_at"] - tpls["started_at"]
# create grouper
if freq is None:
if per_user:
tpls_grouper = tpls.groupby(["user_id", "mode"])
else:
tpls_grouper = tpls.groupby(["mode"])
else:
tpls.set_index("started_at", inplace=True)
tpls.index.name = "timestamp"
if per_user:
            tpls_grouper = tpls.groupby(["user_id", "mode", pd.Grouper(freq=freq)])
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
                tm.assert_numpy_array_equal(out, neg)
import requests
import pandas as pd
import re
import os
from bs4 import BeautifulSoup
def get_soup(url):
"""Wrapper function to Beautiful Soup.
Customised to parse json response.
Returns soup object.
"""
page = requests.get(url)
jsonResponse = page.json()
html = jsonResponse['html']
soup = BeautifulSoup(html, 'html.parser')
return(soup)
def get_seasons(url):
"""Get a list of available seasons.
Sample Record:
['WNBL 2020',
'2020',
'https://wnbl.basketball/stats/?&WHurl=%2Fcompetition%2F28356%2Fstatistics%2Fteam',
'28356']
"""
soup = get_soup(url)
options = soup.find_all("option")
seasons = []
for o in options:
# option HTML element
season_uri = o['value']
season_name = o.contents[0]
if (season_uri.startswith('https')):
# only save options with links
# extract the year from the season name
m = re.search('\d{4}',season_name)
season_year = m.group(0) if m else 1900
# competition ID
x = season_uri.split('%2F')
season_id = x[2]
seasons.append([ season_name, season_year, season_uri, season_id ])
return(seasons)
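# Illustrative usage sketch (added for clarity, not part of the original script).
# The stats_url argument is an assumption: it must point at the WNBL stats
# endpoint whose JSON response carries an 'html' field, as get_soup() expects.
def _example_list_seasons(stats_url):
    seasons = get_seasons(stats_url)
    for season_name, season_year, season_uri, season_id in seasons:
        print(season_year, season_name, season_id)
    return seasons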
def get_player_data(html):
"""Get player data from table.
Returns dataframe.
"""
# gather data as list of lists
data = []
# column headings
header = [ele.text.strip() for ele in html.find('thead').find_all('th')]
# parse table body
rows = html.find('tbody').find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
# save row
# data.append([ele for ele in cols if ele]) # Get rid of empty values
data.append(cols)
# convert lists to dataframe
    df = pd.DataFrame(data, columns=header)
import csv
import pickle
import pandas as pd
class HandleData:
def __init__(self, param_dict):
self.running_preds = pd.DataFrame()
self.target = param_dict['target']
self.model_file = param_dict['model_file']
self.file_id = self.model_file.split('.')[0]
self.end_row = len(param_dict['symbols'])-1
def data_triage(self, dict):
# write raw data to csv
self.write_mktdata_to_csv(dict)
# create dataframe and make new prediction with it
new_df = self.make_new_prediction(self.create_dataframe(dict))
#add new prediction to running data
running_df = self.add_to_running_stats(new_df)
# output running data to csv
self.write_predictions_to_csv(running_df)
def write_mktdata_to_csv(self, dict):
with open(self.file_id + '_DataOutput.csv', 'a', newline='') as csv_a, open(self.file_id + '_DataOutput.csv','r', newline='') as csv_r:
reader = csv.reader(csv_r)
writer = csv.DictWriter(csv_a, dict.keys())
# put each row into a dict
data = [row for row in reader]
# check to see if 2nd row is blank(since newline puts first entry on 2nd row)
try:
first_row_blank = True if data[1] == [] else False
except IndexError:
first_row_blank = True
if first_row_blank:
writer.writeheader()
writer.writerow(dict)
else:
writer.writerow(dict)
def write_predictions_to_csv(self, df):
with open(self.file_id + '_Predictions.csv', 'a', newline='') as csv_a, open(self.file_id + '_Predictions.csv','r', newline='') as csv_r:
reader = csv.reader(csv_r)
data = [row for row in reader]
try:
first_row_blank = True if data[1] == [] else False
except IndexError:
first_row_blank = True
if first_row_blank:
df.iloc[-1:].to_csv(csv_a, header=True)
else:
df.iloc[-1:].to_csv(csv_a, header=False)
def create_dataframe(self, dict):
df = pd.DataFrame([dict])
        df['Timestamp'] = pd.to_datetime(df['Timestamp']) + pd.Timedelta(hours=1, minutes=30)
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/14 21:21
Desc: China-Hong Kong macro indicators
https://data.eastmoney.com/cjsj/foreign_8_0.html
"""
import demjson
import pandas as pd
import requests
def marco_china_hk_cpi() -> pd.DataFrame:
"""
    Eastmoney macro data overview - China (Hong Kong) - consumer price index
    https://data.eastmoney.com/cjsj/foreign_8_0.html
    :return: consumer price index
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "0",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def marco_china_hk_cpi_ratio() -> pd.DataFrame:
"""
    Eastmoney macro data overview - China (Hong Kong) - consumer price index, year-over-year rate
    https://data.eastmoney.com/cjsj/foreign_8_1.html
    :return: consumer price index year-over-year rate
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "1",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def marco_china_hk_rate_of_unemployment() -> pd.DataFrame:
"""
    Eastmoney macro data overview - China (Hong Kong) - unemployment rate
    https://data.eastmoney.com/cjsj/foreign_8_2.html
    :return: unemployment rate
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "2",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def marco_china_hk_gbp() -> pd.DataFrame:
"""
    Eastmoney macro data overview - China (Hong Kong) - Hong Kong GDP
    https://data.eastmoney.com/cjsj/foreign_8_3.html
    :return: Hong Kong GDP
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "3",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def marco_china_hk_gbp_ratio() -> pd.DataFrame:
"""
    Eastmoney macro data overview - China (Hong Kong) - Hong Kong GDP year-over-year
    https://data.eastmoney.com/cjsj/foreign_8_4.html
    :return: Hong Kong GDP year-over-year
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "4",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
    temp_df['前值'] = pd.to_numeric(temp_df['前值'])
import os
import pickle
from functools import reduce
from tqdm import tqdm
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
from sklearn.metrics import roc_auc_score, plot_precision_recall_curve
import matplotlib.pyplot as plt
def optimal_binning_boundary(x: pd.Series, y: pd.Series, nan: float = -999.) -> list:
'''
    Use a decision tree to obtain the list of optimal binning boundary values.
'''
    boundary = []  # list of bin boundary values to return
    x = x.fillna(nan).values  # fill missing values
    y = y.values
    clf = DecisionTreeClassifier(criterion='entropy',  # split by minimising information entropy
                                 max_leaf_nodes=6,  # maximum number of leaf nodes
                                 min_samples_leaf=0.05)  # minimum fraction of samples per leaf node
    clf.fit(x.reshape(-1, 1), y)  # fit the decision tree
n_nodes = clf.tree_.node_count
children_left = clf.tree_.children_left
children_right = clf.tree_.children_right
threshold = clf.tree_.threshold
for i in range(n_nodes):
        if children_left[i] != children_right[i]:  # internal node: collect its split threshold
boundary.append(threshold[i])
boundary.sort()
min_x = x.min()
    max_x = x.max() + 0.1  # +0.1 so the later groupby can include samples at the feature maximum
boundary = [min_x] + boundary + [max_x]
return boundary
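# Illustrative usage sketch (added for clarity, not part of the original module).
# Synthetic data only: larger x values are made more likely to be bad (y = 1),
# so the decision tree should return a small set of monotone cut points.
def _example_optimal_binning():
    rng = np.random.RandomState(0)
    x = pd.Series(rng.uniform(0, 100, size=2000))
    y = pd.Series((rng.uniform(0, 100, size=2000) < 0.6 * x).astype(int))
    return optimal_binning_boundary(x, y)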
def ks_table(y_predict, y_true, ascending=False, is_cate=False, sep=None, nbins=10):
"""
    Compute the KS (Kolmogorov-Smirnov) banding table.
    :param y_predict: list, array, pandas.Series; the predicted probabilities or scores
    :param y_true: list, array, pandas.Series; the true labels, 1 or 0, binary classification only
    :param ascending: boolean, default False, how y_predict is sorted.
        False sorts in descending order: a larger y_predict means a higher probability that y_true is 1 (a bad customer), typically used for predicted probabilities;
        True sorts in ascending order: a smaller y_predict means a higher probability that y_true is 1 (a bad customer), typically used for standard scores;
    :param sep: list, default None, preset cut points
    :return: tuple of (pandas.DataFrame KS banding table, list of cut points used)
"""
if len(y_predict) < 10:
return None
if not isinstance(y_predict, pd.Series):
y_predict = pd.Series(y_predict)
if not isinstance(y_true, pd.Series):
y_true = pd.Series(y_true)
y_predict = y_predict.reset_index(drop=True)
y_true = y_true.reset_index(drop=True)
data = pd.concat([y_predict, y_true], axis=1).reset_index(drop=True)
data = data.dropna()
data.columns = ['score', 'flag']
data.sort_values(by='score', ascending=ascending, inplace=True)
data = data.reset_index(drop=True)
cnt_all = data.shape[0]
if not is_cate:
if sep is not None:
sep = sorted(sep)
if ascending:
data["group"] = pd.cut(data['score'], bins=sep, labels=False)
else:
data["group"] = pd.cut(data['score'], bins=sep, labels=False).map(lambda x: len(sep) - 1 - x)
nbins = len(sep) - 1
else:
data["group"] = data.index.map(lambda x: min(nbins - 1, (x + 1) // (cnt_all / nbins)))
sep = data.groupby("group").agg({"score": "max"})["score"].tolist()
if ascending:
sep[-1] = float("inf")
sep = [float("-inf")] + sep
else:
sep[0] = float("inf")
sep.append(float("-inf"))
else:
data['group'] = data['score']
nbins = data['score'].nunique()
table = pd.DataFrame(np.arange(1, nbins + 1), columns=['Rank'])
    table['Total'] = data.groupby('group').size().reindex(table.index).fillna(0)  # the Total column holds the number of samples in each group
table['Population'] = table['Total'] / cnt_all
table['MinScore'] = data[['score', 'group']].groupby(['group']).min()
table['MeanScore'] = data[['score', 'group']].groupby(['group']).mean()
table['MaxScore'] = data[['score', 'group']].groupby(['group']).max()
table['BadCnt'] = data[['flag', 'group']].groupby(['group']).sum().reindex(table.index).fillna(0)
table['bumps'] = 0
table['GoodCnt'] = table['Total'] - table['BadCnt'].fillna(0)
table['InnerBadRate'] = table['BadCnt'] / table['Total']
table['bumps'] = 0
for ind in range(1, table.shape[0]):
if table.loc[table.index[ind], 'InnerBadRate'] > table.loc[table.index[ind - 1], 'InnerBadRate']:
table.loc[table.index[ind], 'bumps'] = table.loc[table.index[ind - 1], 'bumps'] + 1
else:
table.loc[ind, 'bumps'] = table.loc[ind - 1, 'bumps']
table['bumps'] = table['bumps'].astype('int64').fillna(0)
table['BadRate'] = table['BadCnt'] / sum(table['BadCnt'])
table['CumTotalBadRate'] = table['BadRate'].cumsum()
table['GoodRate'] = table['GoodCnt'] / sum(table['GoodCnt'])
table['CumTotalGoodRate'] = table['GoodRate'].cumsum()
table['K-S'] = (table['CumTotalBadRate'] - table['CumTotalGoodRate'])
table['Lift'] = table['InnerBadRate'] * (table['BadCnt'].sum() + table['GoodCnt'].sum()) / table['BadCnt'].sum()
table['BadRate'] = table['BadRate'].apply(lambda x: format(x, '.2%'))
table['CumTotalBadRate'] = table['CumTotalBadRate'].apply(lambda x: format(x, '.2%'))
table['CumTotalGoodRate'] = table['CumTotalGoodRate'].apply(lambda x: format(x, '.2%'))
total_information = {'Rank': 'Total', 'Population': 1.0, 'Total': data.shape[0], 'MinScore': min(y_predict), 'MeanScore': np.mean(y_predict),
'MaxScore': max(y_predict), 'BadCnt': sum(table['BadCnt']), 'GoodCnt': sum(table['GoodCnt']),
'InnerBadRate': sum(table['BadCnt']) / len(data), 'bumps': '.', 'BadRate': '.', 'CumTotalBadRate': '.', 'CumTotalGoodRate': '.',
'GoodRate': sum(table['GoodCnt']) / len(data), 'K-S': max(table['K-S']), 'Lift': '.'}
table = table.append(total_information, ignore_index=True)
selected_columns = ['Rank', 'Population', 'Total', 'bumps', 'MinScore', 'MeanScore', 'MaxScore', 'BadCnt', 'GoodCnt', 'InnerBadRate', 'BadRate',
'CumTotalBadRate', 'GoodRate', 'CumTotalGoodRate', 'K-S', 'Lift']
table = table.loc[:, selected_columns]
table['InnerBadRate'] = table['InnerBadRate'].apply(lambda x: format(x, '.2%'))
table['GoodRate'] = table['GoodRate'].apply(lambda x: format(x, '.2%'))
table['K-S'] = table['K-S'].apply(lambda x: format(x, '.2%'))
table['Population'] = table['Population'].apply(lambda x: format(x, '.2%'))
table['Lift'] = table['Lift'].map(lambda x: round(x, 2) if x != '.' else 1)
table['MinScore'] = table['MinScore'].apply(lambda x: format(x, '.2f'))
table['MaxScore'] = table['MaxScore'].apply(lambda x: format(x, '.2f'))
table['MeanScore'] = table['MeanScore'].apply(lambda x: format(x, '.2f'))
return table, sep
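# Illustrative usage sketch (added for clarity, not part of the original module).
# Synthetic scores where larger values are riskier, evaluated against made-up
# 0/1 labels; returns the banding table and the cut points that were used.
def _example_ks_table():
    rng = np.random.RandomState(0)
    score = pd.Series(rng.uniform(300, 900, size=1000))
    flag = pd.Series((rng.uniform(300, 900, size=1000) < score).astype(int))
    return ks_table(score, flag, ascending=False, nbins=10)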
def iv_table(x: pd.Series, y: pd.Series, is_cate: bool = False, nan: float = -999.) -> pd.DataFrame:
'''
    Compute the WOE and IV values for each bin of a variable and return a DataFrame.
'''
#x = x.fillna(nan)
    df = pd.concat([x, y], axis=1)  # combine x and y into one DataFrame for the later calculations
    df.columns = ['x', 'y']  # rename the feature and target columns
stat_num = df['y'].value_counts()
good_num,bad_num = stat_num[0],stat_num[1]
total_num = good_num+bad_num
df = df.dropna()
if not is_cate:
        boundary = optimal_binning_boundary(df.x, df.y, nan)  # get the list of optimal bin boundaries
        df['bins'] = pd.cut(x=x, bins=boundary, right=False)  # get the bin interval each x value falls into
else:
df['bins'] = df['x']
    grouped = df.groupby('bins')['y']  # count good, bad and total customers per bin
result_df = grouped.agg([('good', lambda y: (y == 0).sum()),
('bad', lambda y: (y == 1).sum()),
('total', 'count')])
    result_df['good_pct'] = result_df['good'] / result_df["good"].sum()  # share of good customers
    result_df['bad_pct'] = result_df['bad'] / result_df["bad"].sum()  # share of bad customers
    result_df['goodRecall'] = result_df['good'] / good_num  # share of all good customers captured
    result_df['badRecall'] = result_df['bad'] / bad_num  # share of all bad customers captured
    result_df['groupRecall'] = result_df['total'] / total_num  # share of all customers
    result_df['innerBadRate'] = result_df['bad'] / result_df['total']  # bad rate within the bin
result_df['woe'] = np.log(result_df['good_pct'] / result_df['bad_pct']) # WOE
result_df['iv'] = (result_df['good_pct'] - result_df['bad_pct']) * result_df['woe'] # IV
result_df['lift'] = (result_df['bad']/result_df['total'])/(bad_num/total_num) # lift
result_df = result_df.reset_index()
result_df.sort_values("bins",ascending=False,inplace=True)
result_df['bins'] = result_df['bins'].astype('str')
total_information = {'bins': 'Total', 'good': result_df['good'].sum(), 'bad': result_df['bad'].sum(), 'total': result_df['total'].sum(), 'good_pct': result_df['good_pct'].sum(),
'bad_pct': result_df['bad_pct'].sum(), 'goodRecall': result_df['goodRecall'].sum(), 'badRecall': result_df['badRecall'].sum(),
'groupRecall': result_df['groupRecall'].sum(), 'innerBadRate': result_df["bad"].sum()/(result_df["good"].sum()+result_df["bad"].sum()),
'woe': '.','iv': result_df['iv'].sum(), 'lift': (result_df["bad"].sum()/(result_df["good"].sum()+result_df["bad"].sum()))/(bad_num/total_num)}
result_df = result_df.append(total_information, ignore_index=True)
return result_df
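# Illustrative usage sketch (added for clarity, not part of the original module),
# reusing the same kind of synthetic data as the binning example above.
def _example_iv_table():
    rng = np.random.RandomState(1)
    x = pd.Series(rng.uniform(0, 100, size=2000))
    y = pd.Series((rng.uniform(0, 100, size=2000) < 0.5 * x).astype(int))
    return iv_table(x, y)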
def df_auc(x: pd.Series, y: pd.Series, ifnan:bool=False, nan: float = -999.) -> float:
    '''Compute the AUC based on the score and label columns of a dataframe.'''
df = pd.concat([x, y], axis=1)
df.columns = ['x', 'y']
if ifnan:
df = df.fillna(nan)
else:
df = df.dropna()
return roc_auc_score(df.y, df.x)
def score_summary(x: pd.Series, y: pd.Series, is_cate: bool = False, ifnan:bool=False, nan: float = -999.) -> dict:
summary_dict = {'iv':None, 'auc':None, 'coverage_rate':None}
df = pd.concat([x, y], axis=1)
df.columns = ['x', 'y']
    # 1. coverage rate
coverage_rate = round(1-df.x.isna().sum()/len(df), 4)
summary_dict['coverage_rate'] = coverage_rate
if ifnan:
df = df.fillna(nan)
else:
df = df.dropna()
    # 2. KS statistics
ks_t = ks_table(df.x, df.y, is_cate=is_cate, ascending=False, sep=None)
summary_dict['ks'] = float(ks_t[0]['K-S'].iloc[-1].strip('%'))
    # AUC statistics
auc = df_auc(df.x, df.y, ifnan=False)
summary_dict['auc'] = round(auc,4)
    # 3. IV statistics
iv_t = iv_table(df.x, df.y, is_cate=is_cate)
summary_dict['iv'] = iv_t['iv'].iloc[-1]
return iv_t, ks_t[0], summary_dict
class Psi(object):
def __init__(self, bins: int = 10, minimal: int = 1):
self.bins = bins
self.minimal = minimal
self.psi_detail = dict()
self.psi = pd.DataFrame()
self.base = dict()
def _distribution_continuous(self, series: pd.Series, bins: list = None):
if bins:
bins[0] = float("-inf")
bins[-1] = float("inf")
            series_cut = pd.cut(series, bins=bins)
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from yitian.datasource import *
from yitian.datasource import preprocess
class Test(unittest.TestCase):
# def test_standardize_date(self):
# data_pd = pd.DataFrame([
# ['01/01/2019', 11.11],
# ['01/04/2019', 44.44],
# ['01/03/2019', 33.33],
# ['01/02/2019', 22.22]
# ], columns=['Trade Date', 'price'])
#
# expect_pd = pd.DataFrame([
# ['01/01/2019', 11.11],
# ['01/04/2019', 44.44],
# ['01/03/2019', 33.33],
# ['01/02/2019', 22.22]
# ], columns=['date', 'price'])
#
# assert_frame_equal(expect_pd, preprocess.standardize_date(data_pd))
#
# def test_standardize_date_with_multi_date_column(self):
# data_pd = pd.DataFrame([
# ['2019-01-01 00:00:00', '2019-01-01 00:00:00', 11.11],
# ['2019-01-02 00:00:00', '2019-01-01 00:00:00', 22.22],
# ['2019-01-03 00:00:00', '2019-01-01 00:00:00', 33.33],
# ['2019-01-04 00:00:00', '2019-01-01 00:00:00', 44.44],
# ], columns=['DATE', 'date', 'price'])
#
# with self.assertRaises(ValueError) as context:
# preprocess.standardize_date(data_pd)
#
# assert str(context.exception) == \
# str("Original cols ({cols}) cannot be reconnciled with date options ({option})"\
# .format(cols=data_pd.columns.tolist(), option=RAW_DATE_OPTIONS))
def test_create_ts_pd(self):
data_pd = pd.DataFrame([
['01/01/2019', 11.11],
['01/04/2019', 44.44],
['01/03/2019', 33.33],
['01/02/2019', 22.22]
], columns=['date', 'price'])
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11],
            [pd.Timestamp('2019-01-02'), 22.22],
import os, glob, re, chardet
import pandas as pd
from statistics import mode
############################################
# clean_weird function
############################################
def clean_weird(vector_dirty, extra=False):
## # ============================
## # ORDER MATTERS HERE
## # ============================
## # strip and to lower
vector_clean = vector_dirty.str.strip()
vector_clean = vector_clean.str.lower()
## # city names at the end
vector_clean = vector_clean.str.replace(r'(,\s\w+(\s\w+)?)$', '', regex=True)
## # ============================
## # remove weird symbols
## # ============================
vector_clean = vector_clean.str.replace(r'á','a', regex=True)
vector_clean = vector_clean.str.replace(r'ã','a', regex=True)
vector_clean = vector_clean.str.replace(r'ä','a', regex=True)
vector_clean = vector_clean.str.replace(r'é','e', regex=True)
vector_clean = vector_clean.str.replace(r'ë','e', regex=True)
vector_clean = vector_clean.str.replace(r'É','E', regex=True)
vector_clean = vector_clean.str.replace(r'í','i', regex=True)
vector_clean = vector_clean.str.replace(r'ó','o', regex=True)
vector_clean = vector_clean.str.replace(r'ö','o', regex=True)
vector_clean = vector_clean.str.replace(r'ü','u', regex=True)
vector_clean = vector_clean.str.replace(r'ñ','n', regex=True)
## # ============================
## # remove company designations
## # ============================
## # see:
## # https://www.corporateinformation.com/Company-Extensions-Security-Identifiers.aspx
## # https://www.nathantrust.com/insights/comprehensive-guide-to-a-designated-activity-company-dac
if extra==True:
## # combos: as,sl,scs,sa,sac,sau,sas,spa,sanv, etc. (with and without intermediate . or /)
s_chars = r'(a\W?s\W?|s\W?((a|e|p|c|l)\W?)?((a|s|u)\W?)?\W?(n\W?v\W?)?(r\W?l\W?)?)$'
vector_clean = vector_clean.str.replace(s_chars, '', regex=True)
## # combos: nv,nvsa,bv,oyj,ltd, etc. (with and without intermediate . or /)
s_chars = r'((n|b)\W?v\W{0,2}?(s\W?a\W?)?|o\W?y\W?j\W?|l\W?t\W?d\W?)$'
vector_clean = vector_clean.str.replace(s_chars, '', regex=True)
## # combos: cvba,ag,plc,dac, etc. (with and without intermediate . or /)
s_chars = r'(c\W?v\W?b\W?a\W?|a\W?g\W?|p\W?l\W?c\W?|d\W?a\W?c\W?)$'
vector_clean = vector_clean.str.replace(s_chars, '', regex=True)
## # combos: ,(g)mbh, kgaa, etc. (with and without intermediate . or /)
s_chars = r'((g\W?)?m\W?b\W?h\W?|k\W?g\W?a\W?a\W?)$'
vector_clean = vector_clean.str.replace(s_chars, '', regex=True)
## # specifics
s_chars = r'(\W(sa)\s(\wt)\W(expl)\W(p)\W(g)\W(cl)\W)$'
vector_clean = vector_clean.str.replace(s_chars, '', regex=True)
s_chars = r'(\W(soc)\W(an)\W(d)\W(gest)\W(st)\W(d)\W(sec)\W)$'
vector_clean = vector_clean.str.replace(s_chars, '', regex=True)
vector_clean = vector_clean.str.replace(r'-',' ', regex=True)
vector_clean = vector_clean.str.replace(r'\s{2,}',' ', regex=True)
vector_clean = vector_clean.str.replace(r'[^\w\s]','', regex=True)
vector_clean = vector_clean.str.strip()
return(vector_clean)
############################################
# function merge_csv
############################################
def merge_csv(save_dir, file_dir, file_name):
## # location
os.chdir(file_dir)
## # list files
all_files = [i for i in glob.glob("*.csv")]
## # regular expression for date
regex = re.compile(r'\d+')
## # iterating through data
all_df = [] # to concatenate all data
encode = [] # to save all encodings
for file in all_files:
## # check encoding of files: open first 10'000 bytes
with open(file, 'rb') as rawdata:
encoding = chardet.detect(rawdata.read(10000))
## print(encoding)
## # 73% of confidence in each file
encode.append(encoding['encoding']) # to use in final file
## # load data frame
        df = pd.read_csv(file, sep=',', encoding=encoding['encoding'])
"""
This file contains a couple functions for taking a dataframe of measured temperatures
and associated starnames/instruments, and returning a corrected temperature with error bar.
The correction needed is determined in the iPython notebook 'CheckCCFSystematics'.
"""
import pandas as pd
def get_real_temperature_newmethod(df, addmode='simple'):
"""
See docstring for get_real_temperature. This function is only for legacy support.
"""
return get_real_temperature(df, addmode=addmode)
def get_real_temperature(df, addmode='simple'):
"""
Given a dataframe of observations, find the actual temperature and uncertainty for each star
Parameters:
===========
- df: pandas DataFrame
The input dataframe. Must have the following keys:
- 'Star'
- '[Fe/H]'
- 'vsini' (which is the vsini of the secondary star)
- 'Instrument'
- 'Temperature' (which is the measured temperature)
- addmode: string
The way the individual order CCFs were co-added
Returns:
========
corrected: pandas DataFrame
A dataframe with the corrected temperature, and its upper and lower errors.
"""
from kglib.utils import HDF5_Helpers
hdf_interface = HDF5_Helpers.Full_CCF_Interface()
# Group by the star name.
star_groups = df.groupby('Star')
starnames = star_groups.groups.keys()
metal = []
vsini = []
corrected_list = []
for starname in starnames:
# Get the measured temperature for each observation
star_df = star_groups.get_group(starname)
m_list = []
for _, r in star_df.iterrows():
m_list.append(
hdf_interface.get_measured_temperature(r['Star'], r['Date'], r['Temperature'], r['Instrument']))
measurements = pd.concat(m_list, ignore_index=True)
# Convert the measurements to actual temperatures
corrected = HDF5_Helpers.convert_measured_to_actual(measurements.copy())
corrected_list.append(corrected)
    corrected = pd.concat(corrected_list, ignore_index=True)
from application import app
from flask import Flask, redirect, request, url_for,render_template, Response, jsonify
import pandas as pd
from itertools import combinations
from random import choices
import random
def calculator(player1_move,player2_move,move_hist):
if player1_move == "c":
if player2_move == "c":
return [2,2]
else:
return [0,3]
elif player1_move == "d":
if player2_move == "d":
return [1,1]
else:
return [3,0]
def C_AlwaysC(turn,player,move_hist):
if turn == 0:
return "c"
else:
return "c"
def C_AlwaysD(turn,player,move_hist):
if turn == 0:
return "c"
else:
return "d"
def D_AlwaysC(turn,player,move_hist):
if turn == 0:
return "d"
else:
return "c"
def D_AlwaysD(turn,player,move_hist):
if turn == 0:
return "d"
else:
return "d"
def C_TitForTat(turn,player,move_hist):
if player == 1:
if turn == 0:
return "c"
else:
return move_hist[1::2][-1]
elif player == 2:
if turn == 0:
return "c"
else:
return move_hist[0::2][-1]
def C_UntilD(turn,player,move_hist):
if player == 1:
if turn == 0:
return "c"
else:
if "d" not in move_hist[1::2]:
return "c"
else:
return "d"
elif player == 2:
if turn == 0:
return "c"
else:
if "d" not in move_hist[0::2]:
return "c"
else:
return "d"
def C_TitFor2Tat(turn,player,move_hist):
if player == 1:
if turn <= 1:
return "c"
else:
if (move_hist[1::2][-1] == "d") & (move_hist[1::2][-2] == "d"):
return "d"
else:
return "c"
elif player == 2:
if turn <= 1:
return "c"
else:
if (move_hist[0::2][-1] == "d") & (move_hist[0::2][-2] == "d"):
return "d"
else:
return "c"
def Random_70C(turn,player,move_hist):
if turn == 0:
return choices(["c","d"], weights=[0.7,0.3])[0]
else:
return choices(["c","d"], weights=[0.7,0.3])[0]
def Random_70D(turn,player,move_hist):
if turn == 0:
return choices(["c","d"], weights=[0.3,0.7])[0]
else:
return choices(["c","d"], weights=[0.3,0.7])[0]
def D_TitFor2TatExploiter(turn,player,move_hist):
if turn == 0:
return "d"
else:
if (turn%2) == 0:
return "d"
else:
return "c"
def C_OccasionalDefector(turn,player,move_hist):
if turn <= 6:
return "c"
else:
if "d" not in move_hist[-10:]:
return "d"
else:
return "c"
def D_AlwaysCExploiter(turn,player,move_hist):
if player == 1:
if turn == 0:
return "d"
else:
if "d" not in (move_hist[1::2]):
return "d"
else:
return move_hist[0::2][-1]
elif player == 2:
if turn == 0:
return "d"
else:
if "d" not in (move_hist[1::2]):
return "d"
else:
return move_hist[0::2][-1]
def strategies_menu():
return C_AlwaysD,C_AlwaysC,D_AlwaysD,C_TitForTat,C_UntilD,Random_70C,Random_70D,C_TitFor2Tat,D_TitFor2TatExploiter,C_OccasionalDefector,D_AlwaysCExploiter,D_AlwaysC
def players(winner,loser,g,df1):
if g == 0:
playing = 12
player_list = []
start_strategy = []
for p in range(1,playing+1):
player_list = player_list + ['player'+str(p)]
start_strategy = start_strategy + [p-1]
df = pd.DataFrame(index=player_list,columns=['strategy_history'],data=start_strategy)
df.loc[loser] = [df.loc[winner]]
else:
df = pd.DataFrame(index=df1.index,columns=['strategy_history'],data=list(df1))
df.loc[loser] = df1.loc[winner]
return df
def fixtures(winner,loser,g,df1):
fixture_list = list(combinations(players(winner,loser,g,df1).index,2))
return fixture_list
def game(winner,loser,g,df1):
gamelength = 10
fixture_list = list(fixtures(winner,loser,g,df1))
player_strategies = players(winner,loser,g,df1).strategy_history
round_totals = pd.DataFrame()
for n in range(len(fixtures(winner,loser,g,df1))):
df = pd.DataFrame()
move_hist = []
points = []
for turn in range(gamelength):
player1_name = fixture_list[n][0]
player2_name = fixture_list[n][1]
player1_move = strategies_menu()[player_strategies.loc[player1_name]](turn,1,move_hist)
player2_move = strategies_menu()[player_strategies.loc[player2_name]](turn,2,move_hist)
player_scores = calculator(player1_move,player2_move,move_hist)
move_hist = move_hist + [player1_move] + [player2_move]
points = points + [player_scores[0]] + [player_scores[1]]
df = pd.DataFrame([move_hist[0::2],move_hist[1::2],points[0::2],points[1::2]]).T
df.rename(columns={0:player1_name+"_move",1:player2_name+"_move",2:player1_name,3:player2_name},inplace=True)
#print(df)
round_totals = pd.concat([round_totals,df.iloc[:,-2:].sum(axis=0)],axis=1)
return round_totals.sum(axis=1)
def match(g,winner,loser,df1):
rounds = 5
df = pd.DataFrame()
for r in range(rounds):
df = pd.concat([df,game(winner,loser,g,df1)],axis=1)
totals = pd.Series(df.sum(axis=1),name='Generation'+'_'+str(g))
return pd.merge(players(winner,loser,g,df1),totals,how='outer',left_index=True,right_index=True)
def tournament():
generations = 50
df = pd.DataFrame()
winner = 'player1'
loser = 'player1'
df1 = df.copy()
for g in range(generations):
if g == 0:
df = pd.concat([df,match(g,winner,loser,df1)],axis=1)
scores = df.iloc[:,-1]
winner = choices(list(scores.loc[scores == scores.max()].index))[0]
loser = choices(list(scores.loc[scores == scores.min()].index))[0]
else:
df1 = df.iloc[:,-2].copy()
df = pd.concat([df,match(g,winner,loser,df1)],axis=1)
scores = df.iloc[:,-1]
winner = choices(list(scores.loc[scores == scores.max()].index))[0]
loser = choices(list(scores.loc[scores == scores.min()].index))[0]
return df
#############################
#############################
#############################
@app.route('/', methods=['GET','POST'])
def home():
return "wanna play?" + '<br><br><a href="/play">Start a tournament?</a> </br>'
@app.route('/play', methods=['GET','POST']) # to move into strategies microservice
def play():
strategy_list = []
for s in strategies_menu():
strategy_list.append(s.__name__)
maps = {}
for m in range(len(strategy_list)):
maps.update({m:strategy_list[m]})
    df6 = match(0, 'player1', 'player1', pd.DataFrame())
import pandas as pd
import numpy as np
def createVectorScores(line_bins:list, start_zero:bool):
"""
    Create a list of score values from a list of bin frequencies. Pass only the list of frequencies; the method assumes the first position corresponds to score 0. If start_zero is False, the score zero is not considered.
    :param line_bins: list of frequencies
    :param start_zero: whether the score 0 is considered or not
    :type line_bins: list(int)
    :type start_zero: bool
    :return: array with the expanded score values
    :rtype: array[int]
"""
bins_value = 0
n_repeats = 0
list_Values = []
if start_zero == False:
bins_value = 1
line_bins.pop(0)
for n_repeat in line_bins:
array_bins_aux = np.repeat(bins_value, n_repeat)
list_Values = np.append(list_Values, array_bins_aux)
bins_value +=1
list_Values = list_Values.astype(int)
return list_Values
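# Example (illustrative): a bins-frequency list is expanded back into the raw scores
# it encodes. [2, 0, 3] with start_zero=True means two 0-scores, no 1-scores and
# three 2-scores:
# createVectorScores([2, 0, 3], start_zero=True)   # -> array([0, 0, 2, 2, 2])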
def countNumberofScores(list_frequencys:list):
"""
count the number of scores (not the sum of them but the quantity)
:param list_frequencys: list of frequencies
    :type list_frequencys: list
:return: quantity of elements
:rtype: int
"""
list_without_zeros = [element for element in list_frequencys if element != 0]
qty_elements = np.sum(list_without_zeros)
return qty_elements
def createVecSizeOfBins(max_score:int, size_bins:int):
"""
calculate the vector of bins used in case you want a specific size of bins
:param max_score: highest score that you can have in the bin
:param size_bins: size of the bins
:type max_score: int
:type size_bins: int
:return: quantity of elements
:rtype: int
"""
array_bins_config = np.arange(0, max_score, size_bins)
if max_score % size_bins != 0:
array_bins_config = np.concatenate((array_bins_config, [max_score]))
return array_bins_config
def createVecNumberOfBins(max_score:int, number_of_bins:int):
"""
calculate the vector of bins used in case you want a specific number of bins
:param max_score: highest score that you can have in the bin
:param number_of_bins: number of bins
:type max_score: int
:type number_of_bins: int
:return: vector of bins
:rtype: array[int]
"""
size_each_bins = max_score / number_of_bins
array_bins_config = np.arange(0, max_score, size_each_bins)
max_score_bin = np.max(array_bins_config)
if max_score % max_score_bin != 0:
#array_bins_config.append(max_score)
array_bins_config = np.concatenate((array_bins_config, [max_score]))
return array_bins_config
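# Examples (illustrative): both edge helpers append max_score as the final bin edge
# when the evenly spaced edges do not land on it exactly.
# createVecSizeOfBins(10, 3)      # -> array([ 0,  3,  6,  9, 10])
# createVecNumberOfBins(10, 4)    # -> array([ 0. ,  2.5,  5. ,  7.5, 10. ])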
def createHeaderBinsSize(vec_histo_values:np.array):
"""
create the header for the dataset with the size of bins
:param vec_histo_values: vec with the intervals of each bins
:type vec_histo_values: numPy array
:return: vec with the labels
:rtype: int
"""
vec_designation = ['interaction_ID']
qty_elements = len(vec_histo_values) - 1
aux_count = 0
while aux_count < qty_elements:
element_start = vec_histo_values[aux_count]
index_element_end = aux_count + 1
element_end = vec_histo_values[index_element_end]
bins_interval_start = float("{0:.3f}".format(element_start))
bins_interval_end = float("{0:.3f}".format(element_end))
string_bin_designation = 'bin_' + str(bins_interval_start) + '_' + str(bins_interval_end)
vec_designation.append(string_bin_designation)
aux_count += 1
vec_designation.append('label')
return vec_designation
def createHeaderBinsNumber(number_of_bins:int):
"""
create the header for the dataset with the number of bins
:param number_of_bins: number of bins
:type number_of_bins: int
:return: vec with the labels
:rtype: int
"""
vec_designation = ['interaction_ID']
aux = 0
while aux < number_of_bins:
string_designation_bins = 'bins_' + str(aux)
vec_designation.append(string_designation_bins)
aux += 1
vec_designation.append('label')
return vec_designation
def dataTreatmentVecBins(dataframe_csv_base:pd.DataFrame, save_zeros:bool):
"""
split the data into three vectors
:note: vec_ids = ids of the interactions
:note: vec_labels = labels of the interactions
    :note: matrix_frequencies = matrix of vectors with the frequencies of each score
    :param dataframe_csv_base: dataframe with the interaction ids, bin frequencies and labels
    :param save_zeros: whether the score 0 is considered
    :type dataframe_csv_base: pd.DataFrame
    :type save_zeros: bool
    :return: vec_ids, vec_labels, matrix_frequencies
    :rtype: tuple(list, list, list)
"""
vec_ids = []
vec_labels = []
matrix_frequencies = []
for index, row in dataframe_csv_base.iterrows():
id_interaction = row.Interaction_ID
label_value = row.Label
list_values = row.values.tolist()
#Remove the id
list_values.pop(0)
#Remove the label
list_values.pop()
list_frequencies_hist = createVectorScores(list_values, save_zeros)
qty_elements = countNumberofScores(list_values)
qty_scores = len(list_frequencies_hist)
assert qty_scores == qty_elements
vec_ids.append(id_interaction)
vec_labels.append(label_value)
matrix_frequencies.append(list_frequencies_hist)
#print('N row treated {0}'.format(len(vec_labels)))
return vec_ids, vec_labels, matrix_frequencies
def constructSizeOfBins(vec_ids:list, vec_labels:list, matrix_frequencies:list, max_score:int, size_of_bins:int):
"""
This method create the vec of bins score based on the size of bins
:param vec_ids = ids of the interactions
:param vec_labels = labels of the interactions
:param matrix_frequencies = matrix of vectors with the frequencies of each score
:param max_score = max score found in the dataset based
:param size_of_bins = size of the bins
:type vec_ids: list
:type vec_labels: list
:type matrix_frequencies: list
:type max_score: int
:type size_of_bins: int
:return: dataframe with the bins
:rtype: Dataframe
"""
vec_histo_size = createVecSizeOfBins(max_score, size_of_bins)
data_bins_treated = []
for counter, vec_frequencies in enumerate(matrix_frequencies):
interaction_id = vec_ids[counter]
label_value = vec_labels[counter]
histogram_vec_dist = np.histogram(vec_frequencies, bins = vec_histo_size)
data_histo = histogram_vec_dist[0].flatten()
data_histo = data_histo.tolist()
data_histo.append(label_value)
data_histo.insert(0, interaction_id)
data_bins_treated.append(data_histo)
vec_labels = createHeaderBinsSize(histogram_vec_dist[1])
print(vec_labels)
dataframe_data = pd.DataFrame(data=data_bins_treated, columns=vec_labels)
return dataframe_data
def constructNumberOfBins(vec_ids:list, vec_labels:list, matrix_frequencies:list, max_score:int, number_of_bins:int):
"""
This method create the vec of bins score based on the number of bins
:note: the number of bins correspond to the max score/number of bins
:note vec_ids = ids of the interactions
:note vec_labels = labels of the interactions
:note matrix_frequencies = matrix of vectors with the frequencies of each score
:note max_score = max score found in the dataset based
:note number_of_bins = size of the bins
:type vec_ids: list
:type vec_labels: list
:type matrix_frequencies: list
:type max_score: int
:type number_of_bins: int
:return: dataframe with the bins
:rtype: Dataframe
"""
vec_number_of_bins = createVecNumberOfBins(max_score, number_of_bins)
data_bins_treated = []
for counter, vec_frequencies in enumerate(matrix_frequencies):
interaction_id = vec_ids[counter]
label_value = vec_labels[counter]
histogram_vec = np.histogram(vec_frequencies, bins = vec_number_of_bins)
data_histo = histogram_vec[0].flatten()
data_histo = data_histo.tolist()
data_histo.append(label_value)
data_histo.insert(0, interaction_id)
data_bins_treated.append(data_histo)
vec_labels = createHeaderBinsNumber(number_of_bins)
dataframe_data = pd.DataFrame(data=data_bins_treated, columns=vec_labels)
print(dataframe_data)
return dataframe_data
def writeDataframeToCSV(dataframe_bins:pd.DataFrame, file_write:str):
"""
Save the dataframe to a csv
:note dataframe_bins = dataframe that contain the bins scores
:note file_write = name of the file
:type dataframe_bins: Dataframe
:type file_write: str
"""
dataframe_bins.to_csv(index=False, sep=',', path_or_buf=file_write)
path_csv = 'bins_base_573_SB1_ZERO.csv'
dataframe_csv_base = pd.read_csv(filepath_or_buffer=path_csv, delimiter=',')
print(dataframe_csv_base)
dataframe_bins_data = pd.DataFrame()
dataframe_bins_bins_values = pd.DataFrame()
from typing import Generator
import pandas
import pytest
from neo4j import DEFAULT_DATABASE
from graphdatascience.graph_data_science import GraphDataScience
from graphdatascience.query_runner.neo4j_query_runner import Neo4jQueryRunner
from graphdatascience.query_runner.query_runner import QueryRunner
from graphdatascience.server_version.server_version import ServerVersion
GRAPH_NAME = "g"
@pytest.fixture(autouse=True)
def run_around_tests(runner: Neo4jQueryRunner) -> Generator[None, None, None]:
# Runs before each test
runner.run_query(
"""
CREATE
(a: Node {x: 1, y: 2}),
(b: Node {x: 2, y: 3}),
(c: Node {x: 3, y: 4}),
(a)-[:REL {relX: 4, relY: 5}]->(b),
(a)-[:REL {relX: 5, relY: 6}]->(c),
(b)-[:REL {relX: 6, relY: 7}]->(c),
(b)-[:REL2]->(c)
"""
)
yield # Test runs here
# Runs after each test
runner.run_query("MATCH (n) DETACH DELETE n")
runner.run_query(f"CALL gds.graph.drop('{GRAPH_NAME}', false)")
def test_project_graph_native(gds: GraphDataScience) -> None:
G, result = gds.graph.project(GRAPH_NAME, "*", "*")
assert G.name() == GRAPH_NAME
assert result["graphName"] == GRAPH_NAME
result = gds.graph.exists(G.name())
assert result["exists"]
def test_project_graph_native_estimate(gds: GraphDataScience) -> None:
result = gds.graph.project.estimate("*", "*")
assert result["requiredMemory"]
def test_project_graph_cypher(gds: GraphDataScience) -> None:
node_query = "MATCH (n:Node) RETURN id(n) as id"
relationship_query = "MATCH (n:Node)-->(m:Node) RETURN id(n) as source, id(m) as target, 'T' as type"
G, result = gds.graph.project.cypher(GRAPH_NAME, node_query, relationship_query)
assert G.name() == GRAPH_NAME
assert result["graphName"] == GRAPH_NAME
result = gds.graph.exists(G.name())
assert result["exists"]
def test_project_graph_cypher_estimate(gds: GraphDataScience) -> None:
node_query = "MATCH (n:Node) RETURN id(n) as id"
relationship_query = "MATCH (n:Node)-->(m:Node) RETURN id(n) as source, id(m) as target, 'T' as type"
result = gds.graph.project.cypher.estimate(node_query, relationship_query)
assert result["requiredMemory"]
def test_project_subgraph(runner: QueryRunner, gds: GraphDataScience) -> None:
from_G, _ = gds.graph.project(GRAPH_NAME, {"Node": {"properties": "x"}}, "*")
subG, result = gds.beta.graph.project.subgraph("s", from_G, "n.x > 1", "*", concurrency=2)
assert subG.name() == "s"
assert result["graphName"] == "s"
result2 = gds.graph.list(subG)
assert result2["nodeCount"][0] == 2
runner.run_query(f"CALL gds.graph.drop('{subG.name()}')")
def test_graph_list(gds: GraphDataScience) -> None:
result = gds.graph.list()
assert len(result) == 0
G, _ = gds.graph.project(GRAPH_NAME, "*", "*")
result = gds.graph.list()
assert len(result) == 1
result = gds.graph.list(G)
assert result["graphName"][0] == GRAPH_NAME
def test_graph_exists(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", "*")
result = gds.graph.exists(G.name())
assert result["exists"]
result = gds.graph.exists("bogusName")
assert not result["exists"]
def test_graph_drop(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", "*")
result = gds.graph.drop(G, True)
assert result is not None
assert result["graphName"] == GRAPH_NAME
with pytest.raises(Exception):
gds.graph.drop(G, True)
def test_graph_export(runner: QueryRunner, gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", "*")
MY_DB_NAME = "testdatabase"
result = gds.graph.export(G, dbName=MY_DB_NAME, batchSize=10000)
assert result["graphName"] == GRAPH_NAME
assert result["dbName"] == MY_DB_NAME
runner.run_query("CREATE DATABASE $dbName", {"dbName": MY_DB_NAME})
runner.set_database(MY_DB_NAME)
node_count = runner.run_query("MATCH (n) RETURN COUNT(n) AS c").squeeze()
assert node_count == 3
runner.run_query("DROP DATABASE $dbName", {"dbName": MY_DB_NAME})
runner.set_database(DEFAULT_DATABASE)
def test_graph_get(gds: GraphDataScience) -> None:
gds.graph.project(GRAPH_NAME, "*", "*")
G = gds.graph.get(GRAPH_NAME)
assert G.name() == GRAPH_NAME
with pytest.raises(
ValueError,
match=f"No projected graph named 'bogusName' exists in current database '{gds.database()}'",
):
gds.graph.get("bogusName")
def test_graph_streamNodeProperty(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, {"Node": {"properties": "x"}}, "*")
result = gds.graph.streamNodeProperty(G, "x", concurrency=2)
assert {e for e in result["propertyValue"]} == {1, 2, 3}
def test_graph_streamNodeProperty_without_arrow(gds_without_arrow: GraphDataScience) -> None:
G, _ = gds_without_arrow.graph.project(GRAPH_NAME, {"Node": {"properties": "x"}}, "*")
result = gds_without_arrow.graph.streamNodeProperty(G, "x", concurrency=2)
assert {e for e in result["propertyValue"]} == {1, 2, 3}
def test_graph_streamNodeProperties(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, {"Node": {"properties": ["x", "y"]}}, "*")
result = gds.graph.streamNodeProperties(G, ["x", "y"], concurrency=2)
assert list(result.keys()) == ["nodeId", "nodeProperty", "propertyValue"]
x_values = result[result.nodeProperty == "x"]
assert {e for e in x_values["propertyValue"]} == {1, 2, 3}
y_values = result[result.nodeProperty == "y"]
assert {e for e in y_values["propertyValue"]} == {2, 3, 4}
def test_graph_streamNodeProperties_separate_property_columns(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, {"Node": {"properties": ["x", "y"]}}, "*")
result = gds.graph.streamNodeProperties(G, ["x", "y"], separate_property_columns=True, concurrency=2)
assert list(result.keys()) == ["nodeId", "x", "y"]
assert {e for e in result["x"]} == {1, 2, 3}
assert {e for e in result["y"]} == {2, 3, 4}
def test_graph_streamNodeProperties_without_arrow(gds_without_arrow: GraphDataScience) -> None:
G, _ = gds_without_arrow.graph.project(GRAPH_NAME, {"Node": {"properties": ["x", "y"]}}, "*")
result = gds_without_arrow.graph.streamNodeProperties(G, ["x", "y"], concurrency=2)
assert list(result.keys()) == ["nodeId", "nodeProperty", "propertyValue"]
x_values = result[result.nodeProperty == "x"]
assert {e for e in x_values["propertyValue"]} == {1, 2, 3}
y_values = result[result.nodeProperty == "y"]
assert {e for e in y_values["propertyValue"]} == {2, 3, 4}
def test_graph_streamNodeProperties_without_arrow_separate_property_columns(
gds_without_arrow: GraphDataScience,
) -> None:
G, _ = gds_without_arrow.graph.project(GRAPH_NAME, {"Node": {"properties": ["x", "y"]}}, "*")
result = gds_without_arrow.graph.streamNodeProperties(G, ["x", "y"], separate_property_columns=True, concurrency=2)
assert list(result.keys()) == ["nodeId", "x", "y"]
assert {e for e in result["x"]} == {1, 2, 3}
assert {e for e in result["y"]} == {2, 3, 4}
def test_graph_streamRelationshipProperty(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", {"REL": {"properties": "relX"}})
result = gds.graph.streamRelationshipProperty(G, "relX", concurrency=2)
assert {e for e in result["propertyValue"]} == {4, 5, 6}
def test_graph_streamRelationshipProperty_without_arrow(gds_without_arrow: GraphDataScience) -> None:
G, _ = gds_without_arrow.graph.project(GRAPH_NAME, "*", {"REL": {"properties": "relX"}})
result = gds_without_arrow.graph.streamRelationshipProperty(G, "relX", concurrency=2)
assert {e for e in result["propertyValue"]} == {4, 5, 6}
def test_graph_streamRelationshipProperties(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", {"REL": {"properties": ["relX", "relY"]}})
result = gds.graph.streamRelationshipProperties(G, ["relX", "relY"], concurrency=2)
assert list(result.keys()) == [
"sourceNodeId",
"targetNodeId",
"relationshipType",
"relationshipProperty",
"propertyValue",
]
x_values = result[result.relationshipProperty == "relX"]
assert {e for e in x_values["propertyValue"]} == {4, 5, 6}
y_values = result[result.relationshipProperty == "relY"]
assert {e for e in y_values["propertyValue"]} == {5, 6, 7}
def test_graph_streamRelationshipProperties_separate_property_columns(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", {"REL": {"properties": ["relX", "relY"]}})
result = gds.graph.streamRelationshipProperties(G, ["relX", "relY"], separate_property_columns=True, concurrency=2)
assert list(result.keys()) == ["sourceNodeId", "targetNodeId", "relationshipType", "relX", "relY"]
assert {e for e in result["relX"]} == {4, 5, 6}
assert {e for e in result["relY"]} == {5, 6, 7}
def test_graph_streamRelationshipProperties_without_arrow(gds_without_arrow: GraphDataScience) -> None:
G, _ = gds_without_arrow.graph.project(GRAPH_NAME, "*", {"REL": {"properties": ["relX", "relY"]}})
result = gds_without_arrow.graph.streamRelationshipProperties(G, ["relX", "relY"], concurrency=2)
assert list(result.keys()) == [
"sourceNodeId",
"targetNodeId",
"relationshipType",
"relationshipProperty",
"propertyValue",
]
x_values = result[result.relationshipProperty == "relX"]
assert {e for e in x_values["propertyValue"]} == {4, 5, 6}
y_values = result[result.relationshipProperty == "relY"]
assert {e for e in y_values["propertyValue"]} == {5, 6, 7}
def test_graph_streamRelationshipProperties_without_arrow_separate_property_columns(
gds_without_arrow: GraphDataScience,
) -> None:
G, _ = gds_without_arrow.graph.project(GRAPH_NAME, "*", {"REL": {"properties": ["relX", "relY"]}})
result = gds_without_arrow.graph.streamRelationshipProperties(
G, ["relX", "relY"], separate_property_columns=True, concurrency=2
)
assert list(result.keys()) == ["sourceNodeId", "targetNodeId", "relationshipType", "relX", "relY"]
assert {e for e in result["relX"]} == {4, 5, 6}
assert {e for e in result["relY"]} == {5, 6, 7}
def test_graph_writeNodeProperties(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", "*")
gds.pageRank.mutate(G, mutateProperty="rank", dampingFactor=0.2, tolerance=0.3)
result = gds.graph.writeNodeProperties(G, ["rank"], concurrency=2)
assert result["propertiesWritten"] == 3
def test_graph_writeRelationship(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", "*")
gds.nodeSimilarity.mutate(G, mutateRelationshipType="SIMILAR", mutateProperty="score", similarityCutoff=0)
result = gds.graph.writeRelationship(G, "SIMILAR", "score", concurrency=2)
assert result["relationshipsWritten"] == 2
assert result["propertiesWritten"] == 2
@pytest.mark.compatible_with(min_inclusive=ServerVersion(2, 1, 0))
def test_graph_removeNodeProperties_21(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, {"Node": {"properties": "x"}}, "*")
result = gds.graph.removeNodeProperties(G, ["x"], concurrency=2)
assert result["propertiesRemoved"] == 3
@pytest.mark.compatible_with(max_exclusive=ServerVersion(2, 1, 0))
def test_graph_removeNodeProperties_20(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, {"Node": {"properties": "x"}}, "*")
result = gds.graph.removeNodeProperties(G, ["x"], ["*"], concurrency=2)
assert result["propertiesRemoved"] == 3
def test_graph_deleteRelationships(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", ["REL", "REL2"])
result = gds.graph.deleteRelationships(G, "REL")
assert result["deletedRelationships"] == 3
def test_graph_generate(gds: GraphDataScience) -> None:
G, result = gds.beta.graph.generate(GRAPH_NAME, 12, 2)
assert G.node_count() == 12
assert result["generateMillis"] >= 0
@pytest.mark.enterprise
@pytest.mark.compatible_with(min_inclusive=ServerVersion(2, 1, 0))
def test_graph_construct(gds: GraphDataScience) -> None:
nodes = pandas.DataFrame({"nodeId": [0, 1, 2, 3]})
relationships = pandas.DataFrame({"sourceNodeId": [0, 1, 2, 3], "targetNodeId": [1, 2, 3, 0]})
G = gds.alpha.graph.construct("hello", nodes, relationships)
assert G.name() == "hello"
assert G.node_count() == 4
assert G.relationship_count() == 4
G.drop()
@pytest.mark.enterprise
@pytest.mark.compatible_with(min_inclusive=ServerVersion(2, 1, 0))
def test_graph_construct_multiple_dfs(gds: GraphDataScience) -> None:
    nodes = [pandas.DataFrame({"nodeId": [0, 1]}), pandas.DataFrame({"nodeId": [2, 3]})]
# -*- coding: utf-8 -*-
"""
Created on Sun May 8 18:29:53 2016
@author: bmanubay
"""
# Check which molecules we have that appear in Chris's list
import pandas as pd
# read in ; delimited csv of comp/mix counts created in thermomlcnts.py
a0 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/allcomp_counts_all.csv", sep=';')
a1 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/allcomp_counts_interesting.csv", sep=';')
a2 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/bincomp_counts_all.csv", sep=';')
import os
import cv2
import numpy as np
import pandas as pd
from catalyst import utils
from catalyst.dl import InferCallback, MetricCallback, State, Callback, CallbackOrder
from pytorch_toolbelt.inference import tta
from src.utils import mask2rle, mean_dice_coef, post_process, sigmoid, single_dice_coef
class PostprocessingCallback(InferCallback):
def __init__(self):
super().__init__()
self.valid_masks = []
self.probabilities = []
def on_stage_start(self, state: State):
print("Stage 3 started!")
def on_batch_end(self, state: State):
output = state.batch_out["logits"]
input_masks = state.batch_in["targets"]
for mask in input_masks:
for m in mask:
m = m.cpu().detach().numpy()
if m.shape != (350, 525):
m = cv2.resize(m, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)
self.valid_masks.append(m)
for prob in output:
for probability in prob:
probability = probability.cpu().detach().numpy()
if probability.shape != (350, 525):
probability = cv2.resize(
probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR
)
self.probabilities.append(probability)
def on_stage_end(self, state: State):
class_params = {}
for class_id in range(4):
print(class_id)
attempts = []
for t in range(0, 100, 10):
t /= 100
for ms in [
0,
1000,
5000,
10000,
11000,
14000,
15000,
16000,
18000,
19000,
20000,
21000,
23000,
25000,
27000,
]:
masks = []
for i in range(class_id, len(self.probabilities), 4):
probability = self.probabilities[i]
predict, num_predict = post_process(sigmoid(probability), t, ms)
masks.append(predict)
d = []
for i, j in zip(masks, self.valid_masks[class_id::4]):
d.append(single_dice_coef(y_pred_bin=i, y_true=j))
attempts.append((t, ms, np.mean(d)))
attempts_df = pd.DataFrame(attempts, columns=["threshold", "size", "dice"])
attempts_df = attempts_df.sort_values("dice", ascending=False)
print(attempts_df.head())
best_threshold = attempts_df["threshold"].values[0]
best_size = attempts_df["size"].values[0]
class_params[class_id] = (best_threshold, best_size)
np.save("./logs/class_params.npy", class_params)
class CustomInferCallback(Callback):
def __init__(self, **kwargs):
super().__init__(CallbackOrder.External)
print("Custom infer callback is initialized")
self.path = kwargs.get("path", None)
self.threshold = kwargs.get("threshold", None)
self.min_size = kwargs.get("min_size", None)
self.class_params = dict()
self.encoded_pixels = [None for i in range(14792)]
self.pred_distr = {-1: 0, 0: 0, 1: 0, 2: 0, 3: 0}
self.image_id = 0
self.tta = kwargs.get("tta", None)
def on_stage_start(self, state: "State"):
if self.tta is not None:
# state.model = tta.SegmentationTTAWrapper(state.model, tta.aliases.d4_transform())
state.model = tta.TTAWrapper(state.model, tta.d4_image2mask)
print(f"tta model created! type={type(state.model)}")
def on_batch_end(self, state: State):
# print(type(state.model))
# if not isinstance(state.model, tta.wrappers.SegmentationTTAWrapper):
# print("Not instance of tta")
# exit()
for prob in state.batch_out["logits"]:
for probability in prob:
probability = probability.detach().cpu().numpy()
if probability.shape != (350, 525):
probability = cv2.resize(
probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR
)
prediction, num_predict = post_process(
sigmoid(probability),
threshold=self.threshold,
min_size=self.min_size,
)
if num_predict == 0:
self.pred_distr[-1] += 1
self.encoded_pixels[self.image_id] = ""
else:
self.pred_distr[self.image_id % 4] += 1
r = mask2rle(prediction)
self.encoded_pixels[self.image_id] = r
self.image_id += 1
def on_stage_end(self, state: State):
np.save("./logs/pred_distr.npy", self.pred_distr)
        sub = pd.read_csv(f"{self.path}/sample_submission.csv")
import numpy as np
import pytest
from pandas import (
Categorical,
DataFrame,
Series,
_testing as tm,
concat,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
pytestmark = [
pytest.mark.single,
# pytables https://github.com/PyTables/PyTables/issues/822
pytest.mark.filterwarnings(
"ignore:a closed node found in the registry:UserWarning"
),
]
def test_categorical(setup_path):
with ensure_clean_store(setup_path) as store:
# Basic
_maybe_remove(store, "s")
s = Series(
Categorical(
["a", "b", "b", "a", "a", "c"],
categories=["a", "b", "c", "d"],
ordered=False,
)
)
store.append("s", s, format="table")
result = store.select("s")
tm.assert_series_equal(s, result)
_maybe_remove(store, "s_ordered")
s = Series(
Categorical(
["a", "b", "b", "a", "a", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
)
store.append("s_ordered", s, format="table")
result = store.select("s_ordered")
tm.assert_series_equal(s, result)
_maybe_remove(store, "df")
df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]})
store.append("df", df, format="table")
result = store.select("df")
tm.assert_frame_equal(result, df)
# Dtypes
_maybe_remove(store, "si")
s = Series([1, 1, 2, 2, 3, 4, 5]).astype("category")
store.append("si", s)
result = store.select("si")
tm.assert_series_equal(result, s)
_maybe_remove(store, "si2")
s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype("category")
store.append("si2", s)
result = store.select("si2")
tm.assert_series_equal(result, s)
# Multiple
_maybe_remove(store, "df2")
df2 = df.copy()
df2["s2"] = Series(list("abcdefg")).astype("category")
store.append("df2", df2)
result = store.select("df2")
tm.assert_frame_equal(result, df2)
# Make sure the metadata is OK
info = store.info()
assert "/df2 " in info
# assert '/df2/meta/values_block_0/meta' in info
assert "/df2/meta/values_block_1/meta" in info
# unordered
_maybe_remove(store, "s2")
s = Series(
Categorical(
["a", "b", "b", "a", "a", "c"],
categories=["a", "b", "c", "d"],
ordered=False,
)
)
store.append("s2", s, format="table")
result = store.select("s2")
tm.assert_series_equal(result, s)
# Query
_maybe_remove(store, "df3")
store.append("df3", df, data_columns=["s"])
expected = df[df.s.isin(["b", "c"])]
result = store.select("df3", where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(["b", "c"])]
result = store.select("df3", where=['s = ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(["d"])]
result = store.select("df3", where=['s in ["d"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(["f"])]
result = store.select("df3", where=['s in ["f"]'])
tm.assert_frame_equal(result, expected)
# Appending with same categories is ok
store.append("df3", df)
df = concat([df, df])
expected = df[df.s.isin(["b", "c"])]
result = store.select("df3", where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
# Appending must have the same categories
df3 = df.copy()
df3["s"] = df3["s"].cat.remove_unused_categories()
msg = "cannot append a categorical with different categories to the existing"
with pytest.raises(ValueError, match=msg):
store.append("df3", df3)
# Remove, and make sure meta data is removed (its a recursive
# removal so should be).
result = store.select("df3/meta/s/meta")
assert result is not None
store.remove("df3")
with pytest.raises(
KeyError, match="'No object named df3/meta/s/meta in the file'"
):
store.select("df3/meta/s/meta")
def test_categorical_conversion(setup_path):
# GH13322
# Check that read_hdf with categorical columns doesn't return rows if
# where criteria isn't met.
obsids = ["ESP_012345_6789", "ESP_987654_3210"]
imgids = ["APF00006np", "APF0001imm"]
data = [4.3, 9.8]
# Test without categories
df = DataFrame({"obsids": obsids, "imgids": imgids, "data": data})
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", data_columns=True)
result = read_hdf(path, "df", where="obsids=B")
tm.assert_frame_equal(result, expected)
# Test with categories
df.obsids = df.obsids.astype("category")
df.imgids = df.imgids.astype("category")
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", data_columns=True)
result = read_hdf(path, "df", where="obsids=B")
tm.assert_frame_equal(result, expected)
def test_categorical_nan_only_columns(setup_path):
# GH18413
# Check that read_hdf with categorical columns with NaN-only values can
# be read back.
df = DataFrame(
{
"a": ["a", "b", "c", np.nan],
"b": [np.nan, np.nan, np.nan, np.nan],
"c": [1, 2, 3, 4],
"d": | Series([None] * 4, dtype=object) | pandas.Series |
from models import MotionDetection, ObjectDetection
import config
import pandas as pd
from datetime import datetime
import logging
from edgetpu.detection.engine import DetectionEngine
from database import Session
def get_obj_det_comps(model_file: str, labels_file: str) -> tuple:
"""Load object detection model file and labels"""
logging.info("Loading Edge TPU object detection model and labels")
model = DetectionEngine(model_file)
labels = {}
# loop over the class labels file
for row in open(labels_file):
# unpack the row and update the labels dictionary
(classID, curr_label) = row.strip().split(maxsplit=1)
labels[int(classID)] = curr_label.strip()
return model, labels
def save_detections(detections: list) -> None:
"""Save detections into DB"""
logging.debug(f'Saving {len(detections)} detection(s)')
try:
# stage detections for DB insert
Session.add_all(detections)
# flush inserts into DB
Session.commit()
except Exception as e:
logging.error(f'Detection(s) not saved: {str(e)}')
def get_max_obj_ids(now, db_conn) -> dict:
"""Get a dictionary of labels and max object IDs for current date"""
# calculate start of current hour
curr_dt_start = f'{str(now.date())} 00:00:00'
# fetch results from DB
df = pd.read_sql(f"""
SELECT label, COUNT (DISTINCT obj_id) as next_obj_id
FROM object_detections
WHERE create_ts >= :curr_dt_start
GROUP BY 1
""", params={'curr_dt_start': curr_dt_start}, con=db_conn)
# convert dataframe to a {label:count} dictionary
return {rec['label']: rec['next_obj_id'] for rec in df.to_dict(orient='records')}
def get_motion_analysis(db_conn) -> list:
"""Get means of motion detections by hour for last N-days and today"""
# fetch results from DB
df = pd.read_sql(f"""
SELECT id, create_ts, '1' as motion_count
FROM motion_detections
WHERE create_ts BETWEEN datetime('now', '-{config.USE_HISTORICAL_DAYS} days') AND datetime('now', 'localtime')
GROUP BY 1,2,3
""", con=db_conn)
# update data types
df.create_ts = pd.to_datetime(df.create_ts)
df.motion_count = df.motion_count.astype(int)
# resample data by hour
motion_det_df_resampled = df.set_index('create_ts').resample(
'H').count().reset_index()[['create_ts', 'motion_count']].fillna(0)
# split historical dates and today
td = datetime.now().date()
motion_det_df_resampled_hist = motion_det_df_resampled.loc[motion_det_df_resampled['create_ts'].dt.date != td]
motion_det_df_resampled_td = motion_det_df_resampled.loc[motion_det_df_resampled['create_ts'].dt.date == td]
# calculate avg hourly count for historical detections
motion_det_df_resampled_avg_hist = motion_det_df_resampled_hist.groupby(
motion_det_df_resampled_hist.create_ts.dt.hour)['motion_count'].mean()
hist = motion_det_df_resampled_avg_hist.reset_index()
hist.columns = ['Hour', 'Historical']
# calculate hourly count for today's detections
motion_det_df_resampled_avg_td = motion_det_df_resampled_td.groupby(
motion_det_df_resampled_td.create_ts.dt.hour)['motion_count'].sum()
today = motion_det_df_resampled_avg_td.reset_index()
today.columns = ['Hour', 'Today']
# return merged: historical and today's datasets
return hist.merge(today, how='left').fillna(0).to_dict(orient='records')
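# Illustrative usage (the sqlite file name below is an assumption, not part of this
# module): any DB-API/SQLAlchemy connection pointing at the detections database works.
# import sqlite3
# with sqlite3.connect("detections.db") as conn:
#     hourly = get_motion_analysis(conn)  # [{'Hour': 0, 'Historical': ..., 'Today': ...}, ...]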
def get_objects_analysis(db_conn) -> dict:
"""Get means of object detections by hour for last N-days and today"""
# fetch detections from db
    object_det_df = pd.read_sql(f"""
    SELECT id, label, create_ts, obj_id
    FROM object_detections
    WHERE create_ts BETWEEN datetime('now', '-{config.USE_HISTORICAL_DAYS} days') AND datetime('now', 'localtime')
    GROUP BY 1,2,3,4
    """, con=db_conn)
import pandas as pd
def deep_get(obj, path):
if not path or not obj:
return obj
return deep_get(obj.get(path[0]), path[1:])
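# Example: safely pull a nested value out of a parsed JSON payload; a missing key
# along the path comes back as None via dict.get.
# deep_get({"paging": {"next": "abc123"}}, ["paging", "next"])   # -> "abc123"
# deep_get({"paging": {"prev": "xyz"}}, ["paging", "next"])      # -> None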
class Messages:
def __init__(self, session):
self._session = session
def get_messages(self, start, end):
url = "messages/cursor"
data = {"from": start, "until": end}
cursor_request = self._session.request("POST", url, json=data)
cursor_request.raise_for_status()
cursor = cursor_request.json()["cursor"]
response_list = []
i = 0
while cursor:
i += 1
print("Iteration #: ", i)
response = self._session.request("GET", url + f"/{cursor}")
response.raise_for_status()
for row in response.json()["data"]:
response_list.append(row)
cursor = deep_get(response.json(), ["paging", "next"])
contacts, inbound_messages, outbound_messages = [], [], []
for obj in response_list:
if "_vnd" not in obj.keys():
contacts.append(pd.json_normalize(obj["contacts"], sep="_"))
inbound_messages.append(
pd.json_normalize(obj["messages"], sep="_")
)
elif "_vnd" in obj.keys():
outbound_messages.append(pd.json_normalize(obj, sep="_"))
try:
df_inbound = pd.concat(
[pd.concat(contacts), pd.concat(inbound_messages)], axis=1
)
except ValueError:
df_inbound = pd.DataFrame()
try:
df_outbound = pd.concat(outbound_messages)
except ValueError:
            df_outbound = pd.DataFrame()
import os, re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib import rcParams
from matplotlib.ticker import MultipleLocator
from floris.utils.tools import valid_ops as vops
root = os.path.dirname(os.path.dirname(__file__))
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# PLOT_CONFIGURATION #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
font1 = {'family': 'Times New Roman',
'weight': 'bold',
'size': 15}
font2 = {'family': 'Times New Roman',
'weight': 'bold',
'size': 10}
font3 = {'family': 'Times New Roman',
'weight': 'bold',
'size': 13}
font4 = {'family': 'Times New Roman',
'weight': 'bold',
'size': 15,
'color': 'b',}
font5 = {'family': 'Times New Roman',
'weight': 'bold',
'size': 20}
line_styles = ["-", "--", "-.", ":", "-", "--", ]
color_group = ['k', 'b', 'y', 'r', 'g', ]
line_maker_style = dict(linestyle=":",
linewidth=2,
color="maroon",
markersize=15)
marker_styles = ["o", "D", "$\diamondsuit$",
"+", "x", "s", "^", "d", ">", "<", "v"]
marker_sizes = [5, 5, 5, 5]
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# VALIDATION #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
def layout_plot(coordinates, annotate=False):
x, y = coordinates[:, 0], coordinates[:, 1]
plt.figure(dpi=100)
plt.scatter(x, y)
if annotate:
num_labs = [str(i) for i in range(1, 81)]
for i in range(len(num_labs)):
plt.annotate(num_labs[i], xy=(x[i], y[i]),
xytext=(x[i] + 50, y[i] + 50))
# plt.xlim((-8 * 80., 80 * 80.));plt.ylim((-4 * 80., 70 * 80.))
plt.show()
def wt_power_eval(legend, data, **kwargs):
fig = plt.figure(dpi=200)
ax = fig.add_subplot(111)
colors = list(mcolors.TABLEAU_COLORS.keys())
x = np.arange(1, data.shape[1] + 1)
# if kwargs.get("ref", False):
if kwargs.__contains__("ref"):
for i, tag in enumerate(kwargs["ref"].columns):
if tag.split("+")[-1] in ["OBS", ]:
continue
tag_i = tag.split('+')[-1]
ax.plot(x, kwargs["ref"][tag].values,
linestyle=line_styles[i],
c='w',
lw=0.00,
label=tag_i,
markersize=10,
marker=marker_styles[i],
markeredgecolor='k',
markeredgewidth=1.5)
for i in range(data.shape[0]):
legend_ii = legend[i].split('+')[-2:]
legend_i = ['BP' if i == 'Bastankhah' else i for i in legend_ii]
# legend_i = [legend_i[0], legend_i[2], legend_i[1]]
ax.plot(x, vops.normalized_wt_power(data[i, :]),
color=color_group[i], linewidth=2,
linestyle=line_styles[i],
markersize=0,
marker=marker_styles[-(i + 1)],
label=' + '.join(legend_i))
title = ""
ax = general_axes_property(ax, 'Turbine Row', 'Normalized Power',
(0.5, data.shape[1] + 0.5), (0.1, 1.05),
1, title)
if kwargs.get("psave", False):
plt.savefig("{}/output/{}.png".format(root, kwargs["psave"]), format='png',
dpi=300, bbox_inches='tight')
print("** Picture {} Save Done !** \n".format(kwargs["psave"]))
if kwargs.get("dsave", False):
pd.DataFrame(data.T, columns=legend).to_csv(
"{}/output/{}.csv".format(root, kwargs["dsave"]))
print("** Data {} Save Done !** \n".format(kwargs["dsave"]))
if kwargs.get("show", True):
plt.show()
def wf_power_eval(legend, data, **kwargs):
fig = plt.figure(dpi=200)
ax = fig.add_subplot(111)
colors = list(mcolors.TABLEAU_COLORS.keys())
for i in range(data.shape[0]):
winds, label = wind_range_from_label(legend[i])
ax.plot(winds, data[i, :], color=colors[i], linewidth=1.5,
linestyle=line_styles[i], label=label)
winds = wind_range_from_label(legend[0])[0]
title = 'Distribution of the normalized Horns Rev wind farm power ouput'
ax = general_axes_property(ax, 'Wind Direction(degree)', 'Normalized Power',
(winds[0] - (len(winds) / 10),
winds[-1] + (len(winds) / 10)),
(0.35, 1.05), int(len(winds) / 10), title)
if kwargs.get("psave", False):
plt.savefig("output/{}.png".format(kwargs["psave"]), format='png',
dpi=300, bbox_inches='tight')
print("** Picture {} Save Done ! **".format(kwargs["psave"]))
if kwargs.get("dsave", False):
        pd.DataFrame(data.T, columns=legend)
import pandas
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from sklearn.multiclass import OneVsOneClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
import utils
targetClass = 3
data = utils.getDataset(targetClass)
data = utils.MapLabels(targetClass,data)
data = utils.preprocess(data)
model = Pipeline([
('vect', CountVectorizer(lowercase = True,max_features = 4000,ngram_range=(1, 2))),
('tfidf', TfidfTransformer()),
('clf', OneVsOneClassifier(LinearSVC())),
])
kf = KFold(n_splits=10, random_state=43, shuffle=True)
accurs = []
conf_matrix = []
tp = []
tn = []
fp = []
fn = []
for train_index, test_index in kf.split(data):
X_train, X_test = data.iloc[train_index]['Comment'], data.iloc[test_index]['Comment']
y_train, y_test = data.iloc[train_index]['Label'], data.iloc[test_index]['Label']
    model.fit(pandas.np.asarray(X_train), pandas.np.asarray(y_train))
from eagles.Supervised.utils import plot_utils as pu
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.linear_model import Lasso, LogisticRegression
from sklearn.base import clone
from sklearn.model_selection import train_test_split
from scipy import stats
def select_features(
X=None,
y=None,
methods=[],
problem_type="clf",
model_pipe=None,
imp_thresh=0.005,
corr_thresh=0.7,
bin_fts=None,
dont_drop=None,
random_seed=None,
n_jobs=None,
plot_ft_importance=False,
plot_ft_corr=False,
):
"""
Function to reduce feature set size
Expects:
X - pandas df containing the feature columns
y - pandas series containg the outcomes
imp_thresh - min importance threshold for the rf importance (below thresh cut)
corr_thresh - correlation threshold where fts above thresh are cut
nonbin_fts - list of col names with non binarized features
display_imp - coolean if true then displays the feature importances of top 20
dont_drop - list of col names don't want to drop regradless of corr or importance
Returns: list of included features, list of dropped features
"""
if len(methods) == 0:
print("NO SELECT FEATURES METHODS PASSED")
return
# get the initial features
ft_cols = list(X.columns[:])
print("Init number of features: " + str(len(ft_cols)) + " \n")
imp_drop = []
lin_drop = []
corr_drop = []
if dont_drop is None:
dont_drop = []
if "correlation" in methods:
# correlation drop
corr_fts = [x for x in X.columns if x not in bin_fts]
correlations = X[corr_fts].corr()
if plot_ft_corr:
pu.plot_feature_correlations(
df=X[corr_fts].copy(deep=True),
plot_title="Feature Correlation Pre-Drop",
)
upper = correlations.where(
            np.triu(np.ones(correlations.shape), k=1).astype(bool)
)
corr_drop = [
column for column in upper.columns if any(upper[column].abs() > corr_thresh)
]
# drop the correlation features first then fit the models
print("Features dropping due to high correlation: " + str(corr_drop) + " \n")
ft_cols = [x for x in ft_cols if (x not in corr_drop) or (x in dont_drop)]
X = X[ft_cols].copy(deep=True)
# Model importance
X_train, X_test, y_train, y_test = train_test_split(
X[ft_cols], y, test_size=0.2, random_state=random_seed
)
if "rf_importance" in methods:
if problem_type == "clf":
forest = RandomForestClassifier(
n_estimators=200, random_state=random_seed, n_jobs=n_jobs
)
else:
forest = RandomForestRegressor(
n_estimators=200, random_state=random_seed, n_jobs=n_jobs
)
if model_pipe:
tmp_pipe = clone(model_pipe)
tmp_pipe.steps.append(["mod", forest])
forest = clone(tmp_pipe)
forest.fit(X_train, y_train)
if model_pipe:
forest = forest.named_steps["mod"]
rf_importances = forest.feature_importances_
ftImp = {"Feature": ft_cols, "Importance": rf_importances}
ftImp_df = pd.DataFrame(ftImp)
ftImp_df.sort_values(["Importance"], ascending=False, inplace=True)
imp_drop = list(ftImp_df[ftImp_df["Importance"] < imp_thresh]["Feature"])
print("Features dropping from low importance: " + str(imp_drop) + " \n")
if plot_ft_importance:
pu.plot_feature_importance(
ft_df=ftImp_df,
mod_type=type(forest).__name__,
plot_title="RF Feature Selection Importance",
)
if "regress" in methods:
if problem_type == "clf":
lin_mod = LogisticRegression(
penalty="l1", solver="liblinear", random_state=random_seed
)
else:
lin_mod = Lasso(random_state=random_seed)
if model_pipe:
tmp_pipe = clone(model_pipe)
tmp_pipe.steps.append(["mod", lin_mod])
lin_mod = clone(tmp_pipe)
lin_mod.fit(X_train, y_train)
if model_pipe:
lin_mod = lin_mod.named_steps["mod"]
if problem_type == "clf":
tmp = pd.DataFrame({"Feature": ft_cols, "Coef": lin_mod.coef_[0]})
else:
tmp = pd.DataFrame({"Feature": ft_cols, "Coef": lin_mod.coef_})
lin_drop = list(tmp["Feature"][tmp["Coef"] == 0])
print("Features dropping from l1 regression: " + str(lin_drop) + " \n")
if plot_ft_importance:
pu.plot_feature_importance(
ft_df=tmp,
mod_type=type(lin_mod).__name__,
plot_title="Logistic l1 Feature Selection Coefs",
)
# get the final drop and feature sets
drop_fts = list(set(imp_drop + lin_drop + corr_drop))
sub_fts = [col for col in ft_cols if (col not in drop_fts) or (col in dont_drop)]
print("Final number of fts : " + str(len(sub_fts)) + "\n \n")
print("Final features: " + str(sub_fts) + "\n \n")
print("Dropped features: " + str(drop_fts) + "\n \n")
return sub_fts, drop_fts
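# Illustrative usage sketch (toy column names, not from this module):
# kept, dropped = select_features(
#     X=df[feature_cols],
#     y=df["target"],
#     methods=["correlation", "rf_importance", "regress"],
#     problem_type="clf",
#     bin_fts=["is_weekend"],   # binary columns skipped by the correlation filter
#     corr_thresh=0.7,
#     imp_thresh=0.005,
#     random_seed=42,
#     n_jobs=-1,
# )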
def create_bin_table(df=None, bins=None, bin_col=None, actual_col=None):
"""
Function to generate the bin tables with percents
    Expects: df - pandas df containing the column to be binned and the actual outcome column
             bins - defaults to probability bins unless passed a list of bin edges i.e. [x/100 for x in range(-5,105,5)]
             bin_col - name of the col to be binned
             actual_col - name of the col holding the observed outcome
    Returns: the binned count/percent dataframe and the rank correlation between bin order and percent actual
"""
# Generate the bin col name
bin_col_name = bin_col + "_bin"
# Generate the list of bins (go by 5%)
# default to prob taken include -5 so that anything at 0 will have bin and go above 100 so
# that include values in bins from 95 to 100
if bins is None:
bin_list = [x / 100 for x in range(-5, 105, 5)]
else:
bin_list = bins
# create the bins
df[bin_col_name] = pd.cut(df[bin_col], bin_list)
# get the counts for the number of obs in each bin and the percent taken in each bin
cnts = df[bin_col_name].value_counts().reset_index()
cnts.columns = [bin_col_name, "count"]
# Get the percent ivr per bin
percs = df.groupby(by=bin_col_name)[actual_col].mean().reset_index()
percs.columns = [bin_col_name, "percent_actual"]
# combine the counts and the percents, sort the table by bin and write the table out
wrt_table = cnts.merge(
percs, left_on=bin_col_name, right_on=bin_col_name, how="inner"
)
wrt_table.sort_values(by=bin_col_name, inplace=True)
# calc the correlation between probab bin rank and the percent actual
# asssumes table in order at this point
if wrt_table.isnull().values.any():
return wrt_table, np.nan
else:
ranks = [i for i in range(wrt_table.shape[0])]
corr, p = stats.pearsonr(ranks, wrt_table["percent_actual"])
return [wrt_table, corr]
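# Illustrative usage (hypothetical column names): bucket model probabilities into the
# default 5% bins and compare the observed outcome rate per bin; the second return
# value is the rank correlation between bin order and percent actual.
# bin_table, rank_corr = create_bin_table(
#     df=scored_df, bins=None, bin_col="pred_prob", actual_col="actual"
# )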
def get_feature_importances(mod_name=None, mod=None, features=None):
features = ["ft_" + str(ft) if isinstance(ft, int) else ft for ft in features]
if (
("RandomForest" in mod_name)
or ("GradientBoosting" in mod_name)
or ("DecisionTree" in mod_name)
or ("ExtraTrees" in mod_name)
):
importance_values = mod.feature_importances_
ftImp = {"Feature": features, "Importance": importance_values}
ftImp_df = pd.DataFrame(ftImp)
# display_imp is true then plot the importance values of the features
ftImp_df = ftImp_df.sort_values(["Importance"], ascending=False).reset_index(
drop=True
)
return ftImp_df
elif ("Regression" in mod_name) or (
mod_name in ["Lasso", "ElasticNet", "PoissonRegressor"]
):
if mod_name == "LogisticRegression":
            tmp = pd.DataFrame({"Feature": features, "Coef": mod.coef_[0]})
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import csv
import numpy as np
import sys
import pandas as pd
import itertools
import math
import time
from sklearn import svm, linear_model, neighbors
from sklearn import tree, ensemble
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import StratifiedKFold
import random
import numbers
import argparse
import os
import json
def predict(X_new, model):
probs= model.predict_proba(X_new)
scores = list(zip(test_df['Ent'],test_df['Customer'],probs[:,1]))
scores.sort(key=lambda tup: tup[2],reverse=True)
scores_df = pd.DataFrame(scores,columns=['Supplier','Customer','Prob'])
return scores_df
def get_scores(clf, X_new, y_new):
scoring = ['precision', 'recall', 'accuracy', 'roc_auc', 'f1', 'average_precision']
scorers, multimetric = metrics.scorer._check_multimetric_scoring(clf, scoring=scoring)
#print(scorers)
scores = multimetric_score(clf, X_new, y_new, scorers)
return scores
def crossvalid(train_df, test_df):
features_cols= train_df.columns.difference(['Entity1','Entity1' ,'Class'])
X=train_df[features_cols].values
y=train_df['Class'].values.ravel()
X_new=test_df[features_cols].values
y_new=test_df['Class'].values.ravel()
nb_model = GaussianNB()
nb_model.fit(X,y)
nb_scores = get_scores(nb_model, X_new, y_new)
logistic_model = linear_model.LogisticRegression(C=0.01)
logistic_model.fit(X,y)
lr_scores = get_scores(logistic_model, X_new, y_new)
rf_model = ensemble.RandomForestClassifier(n_estimators=200, n_jobs=10)
rf_model.fit(X,y)
rf_scores = get_scores(rf_model, X_new, y_new)
#sclf_scores = stacking(train_df, test_df)
#clf = ensemble.RandomForestClassifier(n_estimators=100
return nb_scores,lr_scores, rf_scores#, sclf_scores
def multimetric_score(estimator, X_test, y_test, scorers):
"""Return a dict of score for multimetric scoring"""
scores = {}
for name, scorer in scorers.items():
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
scores[name] = score
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) "
"instead. (scorer=%s)"
% (str(score), type(score), name))
return scores
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-train', required=True, dest='train', help="enter train examples file")
parser.add_argument('-test', required=True, dest='test', help="enter test fact file")
#parser.add_argument('-pos', required=True, dest='positive', help="enter postive example file")
#parser.add_argument('-neg', required=True, dest='negative', help="enter negative exmaple file")
parser.add_argument('-emb', required=True, dest='embeddings', help="enter embedding file")
parser.add_argument('-relmap', required=True, dest='relmapping',help="enter mapping file (relation_to_id.json)")
parser.add_argument('-otrain', required=True, dest='train_output', type=int, help="create file name for train features")
parser.add_argument('-otest', required=True, dest='test_output', type=int, help="create file name for test features")
parser.add_argument('-predict', required=True, dest='predict', type=int, help="create file name for prediction output")
args = parser.parse_args()
#train_pos_file = args.positive
#train_neg_file = args.negative
train_file = args.train
emb_file = args.embeddings
relmap_file = args.relmapping
test_file = args.test
train_output = args.train_output
test_output = args.test_output
predict = args.predict
print(args)
print ("Training file",train_file)
train_df =pd.read_csv(train_file, names=['Entity1','Relation','Entity2','Class'], sep='\t', header=None)
print ("number of train samples",len(train_df))
emb_df = pd.read_json(emb_file,orient='index')
emb_df.index.rename('Entity', inplace=True)
train_df = train_df.merge(emb_df, left_on='Entity1', right_on='Entity').merge(emb_df, left_on='Entity2', right_on='Entity')
print ("number of positives in train",(len(train_df[train_df['Class']==1])))
print ("number of neegatives in train",(len(train_df[train_df['Class']!=1])))
mapping = {}
with open(relmap_file, 'r') as json_data:
mapping = (json.load(json_data))
train_df.Relation = train_df.Relation.replace(mapping)
#print (train_df.head())
features_cols= train_df.columns.difference(['Entity1','Entity2' ,'Class'])
X=train_df[features_cols].values
y=train_df['Class'].values.ravel()
    test_df = pd.read_csv(test_file, names=['Entity1','Relation','Entity2','Class'], sep='\t', header=None)