import numpy as np
import pandas as pd
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
def one_way_anova(data, target, between, summary=None):
formula = "Q('%s') ~ " % target
formula += "C(Q('%s'))" % between
model = ols(formula, data=data).fit()
result = anova_lm(model)
result = result.rename(columns={
'sum_sq' : 'Sum Square',
'mean_sq' : 'Mean Square',
'F' : 'F Statistic',
'PR(>F)' : 'p-value'
})
result = result.rename(index={
"C(Q('%s'))" % between : between
})
result2 = pd.DataFrame(
{
"Count": data.groupby(between)[target].count(),
"Mean": data.groupby(between)[target].mean(),
"Median": data.groupby(between)[target].median(),
"Std.": data.groupby(between)[target].std(),
"Variance": data.groupby(between)[target].var()
}
)
result2.index.name = None
index_change = {}
for index in result2.index:
changed = "{}({})".format(between, index)
index_change[index] = changed
result2 = result2.rename(index_change)
if summary:
return result2
else:
return result
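# Illustrative usage (toy data, not part of the original snippet; column names are made up):
# df = pd.DataFrame({"score": [3.1, 2.8, 3.5, 4.0, 4.2, 3.9],
#                    "group": ["a", "a", "a", "b", "b", "b"]})
# one_way_anova(df, target="score", between="group")                 # ANOVA table
# one_way_anova(df, target="score", between="group", summary=True)   # per-group summary stats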
def two_way_anova(data, target, between, summary=None):
formula = "Q('%s') ~ " % target
formula += "C(Q('%s'), Sum) * " % between[0]
formula += "C(Q('%s'), Sum)" % between[1]
model = ols(formula, data=data).fit()
result = anova_lm(model)
result = result.rename(columns={
'sum_sq' : 'Sum Square',
'mean_sq' : 'Mean Square',
'F' : 'F Statistic',
'PR(>F)' : 'p-value'
})
index_change = {}
for index in result.index:
changed = index
for var in between:
changed = changed.replace("C(Q('%s'), Sum)" % var, var)
changed = changed.replace(":", " : ")
index_change[index] = changed
result = result.rename(index_change)
result2 = pd.DataFrame(columns=["Count", "Mean", "Median", "Std.", "Variance"])
for var in between:
temp = pd.DataFrame(
{
"Count": data.groupby(var)[target].count(),
"Mean": data.groupby(var)[target].mean(),
"Median": data.groupby(var)[target].median(),
"Std.": data.groupby(var)[target].std(),
"Variance": data.groupby(var)[target].var()
}
)
index_change = {}
for index in temp.index:
changed = "{}({})".format(var, index)
index_change[index] = changed
temp = temp.rename(index_change)
result2 = pd.concat([result2, temp])
if summary:
return result2
else:
return result
def n_way_anova(data, target, between, summary=None):
formula = "Q('%s') ~ " % target
for var in between:
formula += "C(Q('%s'), Sum) * " % var
formula = formula[:-3]
model = ols(formula, data=data).fit()
result = anova_lm(model)
result = result.rename(columns={
'sum_sq' : 'Sum Square',
'mean_sq' : 'Mean Square',
'F' : 'F Statistic',
'PR(>F)' : 'p-value'
})
index_change = {}
for index in result.index:
changed = index
for var in between:
changed = changed.replace("C(Q('%s'), Sum)" % var, var)
changed = changed.replace(":", " : ")
index_change[index] = changed
result = result.rename(index_change)
result2 = pd.DataFrame(columns=["Count", "Mean", "Median", "Std.", "Variance"])
import pandas as pd
import numpy as np
from datetime import datetime
from math import radians, cos, sin, asin, sqrt
def __init__():
print("Using DataFormatter Class")
def weekday(x):
"""
Figures out the day of the week. Outputs 1 for Monday, 2 for Tuesday, and so on.
"""
return (x.weekday()+1)
def is_weekend(x):
"""
Figures out if it was a weekend. Outputs 0 for a weekday and 1 for a weekend day.
"""
z = x.weekday()+1
return z//6
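# weekday()+1 gives 1 (Monday) through 7 (Sunday), so floor division by 6
# yields 0 for Mon-Fri and 1 for Sat/Sun.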
def hourly_info(x):
"""
Separates the hour from the timestamp. Returns the hour of the day.
"""
n1 = x.hour
return n1
def minute_info(x):
"""
Separates the minutes from the timestamp. Returns the minute as a fraction of an hour.
"""
n2 = x.minute
return n2/60
def haversine(x):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1 = x['pickup_longitude']
lat1 = x['pickup_latitude']
lon2 = x['dropoff_longitude']
lat2 = x['dropoff_latitude']
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 3956 # Radius of earth in miles. Use 6371 for kilometers
return c * r
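# Illustrative check (hypothetical coordinates, not from the original snippet):
# row = {"pickup_longitude": -73.985, "pickup_latitude": 40.758,
#        "dropoff_longitude": -73.968, "dropoff_latitude": 40.785}
# haversine(row)  # roughly two miles with r = 3956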
def formatter(train):
#convert vendor id into one-hot
df_id = pd.DataFrame()
df_id['vendor_id'] = train['vendor_id']//2
df_id[['id']] = train[['id']].copy()
#print(df_id.head())
#convert flag into one-hot
tmp_df_flag = pd.get_dummies(train['store_and_fwd_flag'])
from app import app
from dash.dependencies import Input, Output, State
import plotly.graph_objects as go
import pandas as pd
import dash_table
import json
def update_interest_table():
"""
Connect slct_platform filter to ads areas of interest DataTable
Parameters:
fb-ads-interests (df) : dataframe with information for the facebook ads areas of interest
Return:
children (table) : the datatable to plot
"""
@app.callback(
Output("table", "children"),
[Input("slct_platform", "value")],
[State("fb-ads-interests", "data")],
)
def callback(slct_platform, data_fb):
# accumulate filtered platform dfs in a list
small_dfs = []
# check if facebook data exists
if data_fb is not None:
df_fb = pd.read_json(data_fb, orient="split")
small_dfs.append(df_fb)
if small_dfs:
# Concatenate all platform dfs in a single one
large_df = pd.concat(small_dfs, ignore_index=True)
platform = []
if isinstance(slct_platform, list) is False:
platform.append(slct_platform)
else:
platform = slct_platform
dff = large_df[large_df.platform.isin(platform)]
# Rename columns to get more explicit names on the webapp
dff.columns = ["Platform", "Advertising interests"]
# Change the order of columns to keep ads interest names on the left side
dff = dff.reindex(columns=["Advertising interests", "Platform"])
else:
no_data = ["Aucune donnée"]
dff = pd.DataFrame(
{
"Advertising interests": no_data,
"Platform": no_data,
}
)
children = dash_table.DataTable(
data=dff.to_dict("rows"),
columns=[{"id": c, "name": c} for c in dff.columns],
style_cell_conditional=[{"if": {"column_id": c}, "textAlign": "left"} for c in dff.columns],
style_as_list_view=True,
page_size=10,
style_header={"fontWeight": "bold"},
style_cell={
"backgroundColor": "rgba(0,0,0,0)",
"color": "#404040",
"font-family": "Lato",
},
)
return children
def update_interactions_chart():
"""
Connect slct_platform filter to interactions within the chart
Parameters:
fb-kpi-layer-2 (df) : dataframe with facebook information for the ads interactions
Return:
dff["count"].sum() (int) : the KPI to plot
"""
@app.callback(
Output("ads_interactions_text", "children"),
[Input("slct_platform", "value")],
[State("fb-kpi-layer-2", "data")],
)
def callback(slct_platform, data_fb):
# accumulate filtered dfs in this list
small_dfs = []
# check if facebook data exists
if data_fb is not None:
df_fb = pd.read_json(data_fb, orient="split")
dff_fb = df_fb[df_fb["info"] == "ads_interactions"]
small_dfs.append(dff_fb)
if small_dfs:
# Concatenate all platform dfs in a single one
large_df = pd.concat(small_dfs, ignore_index=True)
platform = []
if isinstance(slct_platform, list) is False:
platform.append(slct_platform)
else:
platform = slct_platform
dff = large_df[large_df.platform.isin(platform)]
ads_interactions = dff["count"].sum()
else:
ads_interactions = 0
return ads_interactions
def update_advertisers_chart():
"""
Connect slct_platform filter to advertisers within the chart
Parameters:
fb-kpi-layer-2 (df) : dataframe with facebook information for the advertisers
Return:
dff["count"].sum() (int) : the KPI to plot
"""
@app.callback(
Output("num_advertisers_text", "children"),
[Input("slct_platform", "value")],
[State("fb-kpi-layer-2", "data")],
)
def callback(slct_platform, data_fb):
# accumulate filtered dfs in this list
small_dfs = []
# check if facebook data exists
if data_fb is not None:
df_fb = pd.read_json(data_fb, orient="split")
from dataclasses import asdict
from pathlib import Path
from typing import Union
import pandas
from oidafuel.core import GasPrice, get_gas_stations_by_region, GasStationInfo
from oidafuel.datatypes import FuelType, GasStation
from oidafuel.econtrol import get_regions
from pandas import DataFrame
DATA_PATH = Path("data")
GAS_STATIONS_FILENAME = "gas_stations.csv"
REGIONS_FILENAME = "regions.csv"
CITIES_FILENAME = "cities.csv"
POSTAL_CODES_FILENAME = "postal_codes.csv"
fuel_type_names = {
FuelType.SUPER_95: "Super 95",
FuelType.DIESEL: "Diesel",
FuelType.CNG_ERDGAS: "CNG-Erdgas",
}
GAS_STATION_DTYPES = {
"station_id": "int64",
"name": "str",
"address": "str",
"postal_code": "str",
"city": "str",
"latitude": "float64",
"longitude": "float64",
}
def get_gas_stations_austria(fuel_type: FuelType) -> list[GasStation]:
regions = get_regions()
gas_stations: list[GasStation] = []
for region in regions:
print(
f"Checking {region.name} ({region.region_code}) "
f"for {fuel_type_names[fuel_type]} prices...",
)
for sub_region in region.sub_regions:
print(
f"\t{sub_region.name} "
f"({sub_region.region_code}) "
f"{fuel_type_names[fuel_type]}...",
end="",
)
new_stations = get_gas_stations_by_region(sub_region.region_code, fuel_type)
print(f"({len(new_stations)} found)")
gas_stations.extend(new_stations)
unique_stations = list(dict.fromkeys(gas_stations))
return unique_stations
def get_gas_stations_vienna(fuel_type: FuelType) -> list[GasStation]:
regions = get_regions()
vienna_region = regions[8]
assert vienna_region.name == "Wien"
gas_stations = []
for region in vienna_region.sub_regions:
print(
f"Checking {region.name} ({region.region_code}) "
f"for {fuel_type_names[fuel_type]} prices...",
end="",
)
new_stations = get_gas_stations_by_region(region.region_code, fuel_type)
print(f"({len(new_stations)} found)")
gas_stations.extend(new_stations)
unique_stations = list(dict.fromkeys(gas_stations))
return unique_stations
def save_gas_prices_to_file(gas_prices: list[GasPrice], file_name: Union[str, Path]):
dataframe = pandas.DataFrame(gas_prices)
file_path = DATA_PATH / file_name
kwargs = {"path_or_buf": file_path, "index": False, "mode": "a"}
if file_path.exists():
kwargs["header"] = False
print(f"Saving {len(gas_prices)} gas prices in {file_name}...")
file_path.parent.mkdir(parents=True, exist_ok=True)
dataframe.to_csv(**kwargs)
def update_dataframes(
original_df: DataFrame, new_df: DataFrame, sort_column
) -> DataFrame:
columns1 = list(original_df)
columns2 = list(new_df)
assert columns1 == columns2
assert (original_df.dtypes == new_df.dtypes).all()
df1 = original_df.sort_values(sort_column)
df2 = new_df.sort_values(sort_column)
assert len(df1) == len(original_df)
assert len(df2) == len(new_df)
unique_df1_ids = df1[sort_column].unique()
unique_df2_ids = df2[sort_column].unique()
try:
assert len(unique_df1_ids) == len(df1)
assert len(unique_df2_ids) == len(df2)
except AssertionError as e:
duplicates1 = df1[df1.station_id.duplicated()]
duplicates2 = df2[df2.station_id.duplicated()]
print("Duplicates 1:")
print(duplicates1)
print("Duplicates 2:")
print(duplicates2)
raise e
df = pandas.concat([df1, df2])
df.sort_values(sort_column, inplace=True)
df.drop_duplicates(sort_column, inplace=True)
assert len(df) >= len(df1)
assert len(df) >= len(df2)
return df
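# Illustrative usage (hypothetical frame names, not from the original source):
# combined = update_dataframes(existing_stations_df, freshly_scraped_df, sort_column="station_id")
# Rows sharing a station_id are deduplicated so each station appears once in the result.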
def read_gas_stations_file(
file_name: Union[str, Path] = "gas_stations.csv",
data_path: Path = DATA_PATH,
):
file_path = data_path / file_name
if file_path.exists():
return pandas.read_csv(file_path, dtype=GAS_STATION_DTYPES)
else:
data = {
key: pandas.Series(dtype=value) for key, value in GAS_STATION_DTYPES.items()
}
return DataFrame(data)
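# Illustrative usage (hypothetical file name, not from the original source):
# stations_df = read_gas_stations_file()            # existing CSV, typed via GAS_STATION_DTYPES
# empty_df = read_gas_stations_file("missing.csv")  # falls back to an empty, correctly typed frame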
import os
import html5lib
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from datetime import date, timedelta, datetime as dt
from pymongo import MongoClient
from itertools import cycle
import numpy as np
# from kdriver import RemoteDriverStartService
class RemoteDriverStartService():
options = webdriver.ChromeOptions()
# Set user app data to a new directory
options.add_argument("user-data-dir=C:\\Users\\Donley\\App Data\\Google\\Chrome\\Application\\User Data\\Kit")
options.add_experimental_option("Proxy", "null")
options.add_experimental_option("excludeSwitches", ["ignore-certificate-errors"])
# Create a download path for external data sources as default:
options.add_experimental_option("prefs", {
"download.default_directory": r"C:\Users\Donley\Documents\GA_TECH\SUBMISSIONS\PROJECT2-CHALLENGE\data\external",
"download.prompt_for_download": False,
"download.directory_upgrade": True,
"safebrowsing.enabled": True
}),
# Add those optional features to capabilities
caps = options.to_capabilities()
def start_driver(self):
return webdriver.Remote(command_executor='http://127.0.0.1:4444',
desired_capabilities=self.caps)
# Connect to MongoDB
client = MongoClient("mongodb://localhost:27017")
db = client['investopedia']
def invsto_scrape():
# Instantiate the remote driver start service (note: this rebinds the imported DesiredCapabilities name):
DesiredCapabilities = RemoteDriverStartService()
# Create variables for scraping:
investo = "https://www.investopedia.com/top-communications-stocks-4583180"
# Download data to paths, csv's, json, etc:
# for external data sources
external = "../data/external/"
# for processed data sources with ID's
processed = "../data/processed/"
# Locate Driver in system
current_path = os.getcwd()
# save the .exe file under the same directory of the web-scrape python script.
Path = os.path.join(current_path, "chromedriver")
# Initialize Chrome driver and start browser session controlled by automated test software under Kit profile.
caps = webdriver.DesiredCapabilities.CHROME.copy()
caps['acceptInsecureCerts'] = True
# caps = webdriver.DesiredCapabilities.CHROME.copy()
# caps['acceptInsecureCerts'] = True
# driver = webdriver.Chrome(options=options, desired_capabilities=caps)
driver = webdriver.Chrome(executable_path='chromedriver', desired_capabilities=caps)
## Step 3: Find the IDs of the items we want to scrape for [5]
# Start Grabbing Information from investopedia:
driver.get(investo)
driver.maximize_window()
timeout = 30
# Find an ID on the page and wait before executing anything until found:
try:
WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.ID, "main_1-0")))
except TimeoutException:
driver.quit()
## Step 5: The full code that runs the scraper and saves the data to .csv files
itable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
itables = pd.read_html(itable)
communications_bv = itables[0]
communications_bv.columns = ["Communictaions Best Value", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
communications_bv
# Locate column containing ticker symbols:
communications_bv_df = communications_bv.iloc[1:]
# Only keep tick information within parentheses:
communications_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in communications_bv_df["Communications Best Value"]]
communications_bv_ticks
communications_fg = itables[1]
communications_fg.columns = ["Communications Fastest Growing", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
communications_fg_df = communications_fg.iloc[1:]
communications_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in communications_fg_df["Communications Fastest Growing"]]
communications_fg_ticks
communications_mm = itables[2]
communications_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
communications_mm_df = communications_mm.iloc[1:]
communications_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in communications_mm_df["Communications Most Momentum"]]
del communications_mm_ticks[-2:]
communications_mm_ticks
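# The per-sector blocks below repeat the same pattern: click the sector link in the
# journey nav, read the three tables (best value, fastest growing, most momentum) from
# the "main_1-0" element with pd.read_html, drop the header row, and keep the ticker
# symbol found inside the parentheses of each company name.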
discretionary = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(3) > a')
discretionary
discretionary[0].click()
dtable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
dtables = pd.read_html(dtable)
discretionary_bv = dtables[0]
discretionary_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
discretionary_bv
# Locate column containing ticker symbols:
discretionary_bv_df = discretionary_bv.iloc[1:]
# Only keep tick information within parentheses:
discretionary_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in discretionary_bv_df["tick"]]
discretionary_bv_ticks
discretionary_fg = dtables[1]
discretionary_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
discretionary_fg_df = discretionary_fg.iloc[1:]
discretionary_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in discretionary_fg_df["stock"]]
discretionary_fg_ticks
discretionary_mm = dtables[2]
discretionary_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
discretionary_mm_df = discretionary_mm.iloc[1:]
discretionary_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in discretionary_mm_df["Communications Most Momentum"]]
del discretionary_mm_ticks[-2:]
discretionary_mm_ticks
staples = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(4) > a')
staples[0].click()
stable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
stables = pd.read_html(stable)
staples_bv = stables[0]
staples_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
staples_bv
# Locate column containing ticker symbols:
staples_bv_df = staples_bv.iloc[1:]
# Only keep tick information within parentheses:
staples_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in staples_bv_df["tick"]]
staples_bv_ticks
staples_fg = stables[1]
staples_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
staples_fg_df = staples_fg.iloc[1:]
staples_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in staples_fg_df["stock"]]
staples_fg_ticks
staples_mm = stables[2]
staples_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
staples_mm_df = staples_mm.iloc[1:]
staples_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in staples_mm_df["Communications Most Momentum"]]
del staples_mm_ticks[-2:]
staples_mm_ticks
energy = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(5) > a')
energy[0].click()
etable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
etables = pd.read_html(etable)
energy_bv = etables[0]
energy_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
energy_bv
# Locate column containing ticker symbols:
energy_bv_df = energy_bv.iloc[1:]
# Only keep tick information within parentheses:
energy_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in energy_bv_df["tick"]]
energy_bv_ticks
energy_fg = etables[1]
energy_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
energy_fg_df = energy_fg.iloc[1:]
energy_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in energy_fg_df["stock"]]
energy_fg_ticks
energy_mm = etables[2]
energy_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
energy_mm_df = energy_mm.iloc[1:]
energy_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in energy_mm_df["Communications Most Momentum"]]
del energy_mm_ticks[-2:]
energy_mm_ticks
financial = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(6) > a')
financial[0].click()
ftable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
ftables = pd.read_html(ftable)
financial_bv = ftables[0]
financial_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
financial_bv
# Locate column containing ticker symbols:
financial_bv_df = financial_bv.iloc[1:]
# Only keep tick information within parentheses:
financial_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in financial_bv_df["tick"]]
financial_bv_ticks
financial_fg = ftables[1]
financial_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
financial_fg_df = financial_fg.iloc[1:]
financial_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in financial_fg_df["stock"]]
financial_fg_ticks
financial_mm = ftables[2]
financial_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
financial_mm_df = financial_mm.iloc[1:]
financial_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in financial_mm_df["Communications Most Momentum"]]
del financial_mm_ticks[-2:]
financial_mm_ticks
healthcare = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(7) > a')
healthcare[0].click()
htable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
htables = pd.read_html(htable)
healthcare_bv = htables[0]
healthcare_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
healthcare_bv
# Locate column containing ticker symbols:
healthcare_bv_df = healthcare_bv.iloc[1:]
# Only keep tick information within parentheses:
healthcare_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in healthcare_bv_df["tick"]]
healthcare_bv_ticks
healthcare_fg = htables[1]
healthcare_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
healthcare_fg_df = healthcare_fg.iloc[1:]
healthcare_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in healthcare_fg_df["stock"]]
healthcare_fg_ticks
healthcare_mm = htables[2]
healthcare_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
healthcare_mm_df = healthcare_mm.iloc[1:]
healthcare_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in healthcare_mm_df["Communications Most Momentum"]]
del healthcare_mm_ticks[-2:]
healthcare_mm_ticks
industrial = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(8) > a')
industrial[0].click()
intable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
intables = pd.read_html(intable)
industrial_bv = intables[0]
industrial_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
industrial_bv
# Locate column containing ticker symbols:
industrial_bv_df = industrial_bv.iloc[1:]
# Only keep tick information within parentheses:
industrial_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in industrial_bv_df["tick"]]
industrial_bv_ticks
industrial_fg = intables[1]
industrial_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
industrial_fg_df = industrial_fg.iloc[1:]
industrial_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in industrial_fg_df["stock"]]
industrial_fg_ticks
industrial_mm = intables[2]
industrial_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
industrial_mm_df = industrial_mm.iloc[1:]
industrial_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in industrial_mm_df["Communications Most Momentum"]]
del industrial_mm_ticks[-2:]
industrial_mm_ticks
materials = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(9) > a')
materials[0].click()
motable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
motables = pd.read_html(motable)
materials_bv = motables[0]
materials_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
materials_bv
# Locate column containing ticker symbols:
materials_bv_df = materials_bv.iloc[1:]
# Only keep tick information within parentheses:
materials_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in materials_bv_df["tick"]]
materials_bv_ticks
materials_fg = motables[1]
materials_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
materials_fg_df = materials_fg.iloc[1:]
materials_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in materials_fg_df["stock"]]
materials_fg_ticks
materials_mm = motables[2]
materials_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
materials_mm_df = materials_mm.iloc[1:]
materials_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in materials_mm_df["Communications Most Momentum"]]
del materials_mm_ticks[-2:]
materials_mm_ticks
real_estate = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(10) > a')
real_estate[0].click()
retable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
retables = pd.read_html(retable)
real_estate_bv = retables[0]
real_estate_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
real_estate_bv
# Locate column containing ticker symbols:
real_estate_bv_df = real_estate_bv.iloc[1:]
# Only keep tick information within parentheses:
real_estate_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in real_estate_bv_df["tick"]]
real_estate_bv_ticks
real_estate_fg = retables[1]
real_estate_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
real_estate_fg_df = real_estate_fg.iloc[1:]
real_estate_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in real_estate_fg_df["stock"]]
real_estate_fg_ticks
real_estate_mm = retables[2]
real_estate_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
real_estate_mm_df = real_estate_mm.iloc[1:]
real_estate_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in real_estate_mm_df["Communications Most Momentum"]]
del real_estate_mm_ticks[-2:]
real_estate_mm_ticks
tech = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(11) > a')
tech[0].click()
tetable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
tetables = pd.read_html(tetable)
tech_bv = tetables[0]
tech_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
tech_bv
# Locate column containing ticker symbols:
tech_bv_df = tech_bv.iloc[1:]
# Only keep tick information within parentheses:
tech_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in tech_bv_df["tick"]]
tech_bv_ticks
tech_fg = tetables[1]
tech_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
tech_fg_df = tech_fg.iloc[1:]
tech_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in tech_fg_df["stock"]]
tech_fg_ticks
tech_mm = tetables[2]
tech_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
tech_mm_df = tech_mm.iloc[1:]
tech_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in tech_mm_df["Communications Most Momentum"]]
del tech_mm_ticks[-2:]
tech_mm_ticks
utilities = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(12) > a')
utilities[0].click()
utable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
utables = pd.read_html(utable)
utilities_bv = utables[0]
utilities_bv.columns = ["tick", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
utilities_bv
# Locate column containing ticker symbols:
utilities_bv_df = utilities_bv.iloc[1:]
# Only keep tick information within parentheses:
utilities_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in utilities_bv_df["tick"]]
utilities_bv_ticks
utilities_fg = utables[1]
utilities_fg.columns = ["stock", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
utilities_fg_df = utilities_fg.iloc[1:]
utilities_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in utilities_fg_df["stock"]]
utilities_fg_ticks
utilities_mm = utables[2]
utilities_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
utilities_mm_df = utilities_mm.iloc[1:]
utilities_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in utilities_mm_df["Communications Most Momentum"]]
del utilities_mm_ticks[-2:]
utilities_mm_ticks
lists = [
communications_bv_ticks, communications_fg_ticks, communications_mm_ticks,
discretionary_bv_ticks, discretionary_fg_ticks, discretionary_mm_ticks,
staples_bv_ticks, staples_fg_ticks, staples_mm_ticks,
energy_bv_ticks, energy_fg_ticks, energy_mm_ticks,
financial_bv_ticks, financial_fg_ticks, financial_mm_ticks,
healthcare_bv_ticks, healthcare_fg_ticks, healthcare_mm_ticks,
industrial_bv_ticks, industrial_fg_ticks, industrial_mm_ticks,
tech_bv_ticks, tech_fg_ticks, tech_mm_ticks,
materials_bv_ticks, materials_fg_ticks, materials_mm_ticks,
real_estate_bv_ticks, real_estate_fg_ticks, real_estate_mm_ticks,
utilities_bv_ticks, utilities_fg_ticks, utilities_mm_ticks,
]
stock_list = [item for sublist in lists for item in sublist]
sector_collection = db['sector_stock_list']
# Insert collection
sector_collection.update_many({}, {"$set": {"Sector Stocks": stock_list}}, upsert=True)
sp500_df = pd.read_csv('../data/external/sp500.csv')
import torch
torch.cuda.current_device()
import argparse
import datetime
import json
from collections import defaultdict
import yaml
import logging
import os
import hashlib
import base64
from copy import deepcopy
import numpy as np
np.random.seed(123456789) # makes random sampling from training data deterministic between runs
from torch.utils.data import DataLoader, RandomSampler, Dataset
from tqdm import tqdm, trange
from transformers import (
AdamW,
BertConfig,
BertTokenizer,
BertModel,
RobertaConfig,
RobertaTokenizer,
RobertaModel,
ElectraConfig, ElectraTokenizer, ElectraModel,
AlbertConfig, AlbertTokenizer, AlbertModel,
get_linear_schedule_with_warmup,
)
from transformer_model import TransformerModel, TransformerModelV2, TransformerModelV3, TransformerModelSoftmax, \
TransformerModelMSE, TransformerModelPairwise
MODEL_CLASSES = {
"bert": (BertConfig, BertModel, BertTokenizer),
"roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
"electra": (ElectraConfig, ElectraModel, ElectraTokenizer),
"albert": (AlbertConfig, AlbertModel, AlbertTokenizer)
}
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Manager:
def __init__(self, config):
self.config = config
self.device = torch.device("cuda" if torch.cuda.is_available() and not config.get("no_gpu", False) else "cpu")
self.all_agents = config["all_agents"]
self.agent_weights = [1.0] * len(self.all_agents)
self.agent_weights_ce = [1.0] * len(self.all_agents)
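# Per-agent loss weights: both lists start uniform and are re-estimated from the
# training-set example counts in load_and_cache_examples; _collate passes them to the
# model as pos_weight (BCE-style variants) or as class weights (softmax/pairwise variants).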
hashed_config = "" if config.get("disable_hash_in_folder_name", False) else \
str(base64.b64encode(hashlib.md5(bytearray(json.dumps(config, sort_keys=True), encoding="utf-8"))
.digest()), "utf-8").replace("\\'", "_").replace("/", "_")
folder_name = "{}_{}_{}".format(config["model"]["model"], config["model"]["model_name"], hashed_config)
if "folder_name_prefix" in config:
folder_name = "{}_{}".format(config["folder_name_prefix"], folder_name)
self.out_dir = os.path.join(config["out_dir"], folder_name)
os.makedirs(self.out_dir, exist_ok=True)
with open(os.path.join(self.out_dir, "config.yaml"), mode="w") as f:
yaml.dump(config, f)
os.makedirs(os.path.join(self.out_dir, "latest_model"), exist_ok=True)
os.makedirs(os.path.join(self.out_dir, "best_model"), exist_ok=True)
self.model = None
self.tokenizer = None
self.all_results = None
if os.path.exists(os.path.join(self.out_dir, "results.json")):
with open(os.path.join(self.out_dir, "results.json")) as f:
self.all_results = json.load(f)
else:
self.all_results = {"best_epoch": -1, "latest_epoch": -1, "epoch_results": [], "test_results": {}}
logger.info("### Initialize Model ###")
self.init_and_load_model(prefer_best=False)
def run(self):
do_train = self.config.get("do_train", True)
do_dev = self.config.get("do_dev", True)
do_test = self.config.get("do_test", False)
logger.info("### Loading Data ###")
train_dataset = self.load_and_cache_examples(self.tokenizer, "train") if do_train else None
dev_datasets = self.load_and_cache_examples(self.tokenizer, "dev") if do_dev else None
test_datasets = self.load_and_cache_examples(self.tokenizer, "test") if do_test else None
if do_train:
logger.info("### Starting the training ###")
self.train(train_dataset, dev_datasets)
if do_test:
logger.info("### Starting the testing with the best model ###")
self.init_and_load_model(prefer_best=True)
self.all_results["test_results"] = self.eval(test_datasets, is_test=True)
with open(os.path.join(self.out_dir, "results.json"), "w") as f:
json.dump(self.all_results, f, indent=4)
def init_and_load_model(self, prefer_best):
config_class, model_class, tokenizer_class = MODEL_CLASSES[self.config["model"]["model"]]
bert_model = model_class.from_pretrained(self.config["model"]["model_name"], cache_dir=self.config.get("transformer_cache_dir", None))
if self.config["model"].get("version", "v2") == "v1":
model = TransformerModel(self.config, bert_model)
elif self.config["model"].get("version", "v2") == "v2":
model = TransformerModelV2(self.config, bert_model)
elif self.config["model"].get("version", "v2") == "v3":
model = TransformerModelV3(self.config, bert_model)
elif self.config["model"].get("version", "v2") == "softmax":
model = TransformerModelSoftmax(self.config, bert_model)
elif self.config["model"].get("version", "v2") == "mse":
model = TransformerModelMSE(self.config, bert_model)
elif self.config["model"].get("version", "v2") == "pairwise":
model = TransformerModelPairwise(self.config, bert_model)
best_model_file = os.path.join(self.out_dir, "best_model", "model.pty")
latest_model_file = os.path.join(self.out_dir, "latest_model", "model.pty")
if prefer_best and os.path.isfile(best_model_file):
logger.info("Loading best model...")
state_dict = torch.load(best_model_file)
model.load_state_dict(state_dict)
elif os.path.isfile(latest_model_file):
logger.info("Loading latest model...")
state_dict = torch.load(latest_model_file)
model.load_state_dict(state_dict)
elif "base_model" in self.config and os.path.isfile(self.config["base_model"]):
logger.info("Loading a base model...")
state_dict = torch.load(self.config["base_model"])
#non_bert_keys = [key for key in state_dict.keys() if "bert" not in key]
#to_delete = [key for key in non_bert_keys if "extend" in key]
#if self.config.get("base_model_exclude_old_agents", False):
# to_delete.extend([key for key in non_bert_keys if "classifier" in key])
#logger.info("Removing these parameters: {}".format(to_delete))
#for key in to_delete:
# state_dict.pop(key, None)
model.load_state_dict(state_dict, strict=False)
if self.tokenizer is None:
self.tokenizer = tokenizer_class.from_pretrained(self.config["model"]["model_name"], cache_dir=self.config.get("transformer_cache_dir", None))
self.model = model
self.model.to(self.device)
def train(self, train_dataset, dev_datasets=None):
self.model.train()
train_config = self.config["train"]
train_sampler = RandomSampler(train_dataset)
gradient_accumulation_steps = train_config.get("gradient_accumulation_steps", 1)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
batch_size=int(train_config["batch_size"]/gradient_accumulation_steps), collate_fn=self._collate)
epochs = train_config["epochs"]
if train_config.get("weight_constrain", False):
param_copy = deepcopy(list(self.model.bert.parameters())+list(self.model.classifier.parameters())+list(self.model.adapter.parameters()))
if train_config.get("max_steps", 0) > 0:
t_total = train_config["max_steps"]
epochs = train_config["max_steps"] // (len(train_dataloader) // gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // gradient_accumulation_steps * epochs
if train_config.get("freeze_bert", False) or train_config.get("freeze_extend", False):
logger.warning("! Freezing Bert Parameters !")
for param in self.model.bert.parameters():
param.requires_grad = False
#if self.config["model"].get("version", "v2") == "v1":
# for param in self.model.preclass.parameters():
# param.requires_grad = False
if train_config.get("freeze_extend", False):
if self.config["model"].get("version", "v2") == "v3":
for param in self.model.preclass1.parameters():
param.requires_grad = False
for param in self.model.preclass2.parameters():
param.requires_grad = False
self.model.embedding.requires_grad = False
else:
for param in self.model.classifier.parameters():
param.requires_grad = False
if self.config["model"].get("version", "v2") in ["v2", "softmax", "mse", "pairwise"]:
logger.warning("! Freezing Old Classifier Parameters !")
for param in self.model.adapter.parameters():
param.requires_grad = False
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
weight_decay = train_config.get("weight_decay", 0.0)
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": weight_decay,
},
{"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
learning_rate = train_config.get("learning_rate", 5e-5)
adam_epsilon = train_config.get("adam_epsilon", 1e-8)
warmup_fraction = train_config.get("warmup_fraction", 0.0)
warmup_steps = t_total*warmup_fraction
max_grad_norm = train_config.get("max_grad_norm", 1.0)
optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, eps=adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total
)
latest_optim = os.path.join(self.out_dir, "latest_model", "optimizer.pty")
latest_scheduler = os.path.join(self.out_dir, "latest_model", "scheduler.pty")
if os.path.isfile(latest_optim) and os.path.isfile(latest_scheduler):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(latest_optim))
scheduler.load_state_dict(torch.load(latest_scheduler))
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", epochs)
logger.info(" Batchsize = %d", train_config["batch_size"])
logger.info(" Gradient Accumulation steps = %d", gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
epochs_trained = 0
# Check if continuing training from a checkpoint
if self.all_results["latest_epoch"] >= 0:
# set global_step to global_step of last saved checkpoint from model path
epochs_trained = self.all_results["latest_epoch"]+1
logger.info(" Continuing training from checkpoint")
logger.info(" Continuing training from epoch %d", epochs_trained)
tr_loss, log_loss, epoch_loss = 0.0, 0.0, 0.0
self.model.zero_grad()
train_iterator = trange(epochs_trained, int(epochs), desc="Epoch", position=0)
for current_epoch in train_iterator:
train_dataset.resample()
epoch_iterator = tqdm(train_dataloader, position=1, desc="Iteration")
loss_log = tqdm(total=0, position=2, bar_format="{desc}")
for step, batch in enumerate(epoch_iterator):
self.model.train()
batch = tuple(t.to(self.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[2], "pos_weight": batch[3]}
if train_config.get("loss_resample"):
outputs = self.model(**inputs, reduction="none")
loss = outputs[1] # model outputs are always tuple in transformers (see doc)
if train_config["loss_resample_mode"] == "new":
resample_loss = loss[:, -1].cpu().tolist()
elif train_config["loss_resample_mode"] == "global":
resample_loss = torch.max(loss, dim=1)[0].cpu().tolist()
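# "new" scores each example by the loss of the last output column (the most recently
# added agent); "global" uses its maximum loss across all agents.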
loss = torch.mean(loss)
train_dataset.add_losses(resample_loss)
else:
outputs = self.model(**inputs)
loss = outputs[1] # model outputs are always tuple in transformers (see doc)
loss.backward()
tr_loss += loss.item()
if (step + 1) % gradient_accumulation_steps == 0:
if max_grad_norm>0:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_grad_norm)
if train_config.get("weight_constrain", False):
params = list(self.model.bert.parameters())+list(self.model.classifier.parameters())+list(self.model.adapter.parameters())
constrain_loss = train_config["weight_constrain_factor"]*torch.stack([torch.sum((pc-p)**2) for pc, p in zip(param_copy, params)]).sum()
tr_loss = tr_loss + constrain_loss
optimizer.step()
scheduler.step() # Update learning rate schedule
self.model.zero_grad()
if step%10 == 0:
l = (tr_loss-log_loss)/10
log_loss = tr_loss
loss_log.set_description_str("loss: {}".format(l))
if dev_datasets is not None:
logs = {}
results = self.eval(dev_datasets, is_test=False)
for key, value in results.items():
logs[key] = value
results["epoch"] = current_epoch
self.all_results["epoch_results"].append(results)
# Save model checkpoint
best_model_file = os.path.join(self.out_dir, "best_model", "model.pty")
latest_model_file = os.path.join(self.out_dir, "latest_model", "model.pty")
latest_optim = os.path.join(self.out_dir, "latest_model", "optimizer.pty")
latest_scheduler = os.path.join(self.out_dir, "latest_model", "scheduler.pty")
state_dict = self.model.state_dict()
main_metric = self.config["dev"].get("main_metric", "accuracy")
current_main_metric = results[main_metric]
old_main_metric = self.all_results["epoch_results"][self.all_results["best_epoch"]][main_metric]
# true iff the new result improves on the previous best: larger when larger is better,
# smaller when smaller is better; the first epoch always counts as an improvement
better = (self.config["dev"].get("larger_is_better", True) ==
(current_main_metric > old_main_metric)) or current_epoch==0
if better:
self.all_results["best_epoch"] = current_epoch
torch.save(state_dict, best_model_file)
logger.info("New best epoch result. Current epoch improves in {} from {:.4f} to {:.4f}".format(
main_metric, old_main_metric, current_main_metric))
torch.save(state_dict, latest_model_file)
logger.info("Saving latest model checkpoint")
torch.save(optimizer.state_dict(), latest_optim)
torch.save(scheduler.state_dict(), latest_scheduler)
logger.info("Saving optimizer and scheduler states")
self.all_results["latest_epoch"] = current_epoch
with open(os.path.join(self.out_dir, "results.json"), "w") as f:
json.dump(self.all_results, f, indent=4)
def eval(self, datasets, is_test=True):
self.model.eval()
if is_test:
eval_config = self.config["test"]
else:
eval_config = self.config["dev"]
results_all = defaultdict(lambda: list())
confusion_matrix = [[0]*len(self.all_agents) for _ in self.all_agents]
activation_mean = [[0]*len(self.all_agents) for _ in self.all_agents]
activation_max = [[0]*len(self.all_agents) for _ in self.all_agents]
activation_min = [[0]*len(self.all_agents) for _ in self.all_agents]
activation_std = [[0]*len(self.all_agents) for _ in self.all_agents]
for i, dataset in enumerate(datasets):
agent_label = dataset[0][1]
agent = self.all_agents[dataset[0][1]]
dataloader = DataLoader(dataset, shuffle=False, batch_size=eval_config["batch_size"], collate_fn=self._collate)
dataset_iterator = tqdm(dataloader, desc="Iteration ({})".format(agent))
logger.info("Evaluating agent {}/{}: {}".format(i+1, len(datasets), agent))
logger.info("{} questions".format(len(dataset)))
start = datetime.datetime.now()
ranks = []
average_precisions = []
activation = []
for batch in dataset_iterator:
batch = tuple(t.to(self.device) for t in batch[:2])
inputs = {"input_ids": batch[0], "attention_mask": batch[1]}
with torch.no_grad():
outputs = self.model(**inputs)[0].cpu().numpy()
for res in outputs:
rank = 0
precisions = 0
activation.append(res)
ranking = np.argsort(res)[::-1]
confusion_matrix[agent_label][ranking[0]] += 1
for j, answer in enumerate(ranking, start=1):
if answer == agent_label:
if rank == 0:
rank = j
precisions = 1 / float(j)
ranks.append(rank)
average_precisions.append(precisions)
end = datetime.datetime.now()
time_taken = end - start
correct_answers = len([a for a in ranks if a == 1])
recall_3 = len([a for a in ranks if a <= 3 and a != 0])
recall_5 = len([a for a in ranks if a <= 5 and a != 0])
results = {
'accuracy': correct_answers / float(len(ranks)),
'mrr': np.mean([1 / float(r) if r>0 else 0 for r in ranks]),
'r@3': recall_3/float(len(ranks)),
'r@5': recall_5/float(len(ranks)),
'query_per_second': float(len(ranks))/time_taken.total_seconds()
}
#activation = (activation/float(len(ranks))).tolist()
activation = np.array(activation)
mean = np.mean(activation, axis=0).tolist()
max = np.max(activation, axis=0).tolist()
min = np.min(activation, axis=0).tolist()
std = np.std(activation, axis=0).tolist()
activation_mean[agent_label] = mean
activation_min[agent_label] = min
activation_max[agent_label] = max
activation_std[agent_label] = std
for key, value in results.items():
results_all[key].append(value)
results_all[agent] = results
results_all["confusion_matrix"] = confusion_matrix
results_all["activation"] = {}
results_all["activation"]["mean"] = activation_mean
results_all["activation"]["std"] = activation_std
results_all["activation"]["min"] = activation_min
results_all["activation"]["max"] = activation_max
precision, recall, f1 = self._compute_f1(confusion_matrix)
for dataset in datasets:
agent = self.all_agents[dataset[0][1]]
idx = self.all_agents.index(agent)
results = results_all[agent]
results["precision"] = precision[idx]
results["recall"] = recall[idx]
results["f1"] = f1[idx]
results_all["precision"].append(precision[idx])
results_all["recall"].append(recall[idx])
results_all["f1"].append(f1[idx])
logger.info('\nResults for agent {}'.format(agent))
self._log_results(results)
for key in ["accuracy", "precision", "recall", "f1", "mrr", "r@3", 'r@5', "query_per_second"]:
results_all[key] = np.mean(results_all[key])
logger.info('\nResults for all datasets:')
self._log_results(results_all)
if eval_config.get("print_confusion_matrix", False):
self._log_confusion_matrix(confusion_matrix)
return results_all
def outlier_eval(self, outlier_agent):
self.init_and_load_model(prefer_best=True)
self.all_agents.append(outlier_agent)
datasets = self.load_and_cache_examples(self.tokenizer, "test")
self.model.eval()
eval_config = self.config["test"]
results_all = defaultdict(lambda: list())
over_threshold5 = []
over_threshold8 = []
dataset = datasets[0]
dataloader = DataLoader(dataset, shuffle=False, batch_size=eval_config["batch_size"], collate_fn=self._collate)
dataset_iterator = tqdm(dataloader, desc="Iteration ({})".format(outlier_agent))
logger.info("Outlier evaluation with agent: {}".format(outlier_agent))
logger.info("{} questions".format(len(dataset)))
for batch in dataset_iterator:
batch = tuple(t.to(self.device) for t in batch[:2])
inputs = {"input_ids": batch[0], "attention_mask": batch[1]}
with torch.no_grad():
outputs = self.model(**inputs)[0].cpu().numpy()
for res in outputs:
a = res.copy()
a[a >= 0.5] = 1
a[a < 0.5] = 0
b = res.copy()
b[b >= 0.8] = 1
b[b < 0.8] = 0
over_threshold5.append(a)
over_threshold8.append(b)
over_threshold5 = np.array(over_threshold5).mean(axis=0).tolist()
over_threshold8 = np.array(over_threshold8).mean(axis=0).tolist()
results_all = {"agent": outlier_agent, "over_threshold_0.5": over_threshold5, "over_threshold_0.8": over_threshold8}
logger.info(results_all)
self.all_results["outlier_results"] = results_all
with open(os.path.join(self.out_dir, "results.json"), "w") as f:
json.dump(self.all_results, f, indent=4)
def outliers_eval(self):
self.init_and_load_model(prefer_best=True)
datasets = self.load_and_cache_examples(self.tokenizer, "test")
self.model.eval()
eval_config = self.config["test"]
results_all = []
for dataset in datasets:
over_threshold5 = []
over_threshold8 = []
outlier_agent = self.all_agents[dataset[0][1]]
dataloader = DataLoader(dataset, shuffle=False, batch_size=eval_config["batch_size"], collate_fn=self._collate)
dataset_iterator = tqdm(dataloader, desc="Iteration ({})".format(outlier_agent))
logger.info("Outlier evaluation with agent: {}".format(outlier_agent))
logger.info("{} questions".format(len(dataset)))
for batch in dataset_iterator:
batch = tuple(t.to(self.device) for t in batch[:2])
inputs = {"input_ids": batch[0], "attention_mask": batch[1]}
with torch.no_grad():
outputs = self.model(**inputs)[0].cpu().numpy()
for res in outputs:
a = res.copy()
a[a >= 0.5] = 1
a[a < 0.5] = 0
b = res.copy()
b[b >= 0.8] = 1
b[b < 0.8] = 0
over_threshold5.append(a)
over_threshold8.append(b)
over_threshold5 = np.array(over_threshold5).mean(axis=0).tolist()
over_threshold8 = np.array(over_threshold8).mean(axis=0).tolist()
results_all.append({"agent": outlier_agent, "over_threshold_0.5": over_threshold5, "over_threshold_0.8": over_threshold8})
logger.info(results_all)
self.all_results["outlier_results"] = results_all
with open(os.path.join(self.out_dir, "results.json"), "w") as f:
json.dump(self.all_results, f, indent=4)
def activation_eval(self, new_agent):
self.init_and_load_model(prefer_best=True)
self.all_agents.append(new_agent)
datasets = self.load_and_cache_examples(self.tokenizer, "test")
self.model.eval()
eval_config = self.config["test"]
results_all = defaultdict(lambda: dict())
for i, dataset in enumerate(datasets):
agent = self.all_agents[dataset[0][1]]
dataloader = DataLoader(dataset, shuffle=False, batch_size=eval_config["batch_size"], collate_fn=self._collate)
dataset_iterator = tqdm(dataloader, desc="Iteration ({})".format(agent))
logger.info("Evaluating agent {}/{}: {}".format(i+1, len(datasets), agent))
logger.info("{} questions".format(len(dataset)))
activation = []
for batch in dataset_iterator:
batch = tuple(t.to(self.device) for t in batch[:2])
inputs = {"input_ids": batch[0], "attention_mask": batch[1]}
with torch.no_grad():
outputs = self.model(**inputs)[0].cpu().numpy()
for res in outputs:
top_activation = np.sort(res)[-1].item()
activation.append(top_activation)
if agent == new_agent:
results_all["outlier"][agent] = activation
else:
results_all["trained"][agent] = activation
with open(os.path.join(self.out_dir, "results_activation.json"), "w") as f:
json.dump(results_all, f, indent=4)
def _log_results(self, results):
logger.info('Accuracy: {}'.format(results['accuracy']))
logger.info('Precision: {}'.format(results['precision']))
logger.info('Recall: {}'.format(results['recall']))
logger.info('F1: {}'.format(results['f1']))
logger.info('MRR: {}'.format(results['mrr']))
logger.info('R@3: {}'.format(results['r@3']))
logger.info('R@5: {}'.format(results['r@5']))
logger.info('Queries/second: {}'.format(results['query_per_second']))
def _log_confusion_matrix(self, confusion_matrix):
cell_width = max([5]+[len(s) for s in self.all_agents])
logger.info("Confusion Matrix:")
logger.info("|".join(["".ljust(cell_width)]+[str(s).ljust(cell_width) for s in self.all_agents]))
for agent, row in zip(self.all_agents, confusion_matrix):
logger.info("|".join([agent.ljust(cell_width)]+[str(s).ljust(cell_width) for s in row]))
def _compute_f1(self, confusion_matrix):
matrix = np.array(confusion_matrix)
relevant = np.sum(matrix, axis=1)
retrieved = np.sum(matrix, axis=0)
precision, recall, f1 = [], [], []
for i, val in enumerate(np.diag(matrix)):
if retrieved[i]==0:
p=0
else:
p = val/retrieved[i]
if relevant[i]==0:
r=0
else:
r = val/relevant[i]
precision.append(p)
recall.append(r)
if r==0 or p==0:
f1.append(0)
else:
f1.append((2*r*p)/(r+p))
return precision, recall, f1
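# Worked micro-example (illustrative only): for confusion_matrix = [[8, 2], [1, 9]]
# (rows = true agent, columns = predicted agent), agent 0 gets precision 8/9 and
# recall 8/10, agent 1 gets precision 9/11 and recall 9/10; F1 is the harmonic mean
# of each precision/recall pair.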
def _collate(self, samples):
input_ids, labels = zip(*samples)
max_len = min(self.config["model"]["max_length"], max([len(input) for input in input_ids]))
attention_mask = [[1]*len(input)+[0]*(max_len-len(input)) for input in input_ids]
input_ids = [input+[0]*(max_len-len(input)) for input in input_ids]
if self.config["model"].get("version", "v2") == "softmax":
pos_weights = torch.FloatTensor(self.agent_weights_ce)
one_hot_labels = torch.LongTensor(labels) # not really one hot
elif self.config["model"].get("version", "v2") == "pairwise":
pos_weights = torch.FloatTensor(self.agent_weights_ce)
one_hot_labels = torch.LongTensor(labels) # not really one hot
loss_label = torch.zeros((len(labels), len(self.all_agents))).long() - 1
loss_label[:,0] = one_hot_labels
one_hot_labels = loss_label
else:
pos_weights = torch.FloatTensor(self.agent_weights)
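# scatter_ writes a 1.0 at each row's label index, turning the integer labels into
# one-hot target vectors.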
one_hot_labels = torch.FloatTensor(len(labels), len(self.config["all_agents"])) \
.zero_() \
.scatter_(1, torch.Tensor(labels).long().unsqueeze(1), 1)
input_ids = torch.tensor(input_ids, dtype=torch.long)
attention_mask = torch.tensor(attention_mask, dtype=torch.long)
return input_ids, attention_mask, one_hot_labels, pos_weights
def load_and_cache_examples(self, tokenizer, partition):
directory = os.path.join(self.config["cache_dir"], self.config["model"]["model_name"].replace("/", "_"), partition)
os.makedirs(directory, exist_ok=True)
base_paths = self.config[partition]["base_path"]
all_examples = {"input_ids": [], "label": []}
num_examples_per_agent = {}
truncate_idx = 0 # for multiple truncate values; looping through
extend_truncate_idx = 0
for i, (agent, file_names) in enumerate(self.config[partition]["agents"].items()):
file_path = os.path.join(directory, "{}.pyt".format(agent))
if os.path.exists(file_path):
logger.info("Loading cached {} set for {}".format(partition, agent))
examples = torch.load(file_path)
else:
logger.info("No cached data for {}. Creating it...".format(agent))
data = _read_file(file_names, base_paths)
res = tokenizer.batch_encode_plus(data, add_special_tokens=True, max_length=self.config["model"]["max_length"],
return_token_type_ids=False, return_attention_masks=False)
examples = {"input_ids": res["input_ids"]}
torch.save(examples, file_path)
logger.info("Cached {} set for {} data to {}".format(partition, agent, file_path))
idx = self.all_agents.index(agent)
examples["label"] = [idx]*len(examples["input_ids"])
truncate = self.config[partition].get("truncate", len(examples["input_ids"]))
if isinstance(truncate, list):
truncate = truncate[truncate_idx]
true_truncate = truncate # for epoch resample
if self.config.get("agents_extended", 0)>0 and \
agent in self.all_agents[-self.config.get("agents_extended", 0):]:
truncate = self.config[partition].get("extend_truncate", truncate)
if isinstance(truncate, list):
truncate = truncate[extend_truncate_idx]
extend_truncate_idx = (extend_truncate_idx+1)%len(self.config[partition].get("extend_truncate", truncate))
true_truncate = truncate
elif isinstance(self.config[partition].get("truncate", len(examples["input_ids"])), list):
truncate_idx = (truncate_idx+1)%len(self.config[partition].get("truncate", len(examples["input_ids"])))
if self.config["train"].get("epoch_resample", False) or self.config["train"].get("loss_resample", False):
truncate = self.config[partition].get("extend_truncate", truncate)
if isinstance(truncate, list):
truncate = max(truncate)
chosen_idxs = np.random.choice(len(examples["input_ids"]), truncate)
if self.config.get("agents_extended", 0)>0 and \
agent in self.all_agents[-self.config.get("agents_extended", 0):] and \
(self.config["train"].get("epoch_resample", False) or self.config["train"].get("loss_resample", False)) :
extend_examples = {"input_ids": [], "label": []}
extend_examples["input_ids"] = [examples["input_ids"][idx] for idx in chosen_idxs]
extend_examples["label"] = [examples["label"][idx] for idx in chosen_idxs]
else:
all_examples["input_ids"].append([examples["input_ids"][idx] for idx in chosen_idxs])
all_examples["label"].append([examples["label"][idx] for idx in chosen_idxs])
num_examples_per_agent[agent] = len(examples["label"][:true_truncate]) # can be smaller than truncate
logger.info("{} samples for {} {}".format(len(examples["label"][:true_truncate]), partition, agent))
if partition == "train":
total_examples = sum(num_examples_per_agent.values())
for i, agent in enumerate(self.all_agents):
if agent in num_examples_per_agent:
# case with all agents equal number of examples: positive weight = #agents-1
self.agent_weights[i] = (total_examples-num_examples_per_agent[agent])/num_examples_per_agent[agent]
self.agent_weights_ce[i] = (total_examples/len(self.all_agents))/num_examples_per_agent[agent]
if self.config["train"].get("epoch_resample", False):
dataset = CustomResampleDataset(input_ids=all_examples["input_ids"], labels=all_examples["label"],
truncate=self.config["train"]["truncate"],
extend_input_ids=extend_examples["input_ids"], extend_labels=extend_examples["label"])
elif self.config["train"].get("loss_resample", False):
dataset = CustomLossResampleDataset(input_ids=all_examples["input_ids"], labels=all_examples["label"],
truncate=self.config["train"]["truncate"],
extend_input_ids=extend_examples["input_ids"], extend_labels=extend_examples["label"])
else:
dataset = CustomDataset([example for examples in all_examples["input_ids"] for example in examples],
[label for labels in all_examples["label"] for label in labels])
else:
dataset = [CustomDataset(examples, labels)
for examples, labels in zip(all_examples["input_ids"], all_examples["label"])]
return dataset
def load_examples_with_text(self, tokenizer, partition):
directory = os.path.join(self.config["cache_dir"], self.config["model"]["model_name"].replace("/", "_"), partition)
base_paths = self.config[partition]["base_path"]
all_examples = {"input_ids": [], "label": []}
all_data = []
num_examples_per_agent = {}
truncate_idx = 0 # for multiple truncate values; looping through
extend_truncate_idx = 0
for i, (agent, file_names) in enumerate(self.config[partition]["agents"].items()):
file_path = os.path.join(directory, "{}.pyt".format(agent))
logger.info("No cached data for {}. Creating it...".format(agent))
data = _read_file(file_names, base_paths)
res = tokenizer.batch_encode_plus(data, add_special_tokens=True, max_length=self.config["model"]["max_length"],
return_token_type_ids=False, return_attention_masks=False)
examples = {"input_ids": res["input_ids"]}
idx = self.all_agents.index(agent)
examples["label"] = [idx]*len(examples["input_ids"])
truncate = self.config[partition].get("truncate", len(examples["input_ids"]))
if isinstance(truncate, list):
truncate = truncate[truncate_idx]
true_truncate = truncate # for epoch resample
if self.config.get("agents_extended", 0)>0 and \
agent in self.all_agents[-self.config.get("agents_extended", 0):]:
truncate = self.config[partition].get("extend_truncate", truncate)
if isinstance(truncate, list):
truncate = truncate[extend_truncate_idx]
extend_truncate_idx = (extend_truncate_idx+1)%len(self.config[partition].get("extend_truncate", truncate))
true_truncate = truncate
elif isinstance(self.config[partition].get("truncate", len(examples["input_ids"])), list):
truncate_idx = (truncate_idx+1)%len(self.config[partition].get("truncate", len(examples["input_ids"])))
if self.config["train"].get("epoch_resample", False) or self.config["train"].get("loss_resample", False):
truncate = self.config[partition].get("extend_truncate", truncate)
if isinstance(truncate, list):
truncate = max(truncate)
chosen_idxs = np.random.choice(len(examples["input_ids"]), truncate)
if self.config.get("agents_extended", 0)>0 and \
agent in self.all_agents[-self.config.get("agents_extended", 0):] and \
(self.config["train"].get("epoch_resample", False) or self.config["train"].get("loss_resample", False)) :
extend_examples = {"input_ids": [], "label": []}
extend_examples["input_ids"] = [examples["input_ids"][idx] for idx in chosen_idxs]
extend_examples["label"] = [examples["label"][idx] for idx in chosen_idxs]
else:
all_examples["input_ids"].append([examples["input_ids"][idx] for idx in chosen_idxs])
all_examples["label"].append([examples["label"][idx] for idx in chosen_idxs])
all_data.append([data[idx] for idx in chosen_idxs])
dataset = [CustomDataset(examples, labels)
for examples, labels in zip(all_examples["input_ids"], all_examples["label"])]
return dataset, all_data
def eval_for_manual_annotation(self, datasets, questions, chose=50):
import pandas as pd
self.model.eval()
eval_config = self.config["test"]
errors = []
for i, (dataset, qs) in enumerate(zip(datasets, questions)):
agent_label = dataset[0][1]
agent = self.all_agents[dataset[0][1]]
dataloader = DataLoader(dataset, shuffle=False, batch_size=eval_config["batch_size"], collate_fn=self._collate)
dataset_iterator = tqdm(dataloader, desc="Iteration ({})".format(agent))
logger.info("Evaluating agent {}/{}: {}".format(i+1, len(datasets), agent))
logger.info("{} questions".format(len(dataset)))
activation = []
for i, batch in enumerate(dataset_iterator):
batch = tuple(t.to(self.device) for t in batch[:2])
inputs = {"input_ids": batch[0], "attention_mask": batch[1]}
with torch.no_grad():
outputs = self.model(**inputs)[0].cpu().numpy()
for j, res in enumerate(outputs):
rank1 = np.argsort(res)[::-1][0]
if rank1 != agent_label:
errors.append((qs[i*eval_config["batch_size"]+j], agent, self.all_agents[rank1]))
chosen_idx = np.random.choice(len(errors), chose, replace=False)
errors = [errors[i] for i in chosen_idx]
key = []
shuffle = []
for e in errors:
a, b = np.random.choice(2, 2, replace=False)
shuffle.append((e[0], e[1+a], e[1+b]))
key.append(a)
df = pd.DataFrame(shuffle, columns=["Question", "A", "B"])
key = | pd.DataFrame(key, columns=["Key"]) | pandas.DataFrame |
from __future__ import division
import torch
import torchvision.models as models
import torch.nn as nn
import torchvision.transforms as transforms
import torch.nn.functional as F
from torch.autograd import Variable
from glob import glob
import os
import numpy as np
import pandas as pd
import json
import re
from PIL import Image
import base64
from embedding import *
# retrieve sketch paths
def list_files(path, ext='png'):
result = [y for x in os.walk(path) for y in glob(os.path.join(x[0], '*.%s' % ext))]
return result
def check_invalid_sketch(filenames,invalids_path='drawings_to_exclude.txt'):
if not os.path.exists(invalids_path):
print('No file containing invalid paths at {}'.format(invalids_path))
invalids = []
else:
x = pd.read_csv(invalids_path, header=None)
x.columns = ['filenames']
invalids = list(x.filenames.values)
valids = []
basenames = [f.split('/')[-1] for f in filenames]
for i,f in enumerate(basenames):
if f not in invalids:
valids.append(filenames[i])
return valids
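# Hedged usage sketch for the two helpers above; the sketch directory is an illustrative
# assumption, while 'drawings_to_exclude.txt' is simply the default from the signature.
#   sketch_paths = list_files('/path/to/sketch_dir', ext='png')
#   sketch_paths = check_invalid_sketch(sketch_paths, invalids_path='drawings_to_exclude.txt')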
def make_dataframe(Labels):
Y = | pd.DataFrame([Labels]) | pandas.DataFrame |
import pandas as pd
from sklearn.cluster import KMeans
from tree_process import *
from file_process import *
def compare_tree(root_node1, root_node2):
collector1 = NodeVisitorLevel(root_node1)
collector1.generic_visit_level()
collector1.process_tree()
collector2 = NodeVisitorLevel(root_node2)
collector2.generic_visit_level()
collector2.process_tree()
maxNum = max(collector1.nodesNum,collector2.nodesNum)
values = [[] for i in range(0,maxNum+1)]
for key in collector2.d.keys():
values[collector2.d[key][0]].append(key)
collector1.others_dict = collector2.d
collector1.others_values = values
collector1.generic_visit(root_node1)
if collector1.nodesNum+collector2.nodesNum==0:
return 0
return float(collector1.same_nodes)/(collector1.nodesNum+collector2.nodesNum)
def detect_pair(code1, code2):
try:
root1 = ast.parse(code1)
root2 = ast.parse(code2)
except Exception as e:
print(e)
return 0
return compare_tree(root1, root2)
def detect_directory(root, number, th):
codes = []
asts = []
vectors = []
codes_path = get_all_py_path(root)
codes_path_new = []
collectors = []
for path in codes_path:
try:
f = open(path,'r+',errors='ignore')
code = f.read()
f.close()
codes.append(code)
root_node = ast.parse(code)
collector = FuncNodeCollector()
collector.visit(root_node)
vector = [collector._expr, collector._call, collector._classdef,
collector._funcdef, collector._name, collector._attribute]
levelVisit = NodeVisitorLevel(root_node)
levelVisit.generic_visit_level()
levelVisit.process_tree()
if max(vector) == min(vector):
continue
Z_ScoreNormalization(vector)
asts.append(root_node)
collectors.append(collector)
codes_path_new.append(path)
vectors.append(vector)
except Exception as err:
print(err)
X = np.array(vectors)
d = pd.DataFrame(X)
d.head()
    # Clustering
    mod = KMeans(n_clusters=number, n_jobs=4, max_iter=500)  # `number` clusters, 4 parallel jobs, at most 500 iterations
    mod.fit_predict(d)  # cluster assignments are kept in mod.labels_
    # Count the samples in each cluster and collect the cluster centers
r1 = pd.Series(mod.labels_).value_counts()
r2 = pd.DataFrame(mod.cluster_centers_)
r = pd.concat([r2, r1], axis=1)
r.columns = list(d.columns) + [u'number']
print(r)
    # Label each row with the cluster it was assigned to
r = pd.concat([d, pd.Series(mod.labels_, index=d.index)], axis=1)
r.columns = list(d.columns) + [u'kind']
print(r)
labels = list(mod.labels_)
dict = {}
dict2 = {}
dict3 = {}
dic_root = {}
for i in range(0,len(labels)):
if labels[i] in dict.keys():
array = dict[labels[i]]
array.append(codes_path_new[i])
dict[labels[i]] = array
array = dict2[labels[i]]
array.append(collectors[i])
dict2[labels[i]] = array
array = dict3[labels[i]]
array.append(codes[i])
dict3[labels[i]] = array
roots = dic_root[labels[i]]
roots.append(asts[i])
dic_root[labels[i]] = roots
else:
array = []
array.append(codes_path_new[i])
dict[labels[i]] = array
array = []
array.append(collectors[i])
dict2[labels[i]] = array
array = []
array.append(codes[i])
dict3[labels[i]] = array
roots = []
roots.append(asts[i])
dic_root[labels[i]] = roots
for key in dict.keys():
if len(dict[key])<2:
continue
for i in range(0,len(dict[key])-1):
for j in range(i+1,len(dict[key])):
sim = compare_tree(dic_root[key][i],dic_root[key][j])
if sim>th:
print(dict[key][i])
print(dict[key][j])
print(sim)
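# Hedged usage sketch for detect_directory: cluster every .py file under a directory into
# an assumed 10 groups and report pairs whose tree similarity exceeds an assumed 0.8 threshold.
#   detect_directory('/path/to/code_repo', number=10, th=0.8)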
def detect_content(notes,codes,number,th):
asts = []
vectors = []
collectors = []
for code in codes:
try:
root_node = ast.parse(code)
collector = FuncNodeCollector()
collector.visit(root_node)
vector = [collector._expr, collector._call, collector._classdef,
collector._funcdef, collector._name, collector._attribute]
levelVisit = NodeVisitorLevel(root_node)
levelVisit.generic_visit_level()
levelVisit.process_tree()
if max(vector) == min(vector):
continue
Z_ScoreNormalization(vector)
asts.append(root_node)
collectors.append(collector)
vectors.append(vector)
except Exception as err:
print(err)
X = np.array(vectors)
d = | pd.DataFrame(X) | pandas.DataFrame |
import pandas as pd
import functools
from PIL import Image
import uuid
import os
def check_y_overlap(bb1, bb2):
    # Bounding boxes are (tlx, tly, brx, bry); test whether the two vertical extents overlap.
    _, top1, _, bottom1 = bb1
    _, top2, _, bottom2 = bb2
    return bottom2 >= top1 and bottom1 >= top2
def aggregate_equations(page_group, write_images_pth):
targets = []
objs = []
for ind, p in page_group.iterrows():
if p['postprocess_cls'] == 'Equation':
targets.append(p)
else:
objs.append(p)
page_content = ' '.join([p['content'] for p in objs])
final_objs = []
for t in targets:
img = Image.open(t['img_pth']).convert('RGB').crop(t['bounding_box'])
imgid = uuid.uuid4()
pth = os.path.join(write_images_pth, f'{imgid}.png')
img.save(pth)
eq_obj = {'pdf_name': t['pdf_name'],
'dataset_id': t['dataset_id'],
'detect_score': t['detect_score'],
'postprocess_score': t['postprocess_score'],
'equation_bb': t['bounding_box'],
'equation_page': t['page_num'],
'content': page_content,
'img_pth': pth}
final_objs.append(eq_obj)
return final_objs
def caption_associate(page_group, caption_class, write_images_pth):
captions = []
objs = []
for ind, p in page_group.iterrows():
if p['postprocess_cls'] == caption_class:
captions.append(p)
else:
objs.append(p)
final_objs = []
for caption in captions:
cbb = caption['bounding_box']
tlx, tly, brx, bry = cbb
mid_x = (tlx + brx) / 2
mid_y = (tly + bry) / 2
min_sdist = None
min_ind = None
group_obj = {'pdf_name': caption['pdf_name'],
'dataset_id': caption['dataset_id'],
'caption_content': caption['content'],
'caption_page': caption['page_num'],
'caption_bb': caption['bounding_box'],
'pdf_dims': caption['pdf_dims']}
if len(objs) == 0:
continue
for ind, obj in enumerate(objs):
tlx, tly, brx, bry = obj['bounding_box']
o_mid_x = (tlx + brx) / 2
o_mid_y = (tly + bry) / 2
sdist = (mid_x - o_mid_x)**2 + (mid_y-o_mid_y)**2
if min_sdist is None:
min_sdist = sdist
min_ind = ind
continue
if min_sdist > sdist:
min_sdist = sdist
min_ind = ind
min_obj = objs.pop(min_ind)
group_obj['content'] = min_obj['content']
group_obj['obj_page'] = min_obj['page_num']
group_obj['obj_bbs'] = min_obj['bounding_box']
group_obj['detect_score'] = min_obj['detect_score']
group_obj['postprocess_score'] = min_obj['postprocess_score']
img = Image.open(min_obj['img_pth']).convert('RGB').crop(min_obj['bounding_box'])
imgid = uuid.uuid4()
pth = os.path.join(write_images_pth, f'{imgid}.png')
img.save(pth)
group_obj['img_pth'] = pth
final_objs.append(group_obj)
for obj in objs:
img = Image.open(obj['img_pth']).convert('RGB').crop(obj['bounding_box'])
imgid = uuid.uuid4()
pth = os.path.join(write_images_pth, f'{imgid}.png')
img.save(pth)
group_obj = {'pdf_name': obj['pdf_name'],
'dataset_id': obj['dataset_id'],
'caption_content': None,
'caption_page': None,
'caption_bb': None,
'pdf_dims': obj['pdf_dims'],
'detect_score': obj['detect_score'],
'postprocess_score': obj['postprocess_score'],
'content': obj['content'],
'obj_page': obj['page_num'],
'obj_bbs': obj['bounding_box'],
'img_pth': pth}
final_objs.append(group_obj)
return final_objs
def order_page(page_group):
y_groups = []
for ind, p in page_group.iterrows():
grouped_flag = False
for group in y_groups:
for member in group:
overlaps = check_y_overlap(p['bounding_box'], member['bounding_box'])
if overlaps:
group.append(p)
grouped_flag = True
break
if grouped_flag:
break
if not grouped_flag:
y_groups.append([p])
sorted_groups = []
for group in y_groups:
slist = sorted(group, key=lambda x: x['bounding_box'][0])
nested_slist = []
for obj in slist:
grouped_flag = False
for sublist in nested_slist:
for element in sublist:
if abs(element['bounding_box'][0] - obj['bounding_box'][0]) < 20:
sublist.append(obj)
grouped_flag = True
break
if grouped_flag:
break
if not grouped_flag:
nested_slist.append([obj])
internal_sort = []
for slist in nested_slist:
internal_sort.append(sorted(slist, key=lambda x:x['bounding_box'][1]))
sorted_groups.append(internal_sort)
sorted_groups = sorted(sorted_groups, key=lambda x: x[0][0]['bounding_box'][1])
final_ordering = []
for group in sorted_groups:
for sublist in group:
for element in sublist:
final_ordering.append(element)
return final_ordering
def group_section(obj_list):
section = {'pdf_name': obj_list[0]['pdf_name'], 'dataset_id': obj_list[0]['dataset_id']}
section['detect_score'] = obj_list[0]['detect_score']
section['postprocess_score'] = obj_list[0]['postprocess_score']
if obj_list[0]['postprocess_cls'] == 'Section Header':
section['section_header'] = obj_list[0]['content']
section['section_header_page'] = obj_list[0]['page_num']
section['section_header_bb'] = obj_list[0]['bounding_box']
obj_list.pop(0)
content = [obj['content'] for obj in obj_list]
content = ' '.join(content)
section['content'] = content
section['obj_pages'] = [obj['page_num'] for obj in obj_list]
section['obj_bbs'] = [obj['bounding_box'] for obj in obj_list]
return section
def aggregate_sections(pdf):
pdf = pdf[pdf['postprocess_cls'].isin(['Body Text', 'Section Header'])]
grouped = pdf.groupby('page_num').apply(order_page)
final_ordering = []
for ind, order in grouped.iteritems():
final_ordering.extend(order)
sections = [[]]
for item in final_ordering:
if item['postprocess_cls'] == 'Section Header':
sections.append([item])
else:
sections[-1].append(item)
sections = [group_section(s) for s in sections if len(s) > 0]
return sections
def aggregate_tables(pdf, write_images_pth):
pdf = pdf[pdf['postprocess_cls'].isin(['Table', 'Table Caption'])]
tc_associate = functools.partial(caption_associate, caption_class='Table Caption', write_images_pth=write_images_pth)
grouped = pdf.groupby('page_num').apply(tc_associate)
final_ordering = []
for ind, order in grouped.iteritems():
final_ordering.extend(order)
return final_ordering
def aggregate_figures(pdf, write_images_pth):
pdf = pdf[pdf['postprocess_cls'].isin(['Figure', 'Figure Caption'])]
tc_associate = functools.partial(caption_associate, caption_class='Figure Caption', write_images_pth=write_images_pth)
grouped = pdf.groupby('page_num').apply(tc_associate)
final_ordering = []
for ind, order in grouped.iteritems():
final_ordering.extend(order)
return final_ordering
def aggregate_pdf(pdf):
pdf_obj = {}
content = ''
obj_pages = []
obj_bbs = []
for ind, row in pdf.iterrows():
if 'pdf_name' not in pdf_obj:
pdf_obj['pdf_name'] = row['pdf_name']
if 'dataset_id' not in pdf_obj:
pdf_obj['dataset_id'] = row['dataset_id']
content += f' {row["content"]}'
obj_pages.append(row['page_num'])
obj_bbs.append(row['bounding_box'])
pdf_obj['content'] = content
pdf_obj['obj_pages'] = obj_pages
pdf_obj['obj_bbs'] = obj_bbs
return pdf_obj
stream_types = ['sections', 'pdfs']
association_types = ['tables', 'figures']
full_page_types = ['equations']
def aggregate_router(ddf, aggregate_type, write_images_pth):
if aggregate_type in stream_types:
return stream_aggregate(ddf, aggregate_type)
elif aggregate_type in association_types:
return association_aggregate(ddf, aggregate_type, write_images_pth)
elif aggregate_type in full_page_types:
return full_page_aggregate(ddf, aggregate_type, write_images_pth)
else:
        raise ValueError(f'Passed type not supported for aggregation. Supported types are {stream_types + association_types + full_page_types}')
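# Hedged usage sketch for the router above; the dataframe `ddf` and the image output
# directory are illustrative assumptions.
#   sections_df = aggregate_router(ddf, 'sections', write_images_pth='/tmp/doc_images')
#   figures_df = aggregate_router(ddf, 'figures', write_images_pth='/tmp/doc_images')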
def full_page_aggregate(ddf, aggregate_type, write_images_pth):
if aggregate_type == 'equations':
ae = functools.partial(aggregate_equations, write_images_pth=write_images_pth)
result = ddf.groupby('pdf_name').apply(ae)
results = []
for pdf_name, sections in result.iteritems():
for section in sections:
results.append(section)
results_df = pd.DataFrame(results)
return results_df
def stream_aggregate(ddf, aggregate_type):
if aggregate_type == 'sections':
result = ddf.groupby('pdf_name').apply(aggregate_sections)
results = []
for pdf_name, sections in result.iteritems():
for section in sections:
results.append(section)
results_df = pd.DataFrame(results)
return results_df
if aggregate_type == 'pdfs':
result = ddf.groupby('pdf_name').apply(aggregate_pdf)
results = []
for pdf_name, item in result.iteritems():
results.append(item)
result_df = pd.DataFrame(results)
return result_df
def association_aggregate(ddf, aggregate_type, write_images_pth):
if aggregate_type == 'tables':
atab = functools.partial(aggregate_tables, write_images_pth=write_images_pth)
result = ddf.groupby('pdf_name').apply(atab)
results = []
for pdf_name, tables in result.iteritems():
for table in tables:
results.append(table)
results_df = pd.DataFrame(results)
return results_df
if aggregate_type == 'figures':
afig = functools.partial(aggregate_figures, write_images_pth=write_images_pth)
result = ddf.groupby('pdf_name').apply(afig)
results = []
for pdf_name, tables in result.iteritems():
for table in tables:
results.append(table)
results_df = | pd.DataFrame(results) | pandas.DataFrame |
from pandas import DataFrame, concat
from numpy import nan, isnan
from statsmodels.api import OLS, GEE, Logit
from statsmodels.api import families
from ravenclaw.wrangling import bring_to_front
from chronometry.progress import ProgressBar
import re
from .effect import Formula, MainEffect, InteractionEffect, Effect
from .convert_simple_table_to_dataframe import convert_simple_table_to_dataframe
from .convert_camel_to_snake import convert_camel_to_snake
from .exceptions import BrokenModel, BrokenSummary, FormulaError
class Regression:
def __init__(
self, data, model_builder, formula, significance_level=0.05, family=None, groups=None, parent=None
):
"""
:type data: DataFrame
:type model_builder: callable
:type formula: str or Formula
:type significance_level: float
:type family: families.Binomial or families.Family or NoneType
:type groups: NoneType or str or list[str]
        :type parent: NoneType or Regression
"""
self._formula = Formula(formula)
if self._formula.dependent_variable is None:
raise FormulaError(f'dependent variable missing from {self._formula}!')
self._groups = groups
self._data = data
self._significance_level = significance_level
self._model_builder = model_builder
self._family = family
self._groups = groups
self._variables = {effect.name: effect for effect in self._formula.effects}
self._model = None
self._fit = None
self._summary = None
self._summary_table = None
self._parent = parent
self._effects = None
self._significant_interaction_effects = None
self._significant_main_effects = None
self._insignificant_interaction_effects = None
self._insignificant_main_effects = None
def __repr__(self):
return f'formula: {self.formula.represent()}\ngroups: {self.groups}\nfamily: {self.family}'
def __str__(self):
return self.__repr__()
@property
def formulas(self):
"""
:rtype: list[Formula]
"""
if self._parent is None:
return [self.formula]
else:
return self._parent.formulas + [self.formula]
def display(self, p=None):
try:
from IPython.core.display import display
display((
self._model_builder,
{
'formula': self.formula,
'groups': self.groups,
'family': self.family,
'summary_table': self._summary_table
},
self.data.head()
))
except ImportError:
if p is not None:
p.pretty(self.__repr__())
else:
print(self.__repr__())
def _repr_pretty_(self, p, cycle):
if cycle:
p.text('Regression')
else:
self.display(p=p)
def build_model(self):
"""
:rtype: OLS or GEE or Logit
"""
if self.family is None and self.groups is None:
model = self._model_builder(formula=self.formula.formula, data=self.data)
elif self.family is None:
model = self._model_builder(formula=self.formula.formula, data=self.data, groups=self.groups)
elif self.groups is None:
model = self._model_builder(formula=self.formula.formula, data=self.data, family=self.family)
else:
model = self._model_builder(
formula=self.formula.formula, data=self.data, groups=self.groups, family=self.family
)
return model
@property
def variables(self):
"""
:rtype: dict[str, MainEffect or InteractionEffect]
"""
return self._variables
@property
def model(self):
"""
:rtype: OLS or GEE or Logit
"""
if self._model is None:
self._model = self.build_model()
return self._model
@property
def fit(self):
"""
:rtype: OLS or GEE or Logit
"""
if self._fit is None:
try:
self._fit = self.model.fit()
except Exception as e:
self._fit = BrokenModel(exception=e, regression=self)
return self._fit
@property
def parameters(self):
"""
:rtype: dict
"""
return dict(self.fit.params)
@property
def coef_(self):
return {key: value for key, value in self.parameters.items() if key.lower() != 'intercept'}
@property
def intercept_(self):
try:
return self.parameters['Intercept']
except KeyError:
return self.parameters['intercept']
@property
def feature_importances_(self):
return self.fit.feature_importances_
def predict(self, data):
return self.fit.predict(data)
@property
def summary(self):
if self._summary is None:
self._summary = self.fit.summary()
return self._summary
@property
def coefficient_table(self):
"""
:rtype: DataFrame
"""
if isinstance(self.summary, BrokenSummary):
return DataFrame({'error': [self.summary.model.exception]})
else:
return convert_simple_table_to_dataframe(self.summary.tables[1]).rename(columns={
'coef': 'coefficient', 'std err': 'standard_error', '[0.025': 'lower_0.025',
'0.975]': 'upper_0.975', 'P>|z|': 'p'
})
@property
def model_table(self):
"""
:rtype: DataFrame
"""
result = convert_simple_table_to_dataframe(self.summary.tables[2], header=False).reset_index()
tables = []
for i in range(result.shape[1]//2):
table = result.iloc[:, i*2:i*2+2]
table.columns = ['name', 'value']
tables.append(table)
result = | concat(tables) | pandas.concat |
import os
from multiprocessing import Pool, cpu_count
from itertools import repeat
import pandas as pd
from solvers.solvers import SOLVER_MAP
from problem_classes.random_qp import RandomQPExample
from problem_classes.eq_qp import EqQPExample
from problem_classes.portfolio import PortfolioExample
from problem_classes.lasso import LassoExample
from problem_classes.svm import SVMExample
from problem_classes.huber import HuberExample
from problem_classes.control import ControlExample
from utils.general import make_sure_path_exists
examples = [RandomQPExample,
EqQPExample,
PortfolioExample,
LassoExample,
SVMExample,
HuberExample,
ControlExample]
EXAMPLES_MAP = {example.name(): example for example in examples}
class Example(object):
'''
Examples runner
'''
def __init__(self, name,
dims,
solvers,
settings,
n_instances=10):
self.name = name
self.dims = dims
self.n_instances = n_instances
self.solvers = solvers
self.settings = settings
def solve(self, parallel=True):
'''
Solve problems of type example
The results are stored as
./results/benchmark_problems/{solver}/{class}/n{dimension}.csv
using a pandas table with fields
- 'class': example class
- 'solver': solver name
- 'status': solver status
- 'run_time': execution time
- 'iter': number of iterations
- 'obj_val': objective value
- 'n': leading dimension
- 'N': nnz dimension (nnz(P) + nnz(A))
'''
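        # Illustrative shape of one stored row (hedged; solver name and numbers are made up):
        #   class=Random QP, solver=osqp, status=optimal, run_time=0.012,
        #   iter=50, obj_val=-1.23, n=100, N=5000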
print("Solving %s" % self.name)
print("-----------------")
if parallel:
pool = Pool(processes=min(self.n_instances, cpu_count()))
# Iterate over all solvers
for solver in self.solvers:
settings = self.settings[solver]
# Initialize solver results
results_solver = []
# Solution directory
path = os.path.join('.', 'results', 'benchmark_problems',
solver,
self.name
)
# Create directory for the results
make_sure_path_exists(path)
# Get solver file name
solver_file_name = os.path.join(path, 'full.csv')
for n in self.dims:
# Check if solution already exists
n_file_name = os.path.join(path, 'n%i.csv' % n)
if not os.path.isfile(n_file_name):
if parallel:
instances_list = list(range(self.n_instances))
n_results = pool.starmap(self.solve_single_example,
zip(repeat(n),
instances_list,
repeat(solver),
repeat(settings)))
else:
n_results = []
for instance in range(self.n_instances):
n_results.append(
self.solve_single_example(n,
instance,
solver,
settings)
)
# Combine n_results
df = pd.concat(n_results)
# Store n_results
df.to_csv(n_file_name, index=False)
else:
# Load from file
df = pd.read_csv(n_file_name)
# Combine list of dataframes
results_solver.append(df)
# Create total dataframe for the solver from list
df_solver = | pd.concat(results_solver) | pandas.concat |
#!/usr/bin/env python
import argparse, sys, copy, gzip, time, math, re
import numpy as np
import pandas as pd
from scipy import stats
from collections import Counter, defaultdict, namedtuple
import statsmodels.formula.api as smf
from operator import itemgetter
import warnings
from svtools.vcf.file import Vcf
from svtools.vcf.genotype import Genotype
from svtools.vcf.variant import Variant
import svtools.utils as su
def todosage(gt):
if gt == '0/0':
return 0
elif gt == '0/1':
return 1
elif gt == '1/1':
return 2
else:
return np.nan
def ld_calc(curlist, keep, ld_outfile, winsz):
varlist=[var.var_id for var in curlist]
df= | pd.DataFrame(index=keep, columns=varlist) | pandas.DataFrame |
import pandas as pd
import numpy as np
xls = | pd.ExcelFile("~/Desktop/query_ranks.xlsx") | pandas.ExcelFile |
#!usr/bin/env python
from sklearn.datasets import load_boston
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from sklearn.model_selection import train_test_split
#1. Load & Visualize data
boston_data = load_boston()
df = pd.DataFrame(data= boston_data['data'], columns= boston_data['feature_names'])
df["MEDV"] = boston_data['target']
#2. Plotting:
sns.set(color_codes=True)
#2.1 Plot & calculate some overviews
#2.1.1 Create Directories to save figs
if not (os.path.exists('./Figures')):
os.makedirs('./Figures')
os.makedirs('./Figures/Cols-Histograms')
os.makedirs('./Figures/Cols-Scatters')
os.makedirs('./Figures/multiple_features_plotly')
#2.1.2 Pairplot
print("Creating overview!")
sns.pairplot(df)
plt.savefig("./Figures/Pairplot.png")
plt.close()
#2.1.3 Correlation matrix
correlation_matrix = df.corr().round(2)
plt.figure(figsize=(20, 15))
sns.heatmap(data=correlation_matrix, annot=True)
plt.savefig("./Figures/Correlation_Matrix.png")
plt.close()
#2.1.4 Max & Min Corr. to MEDV
medv_corr = correlation_matrix.iloc[13, :-1]
maxcor_col = medv_corr.idxmax()
mincor_col = medv_corr.idxmin()
print("Max Correlation with MEDV: {0}, Corr. value = {1}".format(
maxcor_col, max(medv_corr)))
print("Min Correlation with MEDV: {0}, Corr. value = {1}".format(
mincor_col, min(medv_corr)))
#2.2 Plot Features
#2.2.1 Histogram for each col.
print("Creating histograms and scatter plots!")
for col in df:
idx = df.columns.get_loc(col)
sns.distplot(df[col].values,rug=False,bins=50).set_title("Histogram of {0}".format(col))
plt.savefig("./Figures/Cols-Histograms/{0}_{1}.png".format(idx,col), dpi=100)
plt.close()
#2.2.2 Scatterplot and a regression line for each column with 'MEDV'
if (col == 'MEDV'):
continue
sns.regplot(df[col], df['MEDV'], color='r')
plt.xlabel('Value of {0}'.format(col))
plt.ylabel('Value of MEDV')
plt.title('Scatter plot of {0} and MEDV'.format(col))
plt.savefig("./Figures/Cols-Scatters/{0}_{1}_MEDV".format(idx,col), dpi=200)
plt.close()
#2.2.3 Scatterplot for +3 features
print("Creating plots for 4 features!")
sorted_df = df.sort_values("MEDV")
sorted_df = sorted_df.reset_index(drop=True)
for col in sorted_df:
if(col == maxcor_col or col == mincor_col or col == 'MEDV'):
continue
idx = df.columns.get_loc(col)
trace0 = go.Scatter(
x=sorted_df['MEDV'],
y=sorted_df[maxcor_col],
mode='lines',
name=maxcor_col
)
trace1 = go.Scatter(
x=sorted_df['MEDV'],
y=sorted_df[mincor_col],
mode='lines',
name=mincor_col
)
trace2 = go.Scatter(
x=sorted_df['MEDV'],
y=sorted_df[col],
mode='lines',
opacity=0.8,
name=col
)
data = [trace0, trace1, trace2]
layout = go.Layout(
title='MEDV vs {0}, {1}, {2}'.format(
maxcor_col, mincor_col, col),
yaxis=dict(title='MEDV'),
xaxis=dict(title='{0}, {1}, {2}'.format(
maxcor_col, mincor_col, col)),
plot_bgcolor="#f3f3f3"
)
fig = go.Figure(data=data, layout=layout)
plot(fig, filename="./Figures/multiple_features_plotly/{0}_{1}.html".format(idx, col), auto_open=False)
#3. Apply Regressorss
print("Creating and fitting Regression Model!")
#3.1 Split the data into training and testing
df_train, df_test, medv_train, medv_test = train_test_split(boston_data["data"], boston_data["target"])
#3.2 Linear Regression
from sklearn.metrics import mean_squared_error
from sklearn import linear_model
#3.2.1 Make a model and fit the values
lr_reg = linear_model.LinearRegression()
lr_reg.fit(df_train, medv_train)
predicted_medv = lr_reg.predict(df_test)
expected_medv = medv_test
#3.2.2 Linear Regression performance
from sklearn.metrics import r2_score
lr_mse = round(mean_squared_error(expected_medv, predicted_medv),3)
lr_r2 = round(r2_score(expected_medv, predicted_medv),5)
plt.figure(figsize=(16, 9), dpi=200)
plt.subplot(2, 2, 1)
sns.regplot(expected_medv, predicted_medv, color='g')
plt.ylabel('Predicted Value')
plt.title('Linear Regression.\nMSE= {0} , R-Squared= {1}'.format(lr_mse,lr_r2))
#3.3 Bayesian Ridge Linear Regression
#3.3.1 Make a model and fit the values
br_reg = linear_model.BayesianRidge()
br_reg.fit(df_train, medv_train)
predicted_medv = br_reg.predict(df_test)
#3.3.2 Model performance
br_mse = round(mean_squared_error(expected_medv, predicted_medv),3)
br_r2 = round(r2_score(expected_medv, predicted_medv),5)
plt.subplot(2, 2, 2)
sns.regplot(expected_medv, predicted_medv, color='red')
plt.title('Bayesian Ridge Linear Regression.\nMSE= {0} , R-Squared= {1}'.format(br_mse, br_r2))
#3.4 Lasso
#3.4.1 Creating a model and fit it
lasso_reg = linear_model.LassoLars(alpha=.1)
lasso_reg.fit(df_train, medv_train)
predicted_medv = lasso_reg.predict(df_test)
#3.4.2 Model performance
lasso_mse = round(mean_squared_error(expected_medv, predicted_medv),3)
lasso_r2 = round(r2_score(expected_medv, predicted_medv),5)
plt.subplot(2, 2, 3)
sns.regplot(expected_medv, predicted_medv, color='orange')
plt.xlabel('Expected Value')
plt.ylabel('Predicted Value')
plt.title('Lasso Linear Regression.\nMSE= {0} , R-Squared= {1}'.format(lasso_mse, lasso_r2))
#3.5 Gradient boosted tree
from sklearn.ensemble import GradientBoostingRegressor
#3.5.1 Make a model and fit the values
gb_reg= GradientBoostingRegressor(loss='ls')
gb_reg.fit(df_train, medv_train)
predicted_medv = gb_reg.predict(df_test)
#3.5.2 Gradient Boosting performance
gb_mse = round(mean_squared_error(expected_medv, predicted_medv),3)
gb_r2 = round(r2_score(expected_medv, predicted_medv),5)
plt.subplot(2, 2, 4)
sns.regplot(expected_medv, predicted_medv, color='b')
plt.xlabel('Expected Value')
plt.title('Gradient Boosting.\nMSE= {0} , R-Squared= {1}'.format(gb_mse,gb_r2))
plt.tight_layout()
plt.savefig("./Figures/Regression_Models.png")
plt.close()
d = {'Model':['Linear Regression', 'Bayesian Ridge' ,'Lasso', 'Gradient Boosting'],
'Variable': [lr_reg, br_reg, lasso_reg, gb_reg],
'MSE': [lr_mse, br_mse, lasso_mse, gb_mse],
'R-Squared': [lr_r2, br_r2, lasso_r2, gb_r2]
}
results_df = | pd.DataFrame(data=d) | pandas.DataFrame |
from tqdm import tqdm
import pandas as pd
import numpy as np
import json
import os
os.chdir("/media/data/bderinbay/rvc_devkit/datasets/json_file/")
with open('../joined_val_boxable.json') as f:
data = json.load(f)
df_cats = pd.read_csv('categories.csv')
df_images = pd.DataFrame(data['images'])
df_images = df_images.drop(['ds_id'], axis=1)
df_images['dataset_name'] = df_images.file_name.apply(lambda x: x.split("/")[0])
df_anno = pd.DataFrame(data['annotations'])
df_anno = df_anno[:1504179]
df_anno = df_anno.drop(['file_name', 'segments_info'], axis=1)
sample_columns = ['image_id','bbox','category_id', 'category_name', 'file_name', 'height', 'width','ds_id']
"""
df_sample = pd.DataFrame([], columns=sample_columns)
"""
del data
#return-> string: category name
def find_category(cat_id):
try:
return df_cats.name[df_cats.id == cat_id].values[0]
except:
return "no-catagory"
#return-> dict annoset counted as dataset
def get_dataset_counts(anno_set):
dataset_names = ['coco', 'objects365', 'oid', 'mvs']
anno_set_db = anno_set.groupby(['dataset_name']).count()
cols = [col for col in anno_set_db.columns if col not in ['dataset_name', 'id']]
anno_set_db = anno_set_db.drop(cols, axis=1)
anno_set_dbcount_dict = anno_set_db.to_dict()['id']
for dbname in dataset_names:
if(dbname not in anno_set_dbcount_dict.keys()):
anno_set_dbcount_dict[dbname] = 0
return anno_set_dbcount_dict
#drop mismatched columns and join two dataframe
df_joined = df_anno.set_index('image_id').join(df_images.set_index('id'))
df_sample = pd.DataFrame([], columns=sample_columns)
for cat_id in tqdm(df_cats.id.values):
anno_set = df_joined[df_joined.category_id == cat_id]
db_count = get_dataset_counts(anno_set)
#increase the value for added annotations
limit = 15
db_quota = {'coco': 0, 'mvs': 0, 'objects365': 0, 'oid': 0}
number_of_dataset=0
    #count how many datasets have at least one annotation for this category
for x in db_count:
if(db_count[x] > 0):
number_of_dataset += 1
    # if the category was not found in any dataset
if(number_of_dataset == 0):
        print(cat_id, ": not found in any dataset")
else:
#calculate avg quota for each database
quota_foreach_db = int(limit / number_of_dataset)
#loop through annotation set
for a in tqdm(anno_set.itertuples()):
if(db_quota[a.dataset_name] < quota_foreach_db):
df_a = pd.DataFrame([a])
#it uses the image_id's as Index, rename it for convenience
df_a = df_a.rename(columns={"Index": "image_id"})
df_a['category_name'] = find_category(df_a['category_id'].values[0])
df_sample = df_sample.append(df_a)
db_quota[a.dataset_name]+=1
#df_sample rows all carry index 0 from the appends; reset the index and drop it
df_sample = df_sample.reset_index(drop=True)
#console outputs
print(df_sample.shape)
print(df_sample.head(5))
df_sample.to_csv('df_sample_val_v1.csv',index=False)
print("###############################")
df_sample = pd.read_csv('df_sample_val_v1.csv')
image_id_set = set(df_sample['image_id'].values)
anno_set = df_anno[df_anno.image_id.isin(image_id_set)]
df_image_set = df_images[df_images.id.isin(image_id_set)]
df_set_joined = anno_set.set_index('image_id').join(df_image_set.set_index('id'))
df_set_joined.to_csv('val_joined_annotations_v1.csv',index=False)
#TODO: rewrite this loop with the apply method
for a in tqdm(df_set_joined.itertuples()):
df_a = | pd.DataFrame([a]) | pandas.DataFrame |
import pandas as pd
dicts = {"country": ["Brazil", "Russia", "India", "China", "South Africa"],
"capital": ["Brasilia", "Moscow", "New Dehli", "Beijing", "Pretoria"],
"area": [8.516, 17.10, 3.286, 9.597, 1.221],
"population": [200.4, 143.5, 1252, 1357, 52.98]}
brics = | pd.DataFrame(dicts) | pandas.DataFrame |
def main():
import pandas as pd
from googletrans import Translator
import nltk
import spacy
import re
from word2number import w2n
from IPython.display import display, HTML, Javascript
from spacy import displacy
| pd.set_option('display.max_columns', 7) | pandas.set_option |
# -*- coding: utf-8 -*-
"""Annotating vector neighborhoods.
This module provides functionality for annotating vector neighborhoods
obtained from a number of word embeddings.
This is the second (and most important) step in the default LDT analysis workflow. The
input is pre-computed vector neighborhood files prepared with the
:class:`~ldt.experiments.neighbors.VectorNeighborhoods` class. See the
documentation of that class for more details.
The output is saved in the experiments/neighbors_annotated/your_experiment_name
subfolder of the ldt resource folder specified in the configuration file.
These are tab-separated data files with columns indicating the presence of a
binary relation in target:neighbor word pairs (e.g. whether they are
synonyms), or a numerical indicator of a relationship (e.g. the distance
between them in an ontology). See the full list of available scores `here
<http://ldtoolkit.space/ldscores/>`_.
Todo:
* parsing arguments from command line
* ldt resource settings saved to metadata
* add progressbars
* multicore processing
"""
import ldt
import os
import uuid
import pandas as pd
import numpy as np
from tqdm import tqdm
#import multiprocessing
#import multiprocessing.pool
# from billiard import
from p_tqdm import p_map
#from progressbar.bar import ProgressBar
# from pathos.multiprocessing import ProcessingPool
# from multiprocessing import Pool
#import multiprocessing.managers as m
from vecto.utils.data import load_json
from ldt.experiments.metadata import Experiment
from ldt.load_config import config
from ldt.dicts.normalize import Normalization
from ldt.dicts.derivation.meta import DerivationAnalyzer
from ldt.dicts.semantics.metadictionary import MetaDictionary
from ldt.relations.pair import RelationsInPair
from ldt.relations.distribution import DistributionDict
from ldt.load_config import config
# class NoDaemonProcess(multiprocessing.Process):
# def __init__(self, group=None, target=None, name=None, args=(), kwargs={},
# *, daemon=None):
# super(NoDaemonProcess, self).__init__(group, target, name, args, kwargs, daemon=daemon)
# if 'daemon' in multiprocessing.process._current_process._config:
# del multiprocessing.process._current_process._config['daemon']
# self._config = multiprocessing.process._current_process._config.copy()
# # make 'daemon' attribute always return False
# def _get_daemon(self):
# return False
# def _set_daemon(self, value):
# pass
# daemon = property(_get_daemon, _set_daemon)
#
# # We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool
# # because the latter is only a wrapper function, not a proper class.
# class MyPool(multiprocessing.pool.Pool):
# Process = NoDaemonProcess
class AnnotateVectorNeighborhoods(Experiment):
"""This class provides a simple interface for annotating pre-computed top_n
vector neighborhoods for a given vocabulary sample.
Vecto-style metadata is also generated."""
#pylint: disable=too-many-arguments
def __init__(self, experiment_name=config["experiments"]["experiment_name"],
extra_metadata=None,
overwrite=config["experiments"]["overwrite"], ld_scores="main",
output_dir=os.path.join(config["path_to_resources"],
"experiments"),
ldt_analyzer=None,
multiprocessing=config["experiments"]["multiprocessing"],
debugging=False):
""" Annotating pre-computed top *n* neighbors for a given vocab sample
Args:
experiment_name (str): the human-readable name for the
current experiment, which will be used to make a subfolder
storing the generated data. If None, the folder will be simply
timestamped.
extra_metadata (dict): any extra fields to be added to the
experiment metadata (overwriting any previously existing fields)
output_dir (str): the *existing* path for saving the *subfolder*
named with the specified experiment_name, where the output data
and metadata.json file will be saved.
overwrite (bool): if True, any previous data for the same
experiment will be overwritten, and the experiment will be
re-started.
ldt_analyzer: :class:`~ldt.relations.pair.RelationsInPair`
instance, with lexicographic, morphological and normalization
resources set up as desired (see tutorial and
class documentation). If None, default settings for English
will be used.
ld_scores (str or list of str): "all" for all supported scores,
or a list of ld_scores. Supported values are:
- "SharedPOS",
- "SharedMorphForm",
- "SharedDerivation",
- "NonCooccurring",
- "GDeps",
- "TargetFrequency",
- "NeighborFrequency",
- "Associations",
- "ShortestPath",
- "Synonyms",
- "Antonyms",
- "Meronyms",
- "Hyponyms",
- "Hypernyms",
- "OtherRelations",
- "Numbers",
- "ProperNouns",
- "Noise",
- "URLs",
- "Filenames",
- "ForeignWords",
- "Hashtags"
- 'TargetFrequency',
- 'NeighborFrequency'.
See more details for these scores `here
<http://ldtoolkit.space/ldscores/>`_.
Returns:
(None): the annotated neighbors file will be written to disk
together with the experiment metadata.
"""
super(AnnotateVectorNeighborhoods, self).__init__(
experiment_name=experiment_name, extra_metadata=extra_metadata, \
overwrite=overwrite, embeddings=None, output_dir=output_dir,
dataset=None, experiment_subfolder="neighbors_annotated")
self.metadata["task"] = "annotate_neighbors"
self.metadata["uuid"] = str(uuid.uuid4())
self.metadata["ldt_config"] = config
self.metadata["output_dir"] = self.output_dir
self.metadata["debugging"] = debugging
self.metadata["multiprocessing"] = multiprocessing
self._load_dataset(dataset=None)
neighbors_metadata_path = self.output_dir.replace(
"neighbors_annotated", "neighbors")
neighbors_metadata_path = os.path.join(neighbors_metadata_path,
"metadata.json")
if not os.path.isfile(neighbors_metadata_path):
raise IOError("The metadata for the neighborhood generation task "
"was not found at "+neighbors_metadata_path)
else:
self.metadata["neighbors_metadata_path"] = neighbors_metadata_path
neighbors_metadata = load_json(neighbors_metadata_path)
self.metadata["embeddings"] = neighbors_metadata["embeddings"]
self.embeddings = []
for embedding in self.metadata["embeddings"]:
self.embeddings.append(embedding["path"])
self.message = "\n\nStarting LD annotation. This will take a while " \
"for " \
"the first files, but the remainder should go faster, " \
"because many neighbor pairs will be the same."
# self.metadata["failed_pairs"] = []
self.metadata["missed_pairs"] = []
self.metadata["total_pairs"] = 0
self.supported_vars = ["SharedPOS", "SharedMorphForm",
"SharedDerivation", "NonCooccurring",
"GDeps", "TargetFrequency",
"NeighborFrequency", "Associations",
"ShortestPath", "Synonyms", "Antonyms",
"Meronyms", "Hyponyms", "Hypernyms",
"OtherRelations", "Numbers", "ProperNouns",
"Misspellings", "URLs", "Filenames",
"ForeignWords", "Hashtags", "Noise"]
self.continuous_vars = ['ShortestPath', 'TargetFrequency',
'NeighborFrequency']
corpus_specific = ["NonCooccurring", "TargetFrequency", "NeighborFrequency"]
        if not config["corpus"]:
            self.supported_vars = [x for x in self.supported_vars if x not in corpus_specific]
            self.continuous_vars = [x for x in self.continuous_vars if x not in corpus_specific]
self.binary_vars = [x for x in self.supported_vars if not \
x in self.continuous_vars]
ld_scores_error = "The ld_scores argument is invalid. It should be " \
"'all' for all supported relations, or a list with " \
"one or more of the following values:\n" + \
", ".join(self.supported_vars)
if ld_scores == "all":
self._ld_scores = self.supported_vars
elif ld_scores == "main":
exclude = ["ShortestPath", "URLs", "Filenames", "Hashtags",
"Noise"]
if not config["corpus"]:
exclude += ["NonCooccurring", "GDeps", "TargetFrequency",
"NeighborFrequency"]
self._ld_scores = [x for x in self.supported_vars if not x in
exclude]
else:
if isinstance(ld_scores, list):
unsupported = [x for x in ld_scores if not
x in self.supported_vars]
if unsupported:
raise ValueError(ld_scores_error)
else:
self._ld_scores = [x for x in self.supported_vars if x
in ld_scores]
else:
raise ValueError(ld_scores_error)
self.metadata["ld_scores"] = self._ld_scores
self.metadata["continuous_vars"] = self.continuous_vars
self.metadata["binary_vars"] = self.binary_vars
self.ldt_analyzer = ldt_analyzer
# global metadata
# metadata = self.metadata
#
# global global_analyzer
# global_analyzer = ldt_analyzer
# global_analyzer = init_analyzer(path=neighbors_metadata_path,
# analyzer=ldt_analyzer)
def _load_dataset(self, dataset):
"""Dataset for generating vector neighborhoods was already processed in
the previous stage of the experiment, so nothing needs to be done
here."""
pass
def _process(self, embeddings_path):
global prior_data
prior_data = collect_prior_data(self.metadata["output_dir"])
# print("collected prior data", len(prior_data))
global metadata
metadata = self.metadata
global global_analyzer
global_analyzer = self.ldt_analyzer
filename = self.get_fname_for_embedding(embeddings_path)
neighbor_file_path = os.path.join(self.output_dir.replace(
"neighbors_annotated", "neighbors"), filename+".tsv")
print("\nAnnotating "+neighbor_file_path)
self.metadata["out_path"] = os.path.join(self.output_dir,
filename+".tsv")
input_df = pd.read_csv(neighbor_file_path, header=0, sep="\t")
self.metadata["total_pairs"] += len(input_df)
dicts = input_df.to_dict(orient="records")
if metadata["multiprocessing"] == 1:
print("\nMultiprocessing: 1 core")
newdicts = []
for d in tqdm(dicts):
newdicts.append(_process_one_dict(d))
# # newdicts.append(self._process_one_dict_meth(d))
dicts = newdicts
# dicts = [_process_one_dict(x) for x in dicts]
# self.save_results(dicts)
else:
print("\nMultiprocessing:", metadata["multiprocessing"], "cores")
#python multiprocessing library
# pool = Pool(metadata["multiprocessing"], initializer=initializer(global_analyzer))
# dicts = pool.map(_process_one_dict, dicts)
# #pathos.multiprocessing
# pool = ProcessingPool(nodes=metadata["multiprocessing"])
# dicts = pool.map(_process_one_dict, dicts)
#try with method
# t_dicts = []
# for d in dicts:
# t_dicts.append((d,))
# pool = ProcessingPool(nodes=metadata["multiprocessing"])
# dicts = pool.map(self._process_one_dict_meth, dicts)
dicts = p_map(_process_one_dict, dicts, num_cpus=metadata["multiprocessing"])
# self.save_results(dicts)
# pool = MyPool(metadata["multiprocessing"])
# dicts = pool.map(_process_one_dict, dicts)
# pool.close()
# pool.join()
dicts = self.add_distr_data(dicts)
self.save_results(dicts, overwrite=True)
def save_results(self, dicts, overwrite=False):
output_df = pd.DataFrame(dicts,
columns=["Target", "Rank", "Neighbor",
"Similarity"]+self._ld_scores)
if not os.path.exists(self.metadata["out_path"]):
output_df.to_csv(self.metadata["out_path"], index=False,
sep="\t", header=True)
else:
if not overwrite:
# existing_df = pd.read_csv(self.metadata["out_path"], header=0, sep="\t")
# existing_dicts = existing_df.to_dict(orient="records")
# if not existing_dicts == dicts:
output_df.to_csv(self.metadata["out_path"], index=False, sep="\t",
mode="a", header=False)
else:
output_df.to_csv(self.metadata["out_path"], index=False,
sep="\t", header=True)
def _postprocess_metadata(self):
"""Helper method for logging unique failed target:neighbor pairs and
calculating the overall coverage (considered as number of non-unique
pairs for which dictionary data was successfully found)."""
del self.metadata["continuous_vars"]
del self.metadata["binary_vars"]
# find missing data
input_df = pd.read_csv(self.metadata["out_path"], header=0,
sep="\t")
dicts = input_df.to_dict(orient="records")
set_dicts = {}
for i in dicts:
set_dicts[i["Target"]+":"+i["Neighbor"]] = i
if not self._ld_scores[0] in i:
self.metadata["missed_pairs"].append(i["Target"]+":"+i["Neighbor"])
else:
try:
if np.isnan(i[self._ld_scores[0]]):
self.metadata["missed_pairs"].append(i["Target"] + ":" + i["Neighbor"])
except TypeError:
continue
try:
if | pd.isnull(i[self._ld_scores[0]]) | pandas.isnull |
#Copyright [2020] [Indian Institute of Science, Bangalore]
#SPDX-License-Identifier: Apache-2.0
import numpy as np
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sim_data_dir = '/home/nihesh/Documents/COVID-19/sim_data/'
file_folder_list = os.listdir(sim_data_dir)
no_data_dir = ['Results', 'combined_plots', 'combined_plots_200K']
for file_folder_name in file_folder_list:
if (os.path.isdir(sim_data_dir+file_folder_name) and no_data_dir.count(file_folder_name)==0):
data_dir = sim_data_dir+file_folder_name+'/'
result_dir = '/home/nihesh/Documents/COVID-19/sim_data_means/'+file_folder_name+'/'
file_list = os.listdir(data_dir)
if not (os.path.exists(result_dir)):
os.mkdir(result_dir)
print (data_dir)
column_names = ['timestep', 'affected', 'recovered', 'infected', 'exposed', 'hospitalised', 'critical', 'dead']
master_df = | pd.DataFrame(columns=column_names) | pandas.DataFrame |
import matplotlib.pyplot as plt
import requests
import json
import pandas as pd
import numpy as np
# Administrative districts (okrugs) of Moscow
COLORS = ['y', 'b', 'r', 'g', 'c', 'm', 'lime', 'gold', 'orange', 'coral', 'purple', 'grey']
DISTRICT = {"Восточный административный округ": [55.787710, 37.775631],
"Западный административный округ": [55.728003, 37.443533],
"Зеленоградский административный округ": [55.987583, 37.194250],
"Новомосковский административный округ": [55.558121, 37.370724],
"Северный административный округ": [55.838384, 37.525765],
"Северо-Восточный административный округ": [55.863894, 37.620923],
"Северо-Западный административный округ": [55.829370, 37.451546],
"Троицкий административный округ": [55.355771, 37.146990],
"Центральный административный округ": [55.753995, 37.614069],
"Юго-Восточный административный округ": [55.692019, 37.754583],
"Юго-Западный административный округ": [55.662735, 37.576178],
"Южный административный округ": [55.610906, 37.681479]}
# Short names of the districts
DISTRICT_NAME = ['ВАО', 'ЗАО', 'ЗелАО', 'Новомосковский АО', 'САО', 'СВАО', 'СЗАО', 'Троицкий АО', 'ЦАО', 'ЮВАО', 'ЮЗАО', 'ЮАО']
# POST request for the data
def get_data(url, filename):
URL = url
client = requests.session()
client.get(URL)
res = requests.post(URL, headers=dict(Referer=URL))
with open(filename, 'w') as outfile:
json.dump(res.json(), outfile, ensure_ascii=False, separators=(',', ': '), indent=4, sort_keys=False)
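# Hedged usage sketch: the URL and output file below are placeholders, not a real endpoint.
#   get_data("https://example.org/open-data/lyceums/features", "lyceums.json")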
class network_KH:
def __init__(self, values, centers):
self.values = np.array(values)
self.centers = np.array(centers)
self.weights = np.zeros((len(values), len(centers)))
def euclidDist(self, a, b):
return np.linalg.norm(a - b)
def find_weights(self):
for value_i in range(len(self.values)):
for center_i in range(len(self.centers)):
self.weights[value_i][center_i] = self.euclidDist(self.values[value_i], self.centers[center_i])
for value_i in range(len(self.values)):
min_index = self.weights[value_i].argmin()
self.weights[value_i][min_index] = 1
self.weights[value_i][0:min_index] = 0
self.weights[value_i][min_index + 1:] = 0
return self.weights
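# Hedged illustration with made-up coordinates: each row of the matrix returned by
# find_weights() becomes a one-hot vector marking the nearest center for that point.
#   net = network_KH(values=[[0.0, 0.0], [5.0, 5.0]], centers=[[0.0, 1.0], [5.0, 4.0]])
#   assignments = net.find_weights()   # rows come out as [1, 0] and [0, 1]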
class ClusterAnalysis():
def __init__(self, data_lyceums):
self.read_json(data_lyceums)
_, self.ax = plt.subplots()
self.save_data('init.png')
    # Read the JSON file
def read_json(self, data_lyceums):
json_data = open(data_lyceums).read()
data = json.loads(json_data)
lyceums_data = [data['features'][i]['geometry']['coordinates'] for i in
range(len(data['features']))]
dist_data = [data['features'][i]['properties']['Attributes']['okrug'] for i in
range(len(data['features']))]
name_data = [data['features'][i]['properties']['Attributes']['name'] for i in
range(len(data['features']))]
lyceums = pd.DataFrame(lyceums_data, columns=['x', 'y'])
lyceums['districts'] = dist_data
lyceums['color'] = 'k'
lyceums['size'] = 6
lyceums['name'] = name_data
self.lyceums = lyceums
districts_data = DISTRICT.values()
districts = | pd.DataFrame(districts_data, columns=['y', 'x']) | pandas.DataFrame |
## IMPORTING THE DEPENDENCIES
import pandas as pd
import numpy as np
from scipy.io import loadmat
from utils import compare_date, clean_full_date
## LOADING THE .MAT FILE
imdb_mat = './dataset/unprocessed/imdb_crop/imdb.mat'
imdb_data = loadmat(imdb_mat)
imdb = imdb_data['imdb']
## PROCESS THE DATA
# The .mat file stores each person's date of birth as a MATLAB serial date number, but the
# same date also appears in every image filename as a plain date, which is simpler to parse,
# so the dates of birth are read from the filenames instead.
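# For reference, a hedged sketch of converting a MATLAB serial date number directly,
# the route this script avoids by parsing the dates out of the filenames instead.
# The conventional 366-day offset between MATLAB's day 0 and Python's ordinal is assumed.
def matlab_datenum_to_year(datenum):
    from datetime import datetime, timedelta
    # MATLAB counts days from year 0000; Python's fromordinal() starts at 0001-01-01.
    dt = datetime.fromordinal(int(datenum)) + timedelta(days=datenum % 1) - timedelta(days=366)
    return dt.year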
up_full_path = imdb[0][0][2][0] # Path of the images
up_gender = imdb[0][0][3][0] # Gender
up_face_score1 = imdb[0][0][6][0] # Score of the first image
up_face_score2 = imdb[0][0][7][0] # Score of the second image (NaN if there is no second image)
# Getting the gender
p_gender = []
for gender in up_gender:
if gender == 1:
p_gender.append('Male')
elif gender == 0:
p_gender.append('Female')
else:
p_gender.append('Unknown')
# Getting the dob and path
p_dob = []
p_path = []
for path in up_full_path:
temp = path[0].split('_')
photo_taken = temp[3].split('.')[0]
dob = clean_full_date(temp[2])
p_dob.append(compare_date(dob, photo_taken))
p_path.append('imdb_crop/' + path[0])
# Stacking the data
imdb_processed_data = np.vstack((p_dob, p_gender, p_path, up_face_score1, up_face_score2)).T
## SAVING THE DATA
# Making a datagrame from the data
cols = ['age', 'gender', 'path', 'f1_score', 'f2_score']
imdb_df = pd.DataFrame(imdb_processed_data, columns=cols)
from glob import glob
from typing import Dict, List, Union
import json
import os
import zipfile
from urllib.error import HTTPError
from torch.utils.data import DataLoader
from torchvision.transforms.transforms import Compose
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import wget
from ..ser_slice_dataset import SERSliceDataset, SERSliceTestDataset, SERInferenceDataset
from ..features.transform import FilterBank
emo2idx = {emo: i for i, emo in enumerate(['Neutral', 'Angry', 'Happy', 'Sad', 'Frustrated'])}
idx2emo = {v: k for k, v in emo2idx.items()}
correctemo = {
'Neutral': 'Neutral',
'Angry': 'Anger',
'Happy': 'Happiness',
'Sad': 'Sadness',
'Frustrated': 'Frustration'
}
class ThaiSERDataModule(pl.LightningDataModule):
def __init__(
self,
test_fold: int,
agreement_threshold: float = 0.71,
sampling_rate: int = 16000,
num_mel_bins: int = 40,
frame_length: int = 50, # in ms
frame_shift: int = 10, # in ms
center_feats: bool = True,
scale_feats: bool = True,
mic_type: str = 'con',
download_dir: str = None,
experiment_dir: str = None,
include_zoom: bool = True,
max_len: int = 3,
batch_size: int = 64,
emotions: List[str] = None,
num_workers: int = 0,
*args,
**kwargs):
super().__init__(*args, **kwargs)
if emotions is None:
emotions = ["neutral", "anger", "happiness", "sadness"]
# loading dataset config
self.agreement_threshold = agreement_threshold
self.mic_type = mic_type
self.test_fold = test_fold
# dataset config
self.max_len = max_len
self.batch_size = batch_size
self.sampling_rate = sampling_rate
self.frame_length = frame_length
self.frame_shift = frame_shift
self.sec_to_frame = 10 * self.frame_shift
self.num_mel_bins = num_mel_bins
self.num_workers = num_workers
# normalizing sample
self.center_feats = center_feats
self.scale_feats = scale_feats
# config n_classes, avail emotion
self.include_zoom = include_zoom
self.emotions = emotions
self.n_classes = len(self.emotions)
# config download dir
if download_dir is None:
self.download_root = f"{os.path.expanduser('~')}/vistec-ser_tmpfiles/vistec"
else:
self.download_root = f"{download_dir}/vistec-ser_tmpfiles/vistec"
if not os.path.exists(self.download_root):
os.makedirs(self.download_root)
# config experiment dir
if experiment_dir is None:
self.experiment_root = f"{os.path.expanduser('~')}/vistec-ser_tmpfiles/exp_vistec"
else:
self.experiment_root = f"{experiment_dir}"
self.experiment_dir = f"{self.experiment_root}/fold{self.test_fold}"
if not os.path.exists(self.experiment_dir):
os.makedirs(self.experiment_dir)
# define download URL
version = 1
release_url = f"https://github.com/vistec-AI/dataset-releases/releases/download/v{version}"
self.github_url = {
"studio1-10": release_url+f"/studio1-10.zip",
"studio11-20": release_url+f"/studio11-20.zip",
"studio21-30": release_url+f"/studio21-30.zip",
"studio31-40": release_url+f"/studio31-40.zip",
"studio41-50": release_url+f"/studio41-50.zip",
"studio51-60": release_url+f"/studio51-60.zip",
"studio61-70": release_url+f"/studio61-70.zip",
"studio71-80": release_url+f"/studio71-80.zip",
"zoom1-10": release_url+f"/zoom1-10.zip",
"zoom11-20": release_url+f"/zoom11-20.zip",
}
self.labels_url = release_url+f"/emotion_label.json"
# define fold split
self.fold_config = {
0: [f"studio{s:03d}" for s in range(1, 11)],
1: [f"studio{s:03d}" for s in range(11, 21)],
2: [f"studio{s:03d}" for s in range(21, 31)],
3: [f"studio{s:03d}" for s in range(31, 41)],
4: [f"studio{s:03d}" for s in range(41, 51)],
5: [f"studio{s:03d}" for s in range(51, 61)],
6: [f"studio{s:03d}" for s in range(61, 71)],
7: [f"studio{s:03d}" for s in range(71, 81)],
8: [f"zoom{s:03d}" for s in range(1, 11)],
9: [f"zoom{s:03d}" for s in range(11, 21)]
}
assert self.test_fold in self.fold_config.keys()
self.studio_list = []
for studios in self.fold_config.values():
for s in studios:
self.studio_list.append(s)
self.train = None
self.val = None
self.test = None
self.zoom = None
def set_fold(self, fold):
self.test_fold = fold
self.experiment_dir = f"{self.experiment_root}/fold{self.test_fold}"
if not os.path.exists(self.experiment_dir):
os.makedirs(self.experiment_dir)
def prepare_data(self):
"""Run once as a preparation: download dataset, generate csv labels"""
self._download()
self._prepare_labels()
def setup(self, *args, **kwargs):
train_folds = self.fold_config.keys() if self.include_zoom else list(self.fold_config.keys())[:-2]
self.train = pd.concat([pd.read_csv(f"{self.download_root}/fold{i}.csv")
for i in train_folds if i != self.test_fold])
test_split = pd.read_csv(f"{self.download_root}/fold{self.test_fold}.csv")
test_studio = self.fold_config[self.test_fold]
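        # the held-out fold's studios are split in half below: the first half
        # becomes the validation set, the second half becomes the test set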
val_studio = test_studio[:len(test_studio) // 2]
test_studio = test_studio[len(test_studio) // 2:]
self.val = test_split[test_split["PATH"].apply(lambda x: x.split("/")[-3]).isin(val_studio)]
self.test = test_split[test_split["PATH"].apply(lambda x: x.split("/")[-3]).isin(test_studio)]
if not self.include_zoom:
self.zoom = pd.concat([pd.read_csv(f"{self.download_root}/fold{i}.csv")
for i in list(self.fold_config.keys())[-2:] if i != self.test_fold])
def train_dataloader(self) -> DataLoader:
transform = Compose([FilterBank(
frame_length=self.frame_length,
frame_shift=self.frame_shift,
num_mel_bins=self.num_mel_bins)])
train_vistec = SERSliceDataset(
csv_file=self.train,
sampling_rate=self.sampling_rate,
max_len=self.max_len,
center_feats=self.center_feats,
scale_feats=self.scale_feats,
transform=transform)
return DataLoader(train_vistec, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True)
def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
transform = Compose([FilterBank(
frame_length=self.frame_length,
frame_shift=self.frame_shift,
num_mel_bins=self.num_mel_bins)])
val_vistec = SERSliceTestDataset(
csv_file=self.val,
sampling_rate=self.sampling_rate,
max_len=self.max_len,
center_feats=self.center_feats,
scale_feats=self.scale_feats,
transform=transform)
return DataLoader(val_vistec, batch_size=1, num_workers=self.num_workers)
def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
transform = Compose([FilterBank(
frame_length=self.frame_length,
frame_shift=self.frame_shift,
num_mel_bins=self.num_mel_bins)])
test_vistec = SERSliceTestDataset(
csv_file=self.test,
sampling_rate=self.sampling_rate,
max_len=self.max_len,
center_feats=self.center_feats,
scale_feats=self.scale_feats,
transform=transform
)
return DataLoader(test_vistec, batch_size=1, num_workers=self.num_workers)
def zoom_dataloader(self) -> DataLoader:
transform = Compose([FilterBank(
frame_length=self.frame_length,
frame_shift=self.frame_shift,
num_mel_bins=self.num_mel_bins)])
zoom_vistec = SERSliceTestDataset(
csv_file=self.zoom,
sampling_rate=self.sampling_rate,
max_len=self.max_len,
center_feats=self.center_feats,
scale_feats=self.scale_feats,
transform=transform
)
return DataLoader(zoom_vistec, batch_size=1, num_workers=self.num_workers)
def extract_feature(self, audio_path: Union[str, List[str]]):
# make audio_path List[str]
if isinstance(audio_path, str):
audio_path = [audio_path]
audio_df = pd.DataFrame([[a] for a in audio_path], columns=["PATH"])
transform = Compose([
FilterBank(
frame_length=self.frame_length,
frame_shift=self.frame_shift,
num_mel_bins=self.num_mel_bins
)
])
feature_dataset = SERInferenceDataset(
csv_file=audio_df,
sampling_rate=self.sampling_rate,
max_len=self.max_len,
center_feats=self.center_feats,
scale_feats=self.scale_feats,
transform=transform
)
return DataLoader(feature_dataset, batch_size=1, num_workers=self.num_workers)
def _get_audio_path(self, audio_name: str) -> str:
if not isinstance(audio_name, str):
raise TypeError(f"audio name must be string but got {type(audio_name)}")
studio_type = audio_name[0]
studio_num = audio_name.split('_')[0][1:]
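        # file names are expected to look like "s001_..." (studio) or "z001_..." (zoom);
        # the leading letter selects the directory, the digits select the session number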
if studio_type == "s":
directory = f"studio{studio_num}"
elif studio_type == "z":
directory = f"zoom{studio_num}"
else:
raise NameError(f"Error reading file name {audio_name}")
audio_path = f"{self.download_root}/{directory}/con/{audio_name}".replace(".wav", ".flac")
if studio_type == "s":
audio_path = audio_path.replace("con", self.mic_type)
elif studio_type == "z":
audio_path = audio_path.replace("con", "mic")
else:
raise NameError(f"Error reading file name {audio_name}")
if not os.path.exists(audio_path):
raise FileNotFoundError(f"{audio_path} not found")
return audio_path
def _prepare_labels(self):
# format
if not os.path.exists(f"{self.download_root}/labels.csv"):
print("\n+-----------------------------------+")
print("| Formatting labels... |")
print("+-----------------------------------+")
json_path = f"{self.download_root}/labels.json"
if not os.path.exists(json_path):
raise FileNotFoundError(f"labels.json not found at {self.download_root}")
print(f">formatting {json_path} ...")
data = read_json(json_path)
            # Filter out studios that do not appear in the download directory
avail_studio = []
for std in sorted(glob(f"{self.download_root}/*/")):
std = std[:-1].split("/")[-1]
std = std[0] + std[-3:]
avail_studio.append(std)
data = {k: v for k, v in data.items() if k.split("_")[0] in avail_studio}
agreements = get_agreements(data)
labels = pd.DataFrame([
(f"{self._get_audio_path(k)}", correctemo[idx2emo[v]])
for k, v in {k: convert_to_hardlabel(v, thresh=self.agreement_threshold)
for k, v in agreements.items()}.items()
if v != -1
], columns=['PATH', 'EMOTION'])
labels.to_csv(f"{self.download_root}/labels.csv", index=False)
else:
            labels = pd.read_csv(f"{self.download_root}/labels.csv")
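# Hedged usage sketch for ThaiSERDataModule above (comments only, not part of the
# original module); the model object and hyperparameters are placeholders:
#   dm = ThaiSERDataModule(test_fold=0, download_dir="./data", batch_size=32)
#   dm.prepare_data()            # download the corpus and generate the csv labels
#   dm.setup()                   # build train/val/test splits by studio fold
#   trainer = pl.Trainer(max_epochs=10)
#   trainer.fit(model, datamodule=dm)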
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.common import is_scalar
from pandas.core.common import _values_from_object, _maybe_match_name
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, _ensure_index, InvalidIndexError
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.internals import SingleBlockManager
from pandas.core import generic
import pandas.core.common as com
import pandas.core.ops as ops
import pandas._libs.index as _index
from pandas.util._decorators import Appender
from pandas.core.sparse.array import (
make_sparse, _sparse_array_op, SparseArray,
_make_index)
from pandas._libs.sparse import BlockIndex, IntIndex
import pandas._libs.sparse as splib
from pandas.core.sparse.scipy_sparse import (
_sparse_series_to_coo,
_coo_to_sparse_series)
_shared_doc_kwargs = dict(axes='index', klass='SparseSeries',
axes_single_arg="{0, 'index'}",
optional_labels='', optional_axis='')
# -----------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are
present for compatibility.
"""
def wrapper(self, other):
if isinstance(other, Series):
if not isinstance(other, SparseSeries):
other = other.to_sparse(fill_value=self.fill_value)
return _sparse_series_op(self, other, op, name)
elif isinstance(other, DataFrame):
return NotImplemented
elif is_scalar(other):
with np.errstate(all='ignore'):
new_values = op(self.values, other)
return self._constructor(new_values,
index=self.index,
name=self.name)
else: # pragma: no cover
raise TypeError('operation with {other} not supported'
.format(other=type(other)))
wrapper.__name__ = name
if name.startswith("__"):
# strip special method names, e.g. `__add__` needs to be `add` when
# passed to _sparse_series_op
name = name[2:-2]
return wrapper
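# Hedged illustration (comments only, not original source): the factory above backs
# expressions such as
#   s1 = SparseSeries([1.0, np.nan, 2.0])
#   s2 = SparseSeries([2.0, 2.0, np.nan])
#   s1 + s2        # aligns the indexes, then dispatches to _sparse_series_op
# while `s1 + some_dataframe` returns NotImplemented so DataFrame can handle it.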
def _sparse_series_op(left, right, op, name):
left, right = left.align(right, join='outer', copy=False)
new_index = left.index
new_name = _maybe_match_name(left, right)
result = _sparse_array_op(left.values, right.values, op, name,
series=True)
return left._constructor(result, index=new_index, name=new_name)
class SparseSeries(Series):
"""Data structure for labeled, sparse floating point data
Parameters
----------
data : {array-like, Series, SparseSeries, dict}
kind : {'block', 'integer'}
fill_value : float
Code for missing value. Defaults depends on dtype.
0 for int dtype, False for bool dtype, and NaN for other dtypes
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseSeries objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
_subtyp = 'sparse_series'
def __init__(self, data=None, index=None, sparse_index=None, kind='block',
fill_value=None, name=None, dtype=None, copy=False,
fastpath=False):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
else:
if data is None:
data = []
if isinstance(data, Series) and name is None:
name = data.name
if isinstance(data, SparseArray):
if index is not None:
assert (len(index) == len(data))
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
data = np.asarray(data)
elif isinstance(data, SparseSeries):
if index is None:
index = data.index.view()
if fill_value is None:
fill_value = data.fill_value
# extract the SingleBlockManager
data = data._data
elif isinstance(data, (Series, dict)):
data = Series(data, index=index)
index = data.index.view()
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
elif isinstance(data, (tuple, list, np.ndarray)):
# array-like
if sparse_index is None:
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
else:
assert (len(data) == sparse_index.npoints)
elif isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype)
if index is None:
index = data.index.view()
else:
data = data.reindex(index, copy=False)
else:
length = len(index)
if data == fill_value or (isna(data) and isna(fill_value)):
if kind == 'block':
sparse_index = BlockIndex(length, [], [])
else:
sparse_index = IntIndex(length, [])
data = np.array([])
else:
if kind == 'block':
locs, lens = ([0], [length]) if length else ([], [])
                        sparse_index = BlockIndex(length, locs, lens)
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from src.contact_models.contact_model_functions import _draw_nr_of_contacts
from src.contact_models.contact_model_functions import _draw_potential_vacation_contacts
from src.contact_models.contact_model_functions import (
_identify_ppl_affected_by_vacation,
)
from src.contact_models.contact_model_functions import (
calculate_non_recurrent_contacts_from_empirical_distribution,
)
from src.contact_models.contact_model_functions import go_to_daily_work_meeting
from src.contact_models.contact_model_functions import go_to_weekly_meeting
from src.contact_models.contact_model_functions import meet_daily_other_contacts
from src.contact_models.contact_model_functions import reduce_contacts_on_condition
from src.shared import draw_groups
@pytest.fixture
def params():
params = pd.DataFrame()
params["category"] = ["work_non_recurrent"] * 2 + ["other_non_recurrent"] * 2
params["subcategory"] = [
"symptomatic_multiplier",
"positive_test_multiplier",
] * 2
params["name"] = ["symptomatic_multiplier", "positive_test_multiplier"] * 2
params["value"] = [0.0, 0.0, 0.0, 0.0]
params.set_index(["category", "subcategory", "name"], inplace=True)
return params
@pytest.fixture
def states():
"""states DataFrame for testing purposes.
Columns:
- date: 2020-04-01 - 2020-04-30
- id: 50 individuals, with 30 observations each. id goes from 0 to 49.
- immune: bool
- infectious: bool
- age_group: ordered Categorical, either 10-19 or 40-49.
- region: unordered Categorical, ['Overtjssel', 'Drenthe', 'Gelderland']
- n_has_infected: int, 0 to 3.
- cd_infectious_false: int, -66 to 8.
- occupation: Categorical. "working" or "in school".
- cd_symptoms_false: int, positive for the first 20 individuals, negative after.
"""
this_modules_path = Path(__file__).resolve()
states = pd.read_parquet(this_modules_path.parent / "1.parquet")
old_to_new = {old: i for i, old in enumerate(sorted(states["id"].unique()))}
states["id"].replace(old_to_new, inplace=True)
states["age_group"] = pd.Categorical(
states["age_group"], ["10 - 19", "40 - 49"], ordered=True
)
states["age_group"] = states["age_group"].cat.rename_categories(
{"10 - 19": "10-19", "40 - 49": "40-49"}
)
states["region"] = pd.Categorical(
states["region"], ["Overtjssel", "Drenthe", "Gelderland"], ordered=False
)
states["date"] = pd.to_datetime(states["date"], format="%Y-%m-%d", unit="D")
states["n_has_infected"] = states["n_has_infected"].astype(int)
states["cd_infectious_false"] = states["cd_infectious_false"].astype(int)
states["occupation"] = states["age_group"].replace(
{"10-19": "in school", "40-49": "working"}
)
states["cd_symptoms_false"] = list(range(1, 21)) + list(range(-len(states), -20))
states["symptomatic"] = states["cd_symptoms_false"] >= 0
states["knows_infectious"] = False
states["knows_immune"] = False
states["cd_received_test_result_true"] = -100
states["knows_currently_infected"] = states.eval(
"knows_infectious | (knows_immune & symptomatic) "
"| (knows_immune & (cd_received_test_result_true >= -13))"
)
states["quarantine_compliance"] = 1.0
return states
@pytest.fixture
def a_thursday(states):
a_thursday = states[states["date"] == "2020-04-30"].copy()
a_thursday["cd_symptoms_false"] = list(range(1, 21)) + list(
range(-len(a_thursday), -20)
)
a_thursday["symptomatic"] = a_thursday["cd_symptoms_false"] >= 0
a_thursday["work_recurrent_weekly"] = draw_groups(
df=a_thursday,
query="occupation == 'working'",
assort_bys=["region"],
n_per_group=20,
seed=484,
)
return a_thursday
@pytest.fixture
def no_reduction_params():
params = pd.DataFrame()
params["subcategory"] = ["symptomatic_multiplier", "positive_test_multiplier"]
params["name"] = params["subcategory"]
params["value"] = 1.0
params = params.set_index(["subcategory", "name"])
return params
# ----------------------------------------------------------------------------
def test_go_to_weekly_meeting_wrong_day(a_thursday):
a_thursday["group_col"] = [1, 2, 1, 2, 3, 3, 3] + [-1] * (len(a_thursday) - 7)
contact_params = pd.DataFrame()
group_col_name = "group_col"
day_of_week = "Saturday"
seed = 3931
res = go_to_weekly_meeting(
a_thursday, contact_params, group_col_name, day_of_week, seed
)
expected = pd.Series(False, index=a_thursday.index)
assert_series_equal(res, expected, check_names=False)
def test_go_to_weekly_meeting_right_day(a_thursday, no_reduction_params):
a_thursday["group_col"] = [1, 2, 1, 2, 3, 3, 3] + [-1] * (len(a_thursday) - 7)
res = go_to_weekly_meeting(
states=a_thursday,
params=no_reduction_params,
group_col_name="group_col",
day_of_week="Thursday",
seed=3931,
)
expected = pd.Series(False, index=a_thursday.index)
expected[:7] = True
assert_series_equal(res, expected, check_names=False)
def test_go_to_daily_work_meeting_weekend(states, no_reduction_params):
a_saturday = states[states["date"] == pd.Timestamp("2020-04-04")].copy()
a_saturday["work_saturday"] = [True, True] + [False] * (len(a_saturday) - 2)
a_saturday["work_daily_group_id"] = 333
res = go_to_daily_work_meeting(a_saturday, no_reduction_params, seed=None)
expected = pd.Series(False, index=a_saturday.index)
expected[:2] = True
assert_series_equal(res, expected, check_names=False)
def test_go_to_daily_work_meeting_weekday(a_thursday, no_reduction_params):
a_thursday["work_daily_group_id"] = [1, 2, 1, 2, 3, 3, 3] + [-1] * (
len(a_thursday) - 7
)
res = go_to_daily_work_meeting(a_thursday, no_reduction_params, seed=None)
expected = pd.Series(False, index=a_thursday.index)
    # not everyone we assigned a group id to is a worker
expected.iloc[:7] = [True, True, False, True, True, False, True]
assert_series_equal(res, expected, check_names=False)
def test_go_to_daily_work_meeting_weekday_with_reduction(
a_thursday, no_reduction_params
):
reduction_params = no_reduction_params
reduction_params["value"] = 0.0
a_thursday["work_daily_group_id"] = [1, 2, 1, 2, 3, 3, 3, 3, 3] + [-1] * (
len(a_thursday) - 9
)
a_thursday.loc[1450:1458, "symptomatic"] = [
False,
False,
False,
False,
True,
False,
False,
False,
False,
]
res = go_to_daily_work_meeting(a_thursday, no_reduction_params, seed=None)
expected = pd.Series(False, index=a_thursday.index)
    # not everyone we assigned a group id to is a worker
expected[:9] = [True, True, False, True, False, False, True, False, True]
assert_series_equal(res, expected, check_names=False)
# --------------------------- Non Recurrent Contact Models ---------------------------
def test_non_recurrent_work_contacts_weekend(states, params):
a_saturday = states[states["date"] == pd.Timestamp("2020-04-04")]
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_saturday,
params=params.loc["work_non_recurrent"],
on_weekends=False,
query="occupation == 'working'",
seed=494,
)
assert_series_equal(res, pd.Series(data=0, index=a_saturday.index, dtype=float))
@pytest.fixture
def params_with_positive():
params = pd.DataFrame.from_dict(
{
"category": ["work_non_recurrent"] * 3,
"subcategory": [
"all",
"symptomatic_multiplier",
"positive_test_multiplier",
],
"name": [
2,
"symptomatic_multiplier",
"positive_test_multiplier",
], # nr of contacts
"value": [1.0, 0.0, 0.0], # probability
}
)
params = params.set_index(["category", "subcategory", "name"])
return params
def test_non_recurrent_work_contacts_no_random_no_sick(
a_thursday, params_with_positive
):
a_thursday["symptomatic"] = False
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params_with_positive.loc["work_non_recurrent"],
on_weekends=False,
query="occupation == 'working'",
seed=433,
)
expected = a_thursday["age_group"].replace({"10-19": 0.0, "40-49": 2.0})
assert_series_equal(res, expected, check_names=False, check_dtype=False)
def test_non_recurrent_work_contacts_no_random_no_sick_sat(
states, params_with_positive
):
a_saturday = states[states["date"] == pd.Timestamp("2020-04-04")].copy()
a_saturday["symptomatic"] = False
a_saturday["participates_saturday"] = [True, True, True] + [False] * (
len(a_saturday) - 3
)
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_saturday,
params=params_with_positive.loc["work_non_recurrent"],
on_weekends="participates",
query="occupation == 'working'",
seed=433,
)
expected = pd.Series(0, index=a_saturday.index)
expected[:2] = 2
assert_series_equal(res, expected, check_names=False, check_dtype=False)
def test_non_recurrent_work_contacts_no_random_with_sick(
a_thursday, params_with_positive
):
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params_with_positive.loc["work_non_recurrent"],
on_weekends=False,
query="occupation == 'working'",
seed=448,
)
expected = a_thursday["age_group"].replace({"10-19": 0.0, "40-49": 2.0})
expected[:20] = 0.0
assert_series_equal(res, expected, check_names=False, check_dtype=False)
def test_non_recurrent_work_contacts_random_with_sick(a_thursday):
np.random.seed(77)
params = pd.DataFrame.from_dict(
{
"category": ["work_non_recurrent"] * 4,
"subcategory": ["all"] * 2
+ ["symptomatic_multiplier", "positive_test_multiplier"],
"name": [
3,
2,
"symptomatic_multiplier",
"positive_test_multiplier",
], # nr of contacts
"value": [0.5, 0.5, 0.0, 0.0], # probability
}
).set_index(["category", "subcategory", "name"])
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params.loc["work_non_recurrent"],
on_weekends=False,
query="occupation == 'working'",
seed=338,
)
assert (res[:20] == 0).all() # symptomatics
assert (res[a_thursday["occupation"] != "working"] == 0).all() # non workers
healthy_workers = (a_thursday["occupation"] == "working") & (
a_thursday["cd_symptoms_false"] < 0
)
assert res[healthy_workers].isin([2, 3]).all()
# ------------------------------------------------------------------------------------
def test_non_recurrent_other_contacts_no_random_no_sick(a_thursday):
a_thursday["symptomatic"] = False
params = pd.DataFrame.from_dict(
{
"category": ["other_non_recurrent"] * 3,
"subcategory": [
"all",
"symptomatic_multiplier",
"positive_test_multiplier",
],
"name": [
2,
"symptomatic_multiplier",
"positive_test_multiplier",
], # nr of contacts
"value": [1.0, 0.0, 0.0], # probability
}
).set_index(["category", "subcategory", "name"])
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params.loc["other_non_recurrent"],
on_weekends=True,
query=None,
seed=334,
)
expected = pd.Series(data=2, index=a_thursday.index)
assert_series_equal(res, expected, check_names=False, check_dtype=False)
def test_non_recurrent_other_contacts_no_random_with_sick(a_thursday):
params = pd.DataFrame.from_dict(
{
"category": ["other_non_recurrent"] * 3,
"subcategory": [
"all",
"symptomatic_multiplier",
"positive_test_multiplier",
],
"name": [
2,
"symptomatic_multiplier",
"positive_test_multiplier",
], # nr of contacts
"value": [1.0, 0.0, 0.0], # probability
}
).set_index(["category", "subcategory", "name"])
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params.loc["other_non_recurrent"],
on_weekends=True,
query=None,
seed=332,
)
expected = pd.Series(data=2, index=a_thursday.index)
expected[:20] = 0
assert_series_equal(res, expected, check_names=False, check_dtype=False)
def test_non_recurrent_other_contacts_random_with_sick(a_thursday):
np.random.seed(770)
params = pd.DataFrame.from_dict(
{
"category": ["other_non_recurrent"] * 4,
"subcategory": ["all"] * 2
+ ["symptomatic_multiplier", "positive_test_multiplier"],
"name": [
3,
2,
"symptomatic_multiplier",
"positive_test_multiplier",
], # nr of contacts
"value": [0.5, 0.5, 0.0, 0.0], # probability
}
).set_index(["category", "subcategory", "name"])
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params.loc["other_non_recurrent"],
on_weekends=True,
query=None,
seed=474,
)
assert (res[:20] == 0).all() # symptomatics
assert res[a_thursday["cd_symptoms_false"] < 0].isin([2, 3]).all()
# --------------------------------- General Functions ---------------------------------
def test_draw_nr_of_contacts_always_five(states):
dist = pd.DataFrame(
data=[[4, 0, "all"], [5, 1, "all"]], columns=["name", "value", "subcategory"]
).set_index(["subcategory", "name"])["value"]
pop = pd.Series(data=True, index=states.index)
res = _draw_nr_of_contacts(dist, pop, states, seed=939)
expected = pd.Series(5.0, index=states.index)
assert_series_equal(res, expected, check_dtype=False)
def test_draw_nr_of_contacts_mean_5(states):
# this relies on the law of large numbers
np.random.seed(3499)
dist = pd.DataFrame(
[[4, 0.5, "all"], [6, 0.5, "all"]], columns=["name", "value", "subcategory"]
).set_index(["subcategory", "name"])["value"]
pop = pd.Series(data=True, index=states.index)
res = _draw_nr_of_contacts(dist, pop, states, seed=939)
assert res.isin([4, 6]).all()
assert res.mean() == pytest.approx(5, 0.01)
def test_draw_nr_of_contacts_differ_btw_ages(states):
dist = pd.DataFrame.from_dict(
{"name": [0, 6], "value": [1, 1], "subcategory": ["10-19", "40-49"]}
).set_index(["subcategory", "name"])["value"]
pop = pd.Series(data=True, index=states.index)
res = _draw_nr_of_contacts(dist, pop, states, seed=939)
assert (res[states["age_group"] == "10-19"] == 0).all()
assert (res[states["age_group"] == "40-49"] == 6).all()
def test_draw_nr_of_contacts_differ_btw_ages_random(states):
np.random.seed(24)
dist = pd.DataFrame(
data=[
[0, 0.5, "10-19"],
[1, 0.5, "10-19"],
[6, 0.5, "40-49"],
[7, 0.5, "40-49"],
],
columns=["name", "value", "subcategory"],
).set_index(["subcategory", "name"])["value"]
pop = pd.Series(data=True, index=states.index)
res = _draw_nr_of_contacts(dist, pop, states, seed=24)
young = res[states["age_group"] == "10-19"]
old = res[states["age_group"] == "40-49"]
assert young.isin([0, 1]).all()
assert old.isin([6, 7]).all()
assert young.mean() == pytest.approx(0.5, 0.05)
assert old.mean() == pytest.approx(6.5, 0.05)
# ------------------------------------------------------------------------------------
def test_reduce_non_recurrent_contacts_on_condition(states):
nr_of_contacts = pd.Series(data=10, index=states.index)
states["symptomatic"] = [True, True, True] + [False] * (len(states) - 3)
multiplier = 0.5
states.loc[:1, "quarantine_compliance"] = 0.3
expected = pd.Series([10, 10, 0] + [10] * (len(states) - 3))
res = reduce_contacts_on_condition(
contacts=nr_of_contacts,
states=states,
multiplier=multiplier,
condition="symptomatic",
is_recurrent=False,
)
assert_series_equal(res, expected, check_dtype=False)
def test_reduce_recurrent_contacts_on_condition(states):
participating = pd.Series(data=True, index=states.index)
states["symptomatic"] = [True, True, True] + [False] * (len(states) - 3)
states.loc[:0, "quarantine_compliance"] = 0.3
multiplier = 0.5
res = reduce_contacts_on_condition(
contacts=participating,
states=states,
multiplier=multiplier,
condition="symptomatic",
is_recurrent=True,
)
expected = pd.Series([True, False, False] + [True] * (len(states) - 3))
assert_series_equal(res, expected, check_dtype=False)
# ------------------------------------------------------------------------------------
def test_meet_daily_other_contacts():
states = pd.DataFrame()
states["symptomatic"] = [False, False, False, True]
states["knows_infectious"] = [False, False, False, False]
states["knows_immune"] = False
states["cd_received_test_result_true"] = -20
states["daily_meeting_id"] = [-1, 2, 2, 2]
states["knows_currently_infected"] = states.eval(
"knows_infectious | (knows_immune & symptomatic) "
"| (knows_immune & (cd_received_test_result_true >= -13))"
)
states["quarantine_compliance"] = 1.0
params = pd.DataFrame()
params["value"] = [0.0, 0.0]
params["subcategory"] = ["symptomatic_multiplier", "positive_test_multiplier"]
params["name"] = params["subcategory"]
params = params.set_index(["subcategory", "name"])
res = meet_daily_other_contacts(
states, params, group_col_name="daily_meeting_id", seed=None
)
expected = pd.Series([False, True, True, False])
assert_series_equal(res, expected, check_names=False)
def test_identify_ppl_affected_by_vacation():
states = pd.DataFrame()
# 0: unaffected
# 1: with child
# 2: with educ worker
# 3: with retired
states["hh_id"] = [0, 0, 1, 1, 1, 2, 2, 3, 3]
states["occupation"] = [
# 0
"working",
"stays_home",
# 1
"school",
"working",
"stays_home",
# 2
"nursery_teacher",
"working",
# 3
"retired",
"stays_home",
]
states["educ_worker"] = [False] * 5 + [True] + [False] * 3
res = _identify_ppl_affected_by_vacation(states)
expected = pd.Series([False, False] + [True] * 7)
assert_series_equal(res, expected, check_names=False)
def test_draw_potential_vacation_contacts_not_random():
state_to_vacation = {"A": "Easter", "B": "Spring"} # C has no vacation
states = pd.DataFrame()
states["state"] = ["A", "A", "B", "B", "C"]
    params = pd.DataFrame()
from math import log, e
from time import process_time # remove this once functions are finished
import numpy as np
import pandas as pd
#%% Define some functions for statistical analysis on model ensembles
# information entropy - this assumes the inputs are labels, but also works with numerics
def entropy_custom(array, base=None):
""" Computes entropy of label distribution. """
n_labels = len(array)
if n_labels <= 1:
return 0
value,counts = np.unique(array, return_counts=True)
probs = counts / n_labels
n_classes = np.count_nonzero(probs)
if n_classes <= 1:
return 0
ent = 0.
# Compute entropy
base = e if base is None else base
for i in probs:
ent -= i * log(i, base)
return ent
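# Worked example (hedged, not from the source): with the natural-log default, three
# equally frequent labels give ln(3) nats and a constant column carries no information:
#   entropy_custom(np.array(["sand", "shale", "coal"]))   # ~1.0986
#   entropy_custom(np.array(["sand", "sand", "sand"]))    # 0.0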
#%% calculate the probability (between 0 and 1) of a lithology in a cell
def litho_probabilites(array):
lithos = np.unique(array)
    litho_prob = pd.DataFrame([])
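    # The function body is cut off here. One hedged way to complete it (an assumption,
    # not the original code) is the per-cell share of ensemble members holding each
    # lithology, e.g. for an (n_models, n_cells) array:
    #   for litho in lithos:
    #       litho_prob[litho] = (array == litho).mean(axis=0)
    #   return litho_prob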
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
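    # Hedged illustration (not part of this file): concrete test classes bind these
    # hooks to a single engine, roughly
    #   class TestCParser(ParserTests, tm.TestCase):
    #       def read_csv(self, *args, **kwds):
    #           kwds['engine'] = 'c'
    #           return read_csv(*args, **kwds)
    # and analogously with engine='python' for the pure-Python parser.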
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
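    # Hedged usage sketch for the helper above (file name and sizes are made up):
    #   df = self.construct_dataframe(num_rows=10000)
    #   with tm.ensure_clean('__threaded__.csv') as path:
    #       df.to_csv(path)
    #       result = self.generate_multithread_dataframe(path, 10000, num_tasks=4)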
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
            # we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
            # check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
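# columns 1 ('date') and 2 ('NominalTime') are combined into a single 'nominal' datetime column and used as the index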
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
| StringIO(data3) | pandas.compat.StringIO |
"""Permutation test function as described in CellPhoneDB 2.0."""
from abc import ABC
from types import MappingProxyType
from typing import (
Any,
List,
Tuple,
Union,
Mapping,
Iterable,
Optional,
Sequence,
TYPE_CHECKING,
)
from functools import partial
from itertools import product
from collections import namedtuple
from scanpy import logging as logg
from anndata import AnnData
from numba import njit, prange # noqa: F401
from scipy.sparse import csc_matrix
import numpy as np
import pandas as pd
from squidpy._docs import d, inject_docs
from squidpy._utils import Signal, SigQueue, parallelize, _get_n_cores
from squidpy.gr._utils import (
_save_data,
_assert_positive,
_create_sparse_df,
_check_tuple_needles,
_assert_categorical_obs,
)
from squidpy._constants._constants import CorrAxis, ComplexPolicy
from squidpy._constants._pkg_constants import Key
__all__ = ["ligrec", "PermutationTest"]
StrSeq = Sequence[str]
SeqTuple = Sequence[Tuple[str, str]]
Interaction_t = Union[pd.DataFrame, Mapping[str, StrSeq], StrSeq, Tuple[StrSeq, StrSeq], SeqTuple]
Cluster_t = Union[StrSeq, Tuple[StrSeq, StrSeq], SeqTuple]
SOURCE = "source"
TARGET = "target"
TempResult = namedtuple("TempResult", ["means", "pvalues"])
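# Source template for the numba-compiled permutation kernel; _create_template() below fills in the
# per-cluster accumulator init, the row-dispatch loop and the group-mean stacking, and toggles the
# parallel / return-means variants.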
_template = """
@njit(parallel={parallel}, cache=False, fastmath=False)
def _test_{n_cls}_{ret_means}_{parallel}(
interactions: np.ndarray, # [np.uint32],
interaction_clusters: np.ndarray, # [np.uint32],
data: np.ndarray, # [np.float64],
clustering: np.ndarray, # [np.uint32],
mean: np.ndarray, # [np.float64],
mask: np.ndarray, # [np.bool_],
res: np.ndarray, # [np.float64],
{args}
) -> None:
{init}
{loop}
{finalize}
for i in prange(len(interactions)):
rec, lig = interactions[i]
for j in prange(len(interaction_clusters)):
c1, c2 = interaction_clusters[j]
m1, m2 = mean[rec, c1], mean[lig, c2]
if np.isnan(res[i, j]):
continue
if m1 > 0 and m2 > 0:
{set_means}
if mask[rec, c1] and mask[lig, c2]:
# both rec, lig are sufficiently expressed in c1, c2
res[i, j] += (groups[c1, rec] + groups[c2, lig]) > (m1 + m2)
else:
res[i, j] = np.nan
else:
# res_means is initialized with 0s
res[i, j] = np.nan
"""
def _create_template(n_cls: int, return_means: bool = False, parallel: bool = True) -> str:
if n_cls <= 0:
raise ValueError(f"Expected number of clusters to be positive, found `{n_cls}`.")
rng = range(n_cls)
init = "".join(
f"""
g{i} = np.zeros((data.shape[1],), dtype=np.float64); s{i} = 0"""
for i in rng
)
loop_body = """
if cl == 0:
g0 += data[row]
s0 += 1"""
loop_body = loop_body + "".join(
f"""
elif cl == {i}:
g{i} += data[row]
s{i} += 1"""
for i in range(1, n_cls)
)
loop = f"""
for row in prange(data.shape[0]):
cl = clustering[row]
{loop_body}
else:
assert False, "Unhandled case."
"""
finalize = ", ".join(f"g{i} / s{i}" for i in rng)
finalize = f"groups = np.stack(({finalize}))"
if return_means:
args = "res_means: np.ndarray, # [np.float64]"
set_means = "res_means[i, j] = (m1 + m2) / 2.0"
else:
args = set_means = ""
return _template.format(
n_cls=n_cls,
parallel=bool(parallel),
ret_means=int(return_means),
args=args,
init=init,
loop=loop,
finalize=finalize,
set_means=set_means,
)
def _fdr_correct(
pvals: pd.DataFrame, corr_method: str, corr_axis: Union[str, CorrAxis], alpha: float = 0.05
) -> pd.DataFrame:
"""Correct p-values for FDR along specific axis in ``pvals``."""
from pandas.core.arrays.sparse import SparseArray
from statsmodels.stats.multitest import multipletests
def fdr(pvals: pd.Series) -> SparseArray:
_, qvals, _, _ = multipletests(
np.nan_to_num(pvals.values, copy=True, nan=1.0),
method=corr_method,
alpha=alpha,
is_sorted=False,
returnsorted=False,
)
qvals[np.isnan(pvals.values)] = np.nan
return SparseArray(qvals, dtype=qvals.dtype, fill_value=np.nan)
corr_axis = CorrAxis(corr_axis)
if corr_axis == CorrAxis.CLUSTERS:
# clusters are in columns
pvals = pvals.apply(fdr)
elif corr_axis == CorrAxis.INTERACTIONS:
pvals = pvals.T.apply(fdr).T
else:
raise NotImplementedError(f"FDR correction for `{corr_axis}` is not implemented.")
return pvals
@d.get_full_description(base="PT")
@d.get_sections(base="PT", sections=["Parameters"])
@d.dedent
class PermutationTestABC(ABC):
"""
Class for receptor-ligand interaction testing.
The expected workflow is::
pt = PermutationTest(adata).prepare()
res = pt.test("clusters")
Parameters
----------
%(adata)s
use_raw
Whether to access :attr:`anndata.AnnData.raw`.
"""
def __init__(self, adata: AnnData, use_raw: bool = True):
if not isinstance(adata, AnnData):
raise TypeError(f"Expected `adata` to be of type `anndata.AnnData`, found `{type(adata).__name__}`.")
if not adata.n_obs:
raise ValueError("No cells are in `adata.obs_names`.")
if not adata.n_vars:
raise ValueError("No genes are in `adata.var_names`.")
self._adata = adata
if use_raw:
if adata.raw is None:
raise AttributeError("No `.raw` attribute found. Try specifying `use_raw=False`.")
if adata.raw.n_obs != adata.n_obs:
raise ValueError(f"Expected `{adata.n_obs}` cells in `.raw` object, found `{adata.raw.n_obs}`.")
adata = adata.raw
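# cache the expression matrix as a sparse (cells x genes) DataFrame indexed by obs/var names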
self._data = pd.DataFrame.sparse.from_spmatrix(
csc_matrix(adata.X), index=adata.obs_names, columns=adata.var_names
)
self._interactions: Optional[pd.DataFrame] = None
self._filtered_data: Optional[pd.DataFrame] = None
@d.get_full_description(base="PT_prepare")
@d.get_sections(base="PT_prepare", sections=["Parameters", "Returns"])
@inject_docs(src=SOURCE, tgt=TARGET, cp=ComplexPolicy)
def prepare(
self, interactions: Interaction_t, complex_policy: Union[str, ComplexPolicy] = ComplexPolicy.MIN.v
) -> "PermutationTestABC":
"""
Prepare self for running the permutation test.
Parameters
----------
interactions
Interaction to test. The type can be one of:
- :class:`pandas.DataFrame` - must contain at least 2 columns named `{src!r}` and `{tgt!r}`.
- :class:`dict` - dictionary with at least 2 keys named `{src!r}` and `{tgt!r}`.
- :class:`typing.Sequence` - Either a sequence of :class:`str`, in which case all combinations are
produced, or a sequence of :class:`tuple` of 2 :class:`str` or a :class:`tuple` of 2 sequences.
If `None`, the interactions are extracted from :mod:`omnipath`. Protein complexes can be specified by
delimiting the components with `'_'`, such as `'alpha_beta_gamma'`.
complex_policy
Policy on how to handle complexes. Valid options are:
- `{cp.MIN.s!r}` - select gene with the minimum average expression. This is the same as in
:cite:`cellphonedb`.
- `{cp.ALL.s!r}` - select all possible combinations between `{src!r}` and `{tgt!r}` complexes.
Returns
-------
Sets the following attributes and returns :attr:`self`:
- :attr:`interactions` - filtered interactions whose `{src!r}` and `{tgt!r}` are both in the data.
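Example
-------
A minimal usage sketch (the gene pairs below are placeholders and must be present in
``adata.var_names`` for the interactions to survive filtering)::

    pt = PermutationTest(adata).prepare([("TGFB1", "TGFBR1"), ("IL6", "IL6R")])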
"""
complex_policy = ComplexPolicy(complex_policy)
if isinstance(interactions, Mapping):
interactions = | pd.DataFrame(interactions) | pandas.DataFrame |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
import os
import pandas as pd
from Swing_old.util.Evaluator import Evaluator
import pdb
import numpy as np
import kdpee
#get all pickle files
path="/projects/p20519/roller_output/optimizing_window_size/RandomForest/janes/"
filenames = next(os.walk(path))[2]
nfiles = len(filenames)
#organize pickled objects by dataset analyzed
obj_list = []
counter = 0
image_file_path = "/home/jjw036/Swing/janes"
target_dataset = '/projects/p20519/Swing/data/invitro/janes_timeseries.tsv'
img_suffix = "1"
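# gp_* / dm_* below look like axes placement values (left/bottom/width/height) for the figure assembled later in this script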
gp_left = 0.2
gp_bottom = 0.1
gp_width = 0.7
gp_height = 0.2
padding = 0.01
numTFs = 200
dm_left = gp_left
dm_bottom = gp_bottom+gp_height+2*padding
dm_width = gp_width
box_height = 0.03
dm_height = box_height*numTFs
tableau20 = [ (152,223,138),(31, 119, 180), (174, 199, 232), (255, 127, 14),
(255, 187, 120), (44, 160, 44), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229),
(214,39,40)]
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
dataset_dict = {}
best_alpha_list = []
aupr_list = []
auroc_list = []
tpr_list = []
fpr_list = []
precision_list = []
recall_list = []
aupr_list2 = []
auroc_list2 = []
tpr_list2 = []
fpr_list2 = []
precision_list2 = []
recall_list2 = []
entropies = []
window_start_list = []
window_end_list = []
window_auroc = []
window_size_list = []
fig = plt.figure()
colors = np.linspace(0,1,22)
raw_data_list = []
for file in filenames:
full_path = path + file
#print(full_path)
try:
roller_obj = | pd.read_pickle(full_path) | pandas.read_pickle |
import numpy as np
import pandas as pd
from numba import njit
import pytest
import os
from collections import namedtuple
from itertools import product, combinations
from vectorbt import settings
from vectorbt.utils import checks, config, decorators, math, array, random, enum, data, params
from tests.utils import hash
seed = 42
# ############# config.py ############# #
class TestConfig:
def test_config(self):
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=False)
conf['b']['d'] = 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=True)
conf['a'] = 2
with pytest.raises(Exception) as e_info:
conf['d'] = 2
with pytest.raises(Exception) as e_info:
conf.update(d=2)
conf.update(d=2, force_update=True)
assert conf['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, read_only=True)
with pytest.raises(Exception) as e_info:
conf['a'] = 2
with pytest.raises(Exception) as e_info:
del conf['a']
with pytest.raises(Exception) as e_info:
conf.pop('a')
with pytest.raises(Exception) as e_info:
conf.popitem()
with pytest.raises(Exception) as e_info:
conf.clear()
with pytest.raises(Exception) as e_info:
conf.update(a=2)
assert isinstance(conf.merge_with(dict(b=dict(d=2))), config.Config)
assert conf.merge_with(dict(b=dict(d=2)), read_only=True).read_only
assert conf.merge_with(dict(b=dict(d=2)))['b']['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': [1, 2]}})
conf['a'] = 1
conf['b']['c'].append(3)
conf['b']['d'] = 2
assert conf == {'a': 1, 'b': {'c': [1, 2, 3], 'd': 2}}
conf.reset()
assert conf == {'a': 0, 'b': {'c': [1, 2]}}
def test_merge_dicts(self):
assert config.merge_dicts({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2}
assert config.merge_dicts({'a': 1}, {'a': 2}) == {'a': 2}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'c': 3}}) == {'a': {'b': 2, 'c': 3}}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'b': 3}}) == {'a': {'b': 3}}
def test_configured(self):
class H(config.Configured):
def __init__(self, a, b=2, **kwargs):
super().__init__(a=a, b=b, **kwargs)
assert H(1).config == {'a': 1, 'b': 2}
assert H(1).copy(b=3).config == {'a': 1, 'b': 3}
assert H(1).copy(c=4).config == {'a': 1, 'b': 2, 'c': 4}
assert H(pd.Series([1, 2, 3])) == H(pd.Series([1, 2, 3]))
assert H(pd.Series([1, 2, 3])) != H(pd.Series([1, 2, 4]))
assert H(pd.DataFrame([1, 2, 3])) == H( | pd.DataFrame([1, 2, 3]) | pandas.DataFrame |
import pandas as pd
import requests
import datetime
import io
import zipfile
# dictionary containing team abbreviations and their first year in existance
first_season_map = {'ALT': 1884, 'ANA': 1997, 'ARI': 1998, 'ATH': 1871,
'ATL': 1966, 'BAL': 1872, 'BLA': 1901, 'BLN': 1892,
'BLU': 1884, 'BOS': 1871, 'BRA': 1872, 'BRG': 1890,
'BRO': 1884, 'BSN': 1876, 'BTT': 1914, 'BUF': 1879,
'BWW': 1890, 'CAL': 1965, 'CEN': 1875, 'CHC': 1876,
'CHI': 1871, 'CHW': 1901, 'CIN': 1876, 'CKK': 1891,
'CLE': 1871, 'CLV': 1879, 'COL': 1883, 'COR': 1884,
'CPI': 1884, 'DET': 1901, 'DTN': 1881, 'ECK': 1872,
'FLA': 1993, 'HAR': 1874, 'HOU': 1962, 'IND': 1878,
'KCA': 1955, 'KCC': 1884, 'KCN': 1886, 'KCP': 1914,
'KCR': 1969, 'KEK': 1871, 'LAA': 1961, 'LAD': 1958,
'LOU': 1876, 'MAN': 1872, 'MAR': 1873, 'MIA': 2012,
'MIL': 1884, 'MIN': 1961, 'MLA': 1901, 'MLG': 1878,
'MLN': 1953, 'MON': 1969, 'NAT': 1872, 'NEW': 1915,
'NHV': 1875, 'NYG': 1883, 'NYI': 1890, 'NYM': 1962,
'NYP': 1883, 'NYU': 1871, 'NYY': 1903, 'OAK': 1968,
'OLY': 1871, 'PBB': 1890, 'PBS': 1914, 'PHA': 1882,
'PHI': 1873, 'PHK': 1884, 'PHQ': 1890, 'PIT': 1882,
'PRO': 1878, 'RES': 1873, 'RIC': 1884, 'ROC': 1890,
'ROK': 1871, 'SDP': 1969, 'SEA': 1977, 'SEP': 1969,
'SFG': 1958, 'SLB': 1902, 'SLM': 1884, 'SLR': 1875,
'STL': 1875, 'STP': 1884, 'SYR': 1879, 'TBD': 1998,
'TBR': 2008, 'TEX': 1972, 'TOL': 1884, 'TOR': 1977,
'TRO': 1871, 'WAS': 1873, 'WES': 1875, 'WHS': 1884,
'WIL': 1884, 'WOR': 1880, 'WSA': 1961, 'WSH': 1901,
'WSN': 2005}
def validate_datestring(date_text):
try:
datetime.datetime.strptime(date_text, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect data format, should be YYYY-MM-DD")
def sanitize_input(start_dt, end_dt, player_id):
# error if no player ID provided
if player_id is None:
raise ValueError("Player ID is required. If you need to find a player's id, try pybaseball.playerid_lookup(last_name, first_name) and use their key_mlbam. If you want statcast data for all players, try the statcast() function.")
# this id should be a string to place inside a url
player_id = str(player_id)
# if no dates are supplied, assume they want yesterday's data
# send a warning in case they wanted to specify
if start_dt is None and end_dt is None:
today = datetime.datetime.today()
start_dt = (today - datetime.timedelta(1)).strftime("%Y-%m-%d")
end_dt = today.strftime("%Y-%m-%d")
print("Warning: no date range supplied. Returning yesterday's Statcast data. For a different date range, try get_statcast(start_dt, end_dt).")
# if only one date is supplied, assume they only want that day's stats
# query in this case is from date 1 to date 1
if start_dt is None:
start_dt = end_dt
if end_dt is None:
end_dt = start_dt
# now that both dates are not None, make sure they are valid date strings
validate_datestring(start_dt)
validate_datestring(end_dt)
return start_dt, end_dt, player_id
def split_request(start_dt, end_dt, player_id, url):
"""
Splits Statcast queries to avoid request timeouts
"""
current_dt = datetime.datetime.strptime(start_dt, '%Y-%m-%d')
end_dt = datetime.datetime.strptime(end_dt, '%Y-%m-%d')
results = [] # list to hold data as it is returned
player_id = str(player_id)
print('Gathering Player Data')
# break query into multiple requests
while current_dt <= end_dt:
remaining = end_dt - current_dt
        # increment date ranges by at most 2190 days, matching the timedelta below
delta = min(remaining, datetime.timedelta(days=2190))
next_dt = current_dt + delta
start_str = current_dt.strftime('%Y-%m-%d')
end_str = next_dt.strftime('%Y-%m-%d')
# retrieve data
data = requests.get(url.format(start_str, end_str, player_id))
df = pd.read_csv(io.StringIO(data.text))
# add data to list and increment current dates
results.append(df)
current_dt = next_dt + datetime.timedelta(days=1)
return | pd.concat(results) | pandas.concat |
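# A minimal, offline sketch of the chunking logic in split_request above: the
# network call is removed so only the date partitioning is shown. The 2190-day
# step mirrors the constant used in the function above.
import datetime

def date_chunks(start_dt, end_dt, step_days=2190):
    current = datetime.datetime.strptime(start_dt, '%Y-%m-%d')
    end = datetime.datetime.strptime(end_dt, '%Y-%m-%d')
    chunks = []
    while current <= end:
        delta = min(end - current, datetime.timedelta(days=step_days))
        nxt = current + delta
        chunks.append((current.strftime('%Y-%m-%d'), nxt.strftime('%Y-%m-%d')))
        # start the next chunk the day after the previous one ends
        current = nxt + datetime.timedelta(days=1)
    return chunks

# e.g. date_chunks('2008-03-25', '2020-10-01') yields a few multi-year windows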
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import Index, MultiIndex, Series, date_range, isna
import pandas._testing as tm
@pytest.fixture(
params=[
"linear",
"index",
"values",
"nearest",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def nontemporal_method(request):
"""Fixture that returns an (method name, required kwargs) pair.
This fixture does not include method 'time' as a parameterization; that
method requires a Series with a DatetimeIndex, and is generally tested
separately from these non-temporal methods.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
@pytest.fixture(
params=[
"linear",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def interp_methods_ind(request):
"""Fixture that returns a (method name, required kwargs) pair to
be tested for various Index types.
    This fixture does not include the methods 'time', 'index', 'nearest',
    and 'values' as parameterizations.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
class TestSeriesInterpolateData:
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method="linear")
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series(
[d.toordinal() for d in datetime_series.index], index=datetime_series.index
).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method="time")
tm.assert_series_equal(time_interp, ord_ts)
def test_interpolate_time_raises_for_non_timeseries(self):
# When method='time' is used on a non-TimeSeries that contains a null
# value, a ValueError should be raised.
non_ts = Series([0, 1, 2, np.NaN])
msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex"
with pytest.raises(ValueError, match=msg):
non_ts.interpolate(method="time")
@td.skip_if_no_scipy
def test_interpolate_cubicspline(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
result = ser.reindex(new_index).interpolate(method="cubicspline")[1:3]
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(
Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
).astype(float)
interp_s = ser.reindex(new_index).interpolate(method="pchip")
# does not blow up, GH5977
interp_s[49:51]
@td.skip_if_no_scipy
def test_interpolate_akima(self):
ser = Series([10, 11, 12, 13])
# interpolate at new_index where `der` is zero
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima")
tm.assert_series_equal(interp_s[1:3], expected)
# interpolate at new_index where `der` is a non-zero int
expected = Series(
[11.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0, 13.0],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima", der=1)
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial")
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
ser = | Series([10, 11, 12, 13]) | pandas.Series |
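# Why the nontemporal_method/interp_methods_ind fixtures above attach
# {"order": 1} to 'spline' and 'polynomial': those SciPy-backed methods
# require an explicit order argument, while the other methods do not.
# Minimal sketch; assumes SciPy is installed.
import numpy as np
import pandas as pd

s = pd.Series([0.0, np.nan, 2.0, np.nan, 4.0])
s.interpolate(method="linear")               # no extra keyword needed
s.interpolate(method="polynomial", order=1)  # order is mandatory here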
# coding: utf-8
# # ADAGE: Pan-cancer gene expression
#
# **<NAME>017**
#
# This script trains a denoising autoencoder for cancer gene expression data using Keras. It modifies the framework presented by the ADAGE (Analysis using denoising autoencoders of gene expression) model published by [Tan _et al_ 2015](https://doi.org/10.1128/mSystems.00025-15).
#
# An ADAGE model learns a non-linear, reduced dimensional representation of gene expression data by bottlenecking raw features into a smaller set. The model is then trained by minimizing the information lost between input and reconstructed input.
#
# The specific model trained in this notebook consists of gene expression input (5000 most variably expressed genes by median absolute deviation) compressed down into one length 100 vector. The hidden layer is then decoded back to the original 5000 dimensions. The encoding (compression) layer has a `relu` activation and the decoding layer has a `sigmoid` activation. The weights of each layer are glorot uniform initialized. We include an l1 regularization term (see [`keras.regularizers.l1`](https://keras.io/regularizers/) for more details) to induce sparsity in the model, as well as a term controlling the probability of input feature dropout. This is only active during training and is the denoising aspect of the model. See [`keras.layers.noise.Dropout`](https://keras.io/layers/core/) for more details.
#
# We train the autoencoder with the Adadelta optimizer and MSE reconstruction loss.
#
# The pan-cancer ADAGE model is similar to tybalt, but does not constrain the features to match a Gaussian distribution. It is an active research question if the VAE learned features provide any additional benefits over ADAGE features. The VAE is a generative model and therefore permits easy generation of fake data. Additionally, we hypothesize that the VAE learns a manifold that can be interpolated to extract meaningful biological knowledge.
# In[1]:
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pydot
import graphviz
from keras.utils import plot_model
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.layers import Input, Dense, Dropout, Activation
from keras.layers.noise import GaussianDropout
from keras.models import Model
from keras.regularizers import l1
from keras import optimizers
import keras
# In[2]:
print(keras.__version__)
# In[3]:
get_ipython().magic('matplotlib inline')
plt.style.use('seaborn-notebook')
# In[4]:
sns.set(style="white", color_codes=True)
sns.set_context("paper", rc={"font.size":14,"axes.titlesize":15,"axes.labelsize":20,
'xtick.labelsize':14, 'ytick.labelsize':14})
# In[5]:
# Load RNAseq data
rnaseq_file = os.path.join('data', 'pancan_scaled_zeroone_rnaseq.tsv.gz')
rnaseq_df = pd.read_table(rnaseq_file, index_col=0)
print(rnaseq_df.shape)
rnaseq_df.head(2)
# In[6]:
np.random.seed(123)
# In[7]:
# Split 10% test set randomly
test_set_percent = 0.1
rnaseq_test_df = rnaseq_df.sample(frac=test_set_percent)
rnaseq_train_df = rnaseq_df.drop(rnaseq_test_df.index)
# ## Parameter Sweep Results
#
# We previously performed a parameter sweep search over a grid of potential hyperparameter values. Based on this sweep, we determined that the optimal ADAGE parameters are:
#
# | Parameter | Optimal Setting |
# | :-------: | :-------------: |
# | Learning Rate | 1.1 |
# | Sparsity | 0 |
# | Noise | 0.05 |
# | Epochs | 100 |
# | Batch Size | 50 |
# In[8]:
num_features = rnaseq_df.shape[1]
encoding_dim = 100
sparsity = 0
noise = 0.05
epochs = 100
batch_size = 50
learning_rate = 1.1
# In[9]:
# Build the Keras graph
input_rnaseq = Input(shape=(num_features, ))
encoded_rnaseq = Dropout(noise)(input_rnaseq)
encoded_rnaseq_2 = Dense(encoding_dim,
activity_regularizer=l1(sparsity))(encoded_rnaseq)
activation = Activation('relu')(encoded_rnaseq_2)
decoded_rnaseq = Dense(num_features, activation='sigmoid')(activation)
autoencoder = Model(input_rnaseq, decoded_rnaseq)
# In[10]:
autoencoder.summary()
# In[11]:
# Visualize the connections of the ADAGE model
output_model_file = os.path.join('figures', 'adage_architecture.png')
plot_model(autoencoder, to_file=output_model_file)
SVG(model_to_dot(autoencoder).create(prog='dot', format='svg'))
# In[12]:
# Separate out the encoder and decoder model
encoder = Model(input_rnaseq, encoded_rnaseq_2)
encoded_input = Input(shape=(encoding_dim, ))
decoder_layer = autoencoder.layers[-1]
decoder = Model(encoded_input, decoder_layer(encoded_input))
# In[13]:
# Compile the autoencoder to prepare for training
adadelta = optimizers.Adadelta(lr=learning_rate)
autoencoder.compile(optimizer=adadelta, loss='mse')
# In[14]:
get_ipython().run_cell_magic('time', '', 'hist = autoencoder.fit(np.array(rnaseq_train_df), np.array(rnaseq_train_df),\n shuffle=True,\n epochs=epochs,\n batch_size=batch_size,\n validation_data=(np.array(rnaseq_test_df), np.array(rnaseq_test_df)))')
# In[15]:
# Visualize training performance
history_df = pd.DataFrame(hist.history)
hist_plot_file = os.path.join('figures', 'adage_training.png')
ax = history_df.plot()
ax.set_xlabel('Epochs')
ax.set_ylabel('ADAGE Reconstruction Loss')
fig = ax.get_figure()
fig.savefig(hist_plot_file)
# ### Save Model Outputs
# In[16]:
# Encode rnaseq into the hidden/latent representation - and save output
encoded_samples = encoder.predict(np.array(rnaseq_df))
encoded_rnaseq_df = | pd.DataFrame(encoded_samples, index=rnaseq_df.index) | pandas.DataFrame |
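# A minimal sketch of how the encoded feature matrix built above could be
# persisted, continuing the variables from the cell above. The output file
# name is an assumption, not the notebook's actual path.
encoded_file = os.path.join('data', 'encoded_adage_features.tsv')
encoded_rnaseq_df.to_csv(encoded_file, sep='\t')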
import itertools
from typing import List, Tuple
import dask.dataframe as dd
import dask.dataframe.groupby as ddgb
import dask.delayed
import numpy as np
import pandas
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.types as ir
from ibis.backends.base import BaseBackend
from ibis.backends.pandas.udf import nullable # noqa
from .dispatch import execute_node, pre_execute
from .execution.util import (
assert_identical_grouping_keys,
make_meta_series,
make_selected_obj,
safe_scalar_type,
)
def make_struct_op_meta(op: ir.Expr) -> List[Tuple[str, np.dtype]]:
"""Unpacks a dt.Struct into a DataFrame meta"""
return list(
zip(
op._output_type.names,
[x.to_dask() for x in op._output_type.types],
)
)
@pre_execute.register(ops.ElementWiseVectorizedUDF)
@pre_execute.register(ops.ElementWiseVectorizedUDF, BaseBackend)
def pre_execute_elementwise_udf(op, *clients, scope=None, **kwargs):
"""Register execution rules for elementwise UDFs."""
input_type = op.input_type
# definitions
# Define an execution rule for elementwise operations on a
# grouped Series
nargs = len(input_type)
@execute_node.register(
ops.ElementWiseVectorizedUDF,
*(itertools.repeat(ddgb.SeriesGroupBy, nargs)),
)
def execute_udf_node_groupby(op, *args, **kwargs):
func = op.func
# all grouping keys must be identical
assert_identical_grouping_keys(*args)
# we're performing a scalar operation on grouped column, so
# perform the operation directly on the underlying Series
# and regroup after it's finished
args_objs = [make_selected_obj(arg) for arg in args]
groupings = args[0].index
return dd.map_partitions(func, *args_objs).groupby(groupings)
# Define an execution rule for a simple elementwise Series
# function
@execute_node.register(
ops.ElementWiseVectorizedUDF, *(itertools.repeat(dd.Series, nargs))
)
def execute_udf_node(op, *args, **kwargs):
# We have rewritten op.func to be a closure enclosing
# the kwargs, and therefore, we do not need to pass
# kwargs here. This is true for all udf execution in this
# file.
# See ibis.udf.vectorized.UserDefinedFunction
if isinstance(op._output_type, dt.Struct):
meta = make_struct_op_meta(op)
df = dd.map_partitions(op.func, *args, meta=meta)
return df
else:
name = args[0].name if len(args) == 1 else None
meta = pandas.Series(
[], name=name, dtype=op._output_type.to_dask()
)
df = dd.map_partitions(op.func, *args, meta=meta)
return df
@execute_node.register(
ops.ElementWiseVectorizedUDF, *(itertools.repeat(object, nargs))
)
def execute_udf_node_non_dask(op, *args, **kwargs):
return op.func(*args)
return scope
@pre_execute.register(ops.AnalyticVectorizedUDF)
@pre_execute.register(ops.AnalyticVectorizedUDF, BaseBackend)
@pre_execute.register(ops.ReductionVectorizedUDF)
@pre_execute.register(ops.ReductionVectorizedUDF, BaseBackend)
def pre_execute_analytic_and_reduction_udf(op, *clients, scope=None, **kwargs):
input_type = op.input_type
nargs = len(input_type)
# An execution rule to handle analytic and reduction UDFs over
# 1) an ungrouped window,
# 2) an ungrouped Aggregate node, or
# 3) an ungrouped custom aggregation context
    # Ungrouped analytic/reduction functions receive the entire Series at once.
    # This is generally not recommended.
@execute_node.register(type(op), *(itertools.repeat(dd.Series, nargs)))
def execute_udaf_node_no_groupby(op, *args, aggcontext, **kwargs):
# This function is in essence fully materializing the dd.Series and
        # passing that (now) pd.Series to aggcontext. This materialization
# happens at `.compute()` time, making this "lazy"
@dask.delayed
def lazy_agg(*series: pandas.Series):
return aggcontext.agg(series[0], op.func, *series[1:])
lazy_result = lazy_agg(*args)
# Depending on the type of operation, lazy_result is a Delayed that
# could become a dd.Series or a dd.core.Scalar
if isinstance(op, ops.AnalyticVectorizedUDF):
if isinstance(op._output_type, dt.Struct):
meta = make_struct_op_meta(op)
else:
meta = make_meta_series(
dtype=op._output_type.to_dask(),
name=args[0].name,
)
result = dd.from_delayed(lazy_result, meta=meta)
if args[0].known_divisions:
if not len({a.divisions for a in args}) == 1:
raise ValueError(
"Mixed divisions passed to AnalyticVectorized UDF"
)
# result is going to be a single partitioned thing, but we
# need it to be able to dd.concat it with other data
# downstream. We know that this udf operation did not change
# the index. Thus, we know the divisions, allowing dd.concat
# to align this piece with the other pieces.
original_divisions = args[0].divisions
result.divisions = (
original_divisions[0],
original_divisions[-1],
)
result = result.repartition(divisions=original_divisions)
else:
# lazy_result is a dd.core.Scalar from an ungrouped reduction
if isinstance(op._output_type, (dt.Array, dt.Struct)):
                # we're outputting a dt.Struct that will need to be destructured,
                # or an array of unknown size; we compute here so that the items
                # inside can be worked with downstream.
result = lazy_result.compute()
else:
output_meta = safe_scalar_type(op._output_type.to_dask())
result = dd.from_delayed(
lazy_result, meta=output_meta, verify_meta=False
)
return result
@execute_node.register(
ops.ReductionVectorizedUDF,
*(itertools.repeat(ddgb.SeriesGroupBy, nargs)),
)
def execute_reduction_node_groupby(op, *args, aggcontext, **kwargs):
# To apply a udf func to a list of grouped series we:
# 1. Grab the dataframe they're grouped off of
# 2. Grab the column name for each series
# 3. .apply a wrapper that performs the selection using the col name
        #    and applies the udf to those columns
# This way we rely on dask dealing with groups and pass the udf down
# to the frame level.
assert_identical_grouping_keys(*args)
func = op.func
groupings = args[0].index
parent_df = args[0].obj
out_type = op._output_type.to_dask()
grouped_df = parent_df.groupby(groupings)
col_names = [col._meta._selected_obj.name for col in args]
def apply_wrapper(df, apply_func, col_names):
cols = (df[col] for col in col_names)
return apply_func(*cols)
if len(groupings) > 1:
meta_index = pandas.MultiIndex.from_arrays(
[[0]] * len(groupings), names=groupings
)
meta_value = [dd.utils.make_meta(safe_scalar_type(out_type))]
else:
meta_index = pandas.Index([], name=groupings[0])
meta_value = []
return grouped_df.apply(
apply_wrapper,
func,
col_names,
meta= | pandas.Series(meta_value, index=meta_index, dtype=out_type) | pandas.Series |
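# A plain-pandas sketch of the wrapper pattern described in the comments of
# execute_reduction_node_groupby above: group the parent frame and select the
# needed columns by name inside the applied wrapper, instead of applying the
# UDF to each grouped Series separately. Column and group names are made up.
import pandas as pd

def my_udf(a, b):  # stand-in for op.func
    return (a * b).sum()

df = pd.DataFrame({"key": ["x", "x", "y"], "a": [1, 2, 3], "b": [4, 5, 6]})

def apply_wrapper(group_df, apply_func, col_names):
    cols = (group_df[c] for c in col_names)
    return apply_func(*cols)

df.groupby("key").apply(apply_wrapper, my_udf, ["a", "b"])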
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.api.types import is_scalar
from pandas.compat import to_str, string_types, numpy as numpy_compat, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import (
_get_dtype_from_object,
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_object_dtype,
is_integer_dtype,
)
from pandas.core.index import _ensure_index_from_sequences
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.util._validators import validate_bool_kwarg
import itertools
import functools
import numpy as np
import re
import sys
import warnings
from modin.error_message import ErrorMessage
from .utils import from_pandas, to_pandas, _inherit_docstrings
from .iterator import PartitionIterator
from .series import SeriesView
@_inherit_docstrings(
pandas.DataFrame, excluded=[pandas.DataFrame, pandas.DataFrame.__init__]
)
class DataFrame(object):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
data (numpy ndarray (structured or homogeneous) or dict):
Dict can contain Series, arrays, constants, or list-like
objects.
index (pandas.Index, list, ObjectID): The row index for this
DataFrame.
columns (pandas.Index): The column names for this DataFrame, in
pandas Index object.
dtype: Data type to force. Only a single dtype is allowed.
If None, infer
copy (boolean): Copy data from inputs.
Only affects DataFrame / 2d ndarray input.
query_compiler: A query compiler object to manage distributed computation.
"""
if isinstance(data, DataFrame):
self._query_compiler = data._query_compiler
return
# Check type of data and use appropriate constructor
if data is not None or query_compiler is None:
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
def __str__(self):
return repr(self)
def _build_repr_df(self, num_rows, num_cols):
# Add one here so that pandas automatically adds the dots
# It turns out to be faster to extract 2 extra rows and columns than to
# build the dots ourselves.
num_rows_for_head = num_rows // 2 + 1
num_cols_for_front = num_cols // 2 + 1
if len(self.index) <= num_rows:
head = self._query_compiler
tail = None
else:
head = self._query_compiler.head(num_rows_for_head)
tail = self._query_compiler.tail(num_rows_for_head)
if len(self.columns) <= num_cols:
head_front = head.to_pandas()
# Creating these empty to make the concat logic simpler
head_back = pandas.DataFrame()
tail_back = pandas.DataFrame()
if tail is not None:
tail_front = tail.to_pandas()
else:
tail_front = pandas.DataFrame()
else:
head_front = head.front(num_cols_for_front).to_pandas()
head_back = head.back(num_cols_for_front).to_pandas()
if tail is not None:
tail_front = tail.front(num_cols_for_front).to_pandas()
tail_back = tail.back(num_cols_for_front).to_pandas()
else:
tail_front = tail_back = pandas.DataFrame()
head_for_repr = pandas.concat([head_front, head_back], axis=1)
tail_for_repr = pandas.concat([tail_front, tail_back], axis=1)
return pandas.concat([head_for_repr, tail_for_repr])
def __repr__(self):
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 30
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self):
"""repr function for rendering in Jupyter Notebooks like Pandas
Dataframes.
Returns:
The HTML representation of a Dataframe.
"""
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 20
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
result = self._build_repr_df(num_rows, num_cols)._repr_html_()
if len(self.index) > num_rows or len(self.columns) > num_cols:
# We split so that we insert our correct dataframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</div>".format(
len(self.index), len(self.columns)
)
else:
return result
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.index
def _get_columns(self):
"""Get the columns for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.columns
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set this
"""
self._query_compiler.index = new_index
def _set_columns(self, new_columns):
"""Set the columns for this DataFrame.
Args:
new_index: The new index to set this
"""
self._query_compiler.columns = new_columns
index = property(_get_index, _set_index)
columns = property(_get_columns, _set_columns)
def _validate_eval_query(self, expr, **kwargs):
"""Helper function to check the arguments to eval() and query()
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
"""
        if isinstance(expr, str) and expr == "":
raise ValueError("expr cannot be an empty string")
if isinstance(expr, str) and "@" in expr:
ErrorMessage.not_implemented("Local variables not yet supported in eval.")
if isinstance(expr, str) and "not" in expr:
if "parser" in kwargs and kwargs["parser"] == "python":
ErrorMessage.not_implemented("'Not' nodes are not implemented.")
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# DataFrames have an invariant that requires they be 2 dimensions.
return 2
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
dtypes = self.dtypes.copy()
ftypes = ["{0}:dense".format(str(dtype)) for dtype in dtypes.values]
result = pandas.Series(ftypes, index=self.columns)
return result
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
return self._query_compiler.dtypes
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
return len(self.columns) == 0 or len(self.index) == 0
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return to_pandas(self).values
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return len(self.index), len(self.columns)
def _update_inplace(self, new_query_compiler):
"""Updates the current DataFrame inplace.
Args:
new_query_compiler: The new QueryCompiler to use to manage the data
"""
old_query_compiler = self._query_compiler
self._query_compiler = new_query_compiler
old_query_compiler.free()
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
if not callable(func):
raise ValueError("'{0}' object is not callable".format(type(func)))
ErrorMessage.non_verified_udf()
return DataFrame(query_compiler=self._query_compiler.applymap(func))
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(query_compiler=self._query_compiler.copy())
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
**kwargs
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
idx_name = ""
if callable(by):
by = by(self.index)
elif isinstance(by, string_types):
idx_name = by
by = self.__getitem__(by).values.tolist()
elif is_list_like(by):
if isinstance(by, pandas.Series):
by = by.values.tolist()
mismatch = (
len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
)
if all(obj in self for obj in by) and mismatch:
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
**kwargs
)
def sum(
self,
axis=None,
skipna=True,
level=None,
numeric_only=None,
min_count=0,
**kwargs
):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.sum(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def abs(self):
"""Apply an absolute value function to all numeric columns.
Returns:
A new DataFrame with the applied absolute value.
"""
self._validate_dtypes(numeric_only=True)
return DataFrame(query_compiler=self._query_compiler.abs())
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
return DataFrame(query_compiler=self._query_compiler.isin(values=values))
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isna())
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isnull())
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Returns:
A new DataFrame transposed from this DataFrame.
"""
return DataFrame(query_compiler=self._query_compiler.transpose(*args, **kwargs))
T = property(transpose)
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of NAs to require.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if is_list_like(axis):
axis = [pandas.DataFrame()._get_axis_number(ax) for ax in axis]
result = self
for ax in axis:
result = result.dropna(axis=ax, how=how, thresh=thresh, subset=subset)
return self._create_dataframe_from_compiler(result._query_compiler, inplace)
axis = pandas.DataFrame()._get_axis_number(axis)
if how is not None and how not in ["any", "all"]:
raise ValueError("invalid how option: %s" % how)
if how is None and thresh is None:
raise TypeError("must specify how or thresh")
if subset is not None:
if axis == 1:
indices = self.index.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
else:
indices = self.columns.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
new_query_compiler = self._query_compiler.dropna(
axis=axis, how=how, thresh=thresh, subset=subset
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def add(self, other, axis="columns", level=None, fill_value=None):
"""Add this DataFrame to another or a scalar/list.
Args:
other: What to add this this DataFrame.
axis: The axis to apply addition over. Only applicaable to Series
or list 'other'.
level: A level in the multilevel axis to add over.
fill_value: The value to fill NaN.
Returns:
A new DataFrame with the applied addition.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.add,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_or_object_only=True)
new_query_compiler = self._query_compiler.add(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def agg(self, func, axis=0, *args, **kwargs):
return self.aggregate(func, axis, *args, **kwargs)
def aggregate(self, func, axis=0, *args, **kwargs):
axis = pandas.DataFrame()._get_axis_number(axis)
result = None
if axis == 0:
try:
result = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
kwargs.pop("is_transform", None)
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, *args, **kwargs):
_axis = kwargs.pop("_axis", None)
if _axis is None:
_axis = getattr(self, "axis", 0)
kwargs.pop("_level", None)
if isinstance(arg, string_types):
return self._string_function(arg, *args, **kwargs)
# Dictionaries have complex behavior because they can be renamed here.
elif isinstance(arg, dict):
return self._default_to_pandas(pandas.DataFrame.agg, arg, *args, **kwargs)
elif is_list_like(arg) or callable(arg):
return self.apply(arg, axis=_axis, args=args, **kwargs)
else:
# TODO Make pandas error
raise ValueError("type {} is not callable".format(type(arg)))
def _string_function(self, func, *args, **kwargs):
assert isinstance(func, string_types)
f = getattr(self, func, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
assert len(args) == 0
assert (
len([kwarg for kwarg in kwargs if kwarg not in ["axis", "_level"]]) == 0
)
return f
f = getattr(np, func, None)
if f is not None:
return self._default_to_pandas(pandas.DataFrame.agg, func, *args, **kwargs)
raise ValueError("{} is an unknown string function".format(func))
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.align,
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
def all(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def any(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether any elements are True over requested axis
Note:
If axis=None or axis=0, this call applies on the column partitions,
otherwise operates on row partitions
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.any(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def append(self, other, ignore_index=False, verify_integrity=False, sort=None):
"""Append another DataFrame/list/Series to this one.
Args:
other: The object to append to this.
ignore_index: Ignore the index on appending.
verify_integrity: Verify the integrity of the index on completion.
Returns:
A new DataFrame containing the concatenated values.
"""
if isinstance(other, (pandas.Series, dict)):
if isinstance(other, dict):
other = pandas.Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = pandas.Index([other.name], name=self.index.name)
# Create a Modin DataFrame from this Series for ease of development
other = DataFrame(pandas.DataFrame(other).T, index=index)._query_compiler
elif isinstance(other, list):
if not isinstance(other[0], DataFrame):
other = pandas.DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = DataFrame(other.loc[:, self.columns])._query_compiler
else:
other = DataFrame(other)._query_compiler
else:
other = [obj._query_compiler for obj in other]
else:
other = other._query_compiler
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
appended_index = self.index.append(other.index)
is_valid = next((False for idx in appended_index.duplicated() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".format(
appended_index[appended_index.duplicated()]
)
)
query_compiler = self._query_compiler.concat(
0, other, ignore_index=ignore_index, sort=sort
)
return DataFrame(query_compiler=query_compiler)
def apply(
self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds
):
"""Apply a function along input axis of DataFrame.
Args:
func: The function to apply
axis: The axis over which to apply the func.
broadcast: Whether or not to broadcast.
raw: Whether or not to convert to a Series.
reduce: Whether or not to try to apply reduction procedures.
Returns:
Series or DataFrame, depending on func.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
ErrorMessage.non_verified_udf()
if isinstance(func, string_types):
if axis == 1:
kwds["axis"] = axis
return getattr(self, func)(*args, **kwds)
elif isinstance(func, dict):
if axis == 1:
raise TypeError(
"(\"'dict' object is not callable\", "
"'occurred at index {0}'".format(self.index[0])
)
if len(self.columns) != len(set(self.columns)):
warnings.warn(
"duplicate column names not supported with apply().",
FutureWarning,
stacklevel=2,
)
elif is_list_like(func):
if axis == 1:
raise TypeError(
"(\"'list' object is not callable\", "
"'occurred at index {0}'".format(self.index[0])
)
elif not callable(func):
return
query_compiler = self._query_compiler.apply(func, axis, *args, **kwds)
if isinstance(query_compiler, pandas.Series):
return query_compiler
return DataFrame(query_compiler=query_compiler)
def as_blocks(self, copy=True):
return self._default_to_pandas(pandas.DataFrame.as_blocks, copy=copy)
def as_matrix(self, columns=None):
"""Convert the frame to its Numpy-array representation.
Args:
columns: If None, return all columns, otherwise,
returns specified columns.
Returns:
values: ndarray
"""
# TODO this is very inefficient, also see __array__
return to_pandas(self).as_matrix(columns)
def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):
return self._default_to_pandas(
pandas.DataFrame.asfreq,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def asof(self, where, subset=None):
return self._default_to_pandas(pandas.DataFrame.asof, where, subset=subset)
def assign(self, **kwargs):
return self._default_to_pandas(pandas.DataFrame.assign, **kwargs)
def astype(self, dtype, copy=True, errors="raise", **kwargs):
col_dtypes = {}
if isinstance(dtype, dict):
if not set(dtype.keys()).issubset(set(self.columns)) and errors == "raise":
raise KeyError(
"Only a column name can be used for the key in"
"a dtype mappings argument."
)
col_dtypes = dtype
else:
for column in self.columns:
col_dtypes[column] = dtype
new_query_compiler = self._query_compiler.astype(col_dtypes, **kwargs)
return self._create_dataframe_from_compiler(new_query_compiler, not copy)
def at_time(self, time, asof=False):
return self._default_to_pandas(pandas.DataFrame.at_time, time, asof=asof)
def between_time(self, start_time, end_time, include_start=True, include_end=True):
return self._default_to_pandas(
pandas.DataFrame.between_time,
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='bfill')"""
new_df = self.fillna(
method="bfill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
if not inplace:
return new_df
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
shape = self.shape
if shape != (1,) and shape != (1, 1):
            raise ValueError(
                "The PandasObject does not have exactly 1 element. "
                "The truth value is therefore ambiguous. "
                "Use a.empty, a.item(), a.any() or a.all()."
            )
else:
return to_pandas(self).bool()
def boxplot(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwargs
):
return to_pandas(self).boxplot(
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs
)
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs):
# validate inputs
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
self._validate_dtypes(numeric_only=True)
if is_list_like(lower) or is_list_like(upper):
if axis is None:
raise ValueError("Must specify axis = 0 or 1")
self._validate_other(lower, axis)
self._validate_other(upper, axis)
inplace = validate_bool_kwarg(inplace, "inplace")
axis = numpy_compat.function.validate_clip_with_axis(axis, args, kwargs)
# any np.nan bounds are treated as None
if lower is not None and np.any(np.isnan(lower)):
lower = None
if upper is not None and np.any(np.isnan(upper)):
upper = None
new_query_compiler = self._query_compiler.clip(
lower=lower, upper=upper, axis=axis, inplace=inplace, *args, **kwargs
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def clip_lower(self, threshold, axis=None, inplace=False):
return self.clip(lower=threshold, axis=axis, inplace=inplace)
def clip_upper(self, threshold, axis=None, inplace=False):
return self.clip(upper=threshold, axis=axis, inplace=inplace)
def combine(self, other, func, fill_value=None, overwrite=True):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.combine,
other,
func,
fill_value=fill_value,
overwrite=overwrite,
)
def combine_first(self, other):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(pandas.DataFrame.combine_first, other=other)
def compound(self, axis=None, skipna=None, level=None):
return self._default_to_pandas(
pandas.DataFrame.compound, axis=axis, skipna=skipna, level=level
)
def consolidate(self, inplace=False):
return self._default_to_pandas(pandas.DataFrame.consolidate, inplace=inplace)
def convert_objects(
self,
convert_dates=True,
convert_numeric=False,
convert_timedeltas=True,
copy=True,
):
return self._default_to_pandas(
pandas.DataFrame.convert_objects,
convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy,
)
def corr(self, method="pearson", min_periods=1):
return self._default_to_pandas(
pandas.DataFrame.corr, method=method, min_periods=min_periods
)
def corrwith(self, other, axis=0, drop=False):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.corrwith, other, axis=axis, drop=drop
)
def count(self, axis=0, level=None, numeric_only=False):
"""Get the count of non-null objects in the DataFrame.
Arguments:
axis: 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
level: If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame.
numeric_only: Include only float, int, boolean data
Returns:
The count, in a Series (or DataFrame if level is specified).
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return self._query_compiler.count(
axis=axis, level=level, numeric_only=numeric_only
)
def cov(self, min_periods=None):
return self._default_to_pandas(pandas.DataFrame.cov, min_periods=min_periods)
def cummax(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative maximum across the DataFrame.
Args:
axis (int): The axis to take maximum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative maximum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
query_compiler=self._query_compiler.cummax(
axis=axis, skipna=skipna, **kwargs
)
)
def cummin(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative minimum across the DataFrame.
Args:
axis (int): The axis to cummin on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative minimum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
query_compiler=self._query_compiler.cummin(
axis=axis, skipna=skipna, **kwargs
)
)
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative product across the DataFrame.
Args:
axis (int): The axis to take product on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative product of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
query_compiler=self._query_compiler.cumprod(
axis=axis, skipna=skipna, **kwargs
)
)
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative sum across the DataFrame.
Args:
axis (int): The axis to take sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
query_compiler=self._query_compiler.cumsum(
axis=axis, skipna=skipna, **kwargs
)
)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding NaN values.
Args:
percentiles (list-like of numbers, optional):
The percentiles to include in the output.
include: White-list of data types to include in results
exclude: Black-list of data types to exclude in results
Returns: Series/DataFrame of summary statistics
"""
if include is not None:
if not is_list_like(include):
include = [include]
include = [np.dtype(i) for i in include]
if exclude is not None:
if not is_list_like(include):
exclude = [exclude]
exclude = [np.dtype(e) for e in exclude]
if percentiles is not None:
pandas.DataFrame()._check_percentile(percentiles)
return DataFrame(
query_compiler=self._query_compiler.describe(
percentiles=percentiles, include=include, exclude=exclude
)
)
def diff(self, periods=1, axis=0):
"""Finds the difference between elements on the axis requested
Args:
periods: Periods to shift for forming difference
axis: Take difference over rows or columns
Returns:
DataFrame with the diff applied
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.diff(periods=periods, axis=axis)
)
def div(self, other, axis="columns", level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.div,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.div(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def divide(self, other, axis="columns", level=None, fill_value=None):
"""Synonym for div.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
return self.div(other, axis, level, fill_value)
def dot(self, other):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(pandas.DataFrame.dot, other)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""Return new object with labels in requested axis removed.
Args:
labels: Index or column labels to drop.
axis: Whether to drop labels from the index (0 / 'index') or
columns (1 / 'columns').
index, columns: Alternative to specifying axis (labels, axis=1 is
equivalent to columns=labels).
level: For MultiIndex
inplace: If True, do operation inplace and return None.
errors: If 'ignore', suppress error and existing labels are
dropped.
Returns:
dropped : type of caller
"""
# TODO implement level
if level is not None:
return self._default_to_pandas(
pandas.DataFrame.drop,
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis = pandas.DataFrame()._get_axis_name(axis)
axes = {axis: labels}
elif index is not None or columns is not None:
axes, _ = pandas.DataFrame()._construct_axes_from_arguments(
(index, columns), {}
)
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
# TODO Clean up this error checking
if "index" not in axes:
axes["index"] = None
elif axes["index"] is not None:
if not is_list_like(axes["index"]):
axes["index"] = [axes["index"]]
if errors == "raise":
non_existant = [obj for obj in axes["index"] if obj not in self.index]
if len(non_existant):
raise ValueError(
"labels {} not contained in axis".format(non_existant)
)
else:
axes["index"] = [obj for obj in axes["index"] if obj in self.index]
# If the length is zero, we will just do nothing
if not len(axes["index"]):
axes["index"] = None
if "columns" not in axes:
axes["columns"] = None
elif axes["columns"] is not None:
if not is_list_like(axes["columns"]):
axes["columns"] = [axes["columns"]]
if errors == "raise":
non_existant = [
obj for obj in axes["columns"] if obj not in self.columns
]
if len(non_existant):
raise ValueError(
"labels {} not contained in axis".format(non_existant)
)
else:
axes["columns"] = [
obj for obj in axes["columns"] if obj in self.columns
]
# If the length is zero, we will just do nothing
if not len(axes["columns"]):
axes["columns"] = None
new_query_compiler = self._query_compiler.drop(
index=axes["index"], columns=axes["columns"]
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def drop_duplicates(self, subset=None, keep="first", inplace=False):
return self._default_to_pandas(
pandas.DataFrame.drop_duplicates, subset=subset, keep=keep, inplace=inplace
)
def duplicated(self, subset=None, keep="first"):
return self._default_to_pandas(
pandas.DataFrame.duplicated, subset=subset, keep=keep
)
def eq(self, other, axis="columns", level=None):
"""Checks element-wise that this is equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the eq over.
level: The Multilevel index level to apply eq over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.eq, other, axis=axis, level=level
)
other = self._validate_other(other, axis)
new_query_compiler = self._query_compiler.eq(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def equals(self, other):
"""
Checks if other DataFrame is elementwise equal to the current one
Returns:
Boolean: True if equal, otherwise False
"""
if isinstance(other, pandas.DataFrame):
# Copy into a Ray DataFrame to simplify logic below
other = DataFrame(other)
if not self.index.equals(other.index) or not self.columns.equals(other.columns):
return False
return all(self.eq(other).all())
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate a Python expression as a string using various backends.
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
parser: The parser to use to construct the syntax tree from the
expression. The default of 'pandas' parses code slightly
different than standard Python. Alternatively, you can parse
an expression using the 'python' parser to retain strict
Python semantics. See the enhancing performance documentation
for more details.
engine: The engine used to evaluate the expression.
truediv: Whether to use true division, like in Python >= 3
local_dict: A dictionary of local variables, taken from locals()
by default.
global_dict: A dictionary of global variables, taken from
globals() by default.
resolvers: A list of objects implementing the __getitem__ special
method that you can use to inject an additional collection
of namespaces to use for variable lookup. For example, this is
used in the query() method to inject the index and columns
variables that refer to their respective DataFrame instance
attributes.
level: The number of prior stack frames to traverse and add to
the current scope. Most users will not need to change this
parameter.
target: This is the target object for assignment. It is used when
there is variable assignment in the expression. If so, then
target must support item assignment with string keys, and if a
copy is being returned, it must also support .copy().
inplace: If target is provided, and the expression mutates target,
whether to modify target inplace. Otherwise, return a copy of
target with the mutation.
Returns:
ndarray, numeric scalar, DataFrame, Series
"""
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
if isinstance(new_query_compiler, pandas.Series):
return new_query_compiler
else:
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
freq=None,
adjust=True,
ignore_na=False,
axis=0,
):
return self._default_to_pandas(
pandas.DataFrame.ewm,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
freq=freq,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
def expanding(self, min_periods=1, freq=None, center=False, axis=0):
return self._default_to_pandas(
pandas.DataFrame.expanding,
min_periods=min_periods,
freq=freq,
center=center,
axis=axis,
)
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='ffill')
"""
new_df = self.fillna(
method="ffill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
if not inplace:
return new_df
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
**kwargs
):
"""Fill NA/NaN values using the specified method.
Args:
value: Value to use to fill holes. This value cannot be a list.
method: Method to use for filling holes in reindexed Series.
pad / ffill: propagate last valid observation forward to next valid.
backfill / bfill: use NEXT valid observation to fill gap.
axis: 0 or 'index', 1 or 'columns'.
inplace: If True, fill in place. Note: this will modify any other
views on this object.
limit: If method is specified, this is the maximum number of
consecutive NaN values to forward/backward fill. In other
words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method
is not specified, this is the maximum number of entries along
the entire axis where NaNs will be filled. Must be greater
than 0 if not None.
downcast: A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an
appropriate equal type.
Returns:
filled: DataFrame
"""
# TODO implement value passed as DataFrame
if isinstance(value, pandas.DataFrame) or isinstance(value, pandas.Series):
new_query_compiler = self._default_to_pandas(
pandas.DataFrame.fillna,
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs
)._query_compiler
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
inplace = validate_bool_kwarg(inplace, "inplace")
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if isinstance(value, (list, tuple)):
raise TypeError(
'"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__)
)
if value is None and method is None:
raise ValueError("must specify a fill method or value")
if value is not None and method is not None:
raise ValueError("cannot specify both a fill method and value")
if method is not None and method not in ["backfill", "bfill", "pad", "ffill"]:
expecting = "pad (ffill) or backfill (bfill)"
msg = "Invalid fill method. Expecting {expecting}. Got {method}".format(
expecting=expecting, method=method
)
raise ValueError(msg)
new_query_compiler = self._query_compiler.fillna(
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
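# Illustrative usage sketch (hypothetical `df` with NaNs, e.g.
# DataFrame({"a": [1.0, None, 3.0]}); not part of the original source):
#   df.fillna(0)                # fill holes with a scalar
#   df.fillna(method="ffill")   # propagate the last valid observation forward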
def filter(self, items=None, like=None, regex=None, axis=None):
"""Subset rows or columns based on their labels
Args:
items (list): list of labels to subset
like (string): retain labels where `arg in label == True`
regex (string): retain labels matching regex input
axis: axis to filter on
Returns:
A new DataFrame with the filter applied.
"""
nkw = com._count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if nkw == 0:
raise TypeError("Must pass either `items`, `like`, or `regex`")
if axis is None:
axis = "columns" # This is the default info axis for dataframes
axis = pandas.DataFrame()._get_axis_number(axis)
labels = self.columns if axis else self.index
if items is not None:
bool_arr = labels.isin(items)
elif like is not None:
def f(x):
return like in to_str(x)
bool_arr = labels.map(f).tolist()
else:
def f(x):
return matcher.search(to_str(x)) is not None
matcher = re.compile(regex)
bool_arr = labels.map(f).tolist()
if not axis:
return self[bool_arr]
return self[self.columns[bool_arr]]
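# Illustrative usage sketch (hypothetical `df` with columns ["one", "two", "three"]):
#   df.filter(items=["one", "three"])   # keep exact labels
#   df.filter(like="t")                 # keep labels containing "t"
#   df.filter(regex="e$", axis=1)       # keep labels matching a regex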
def first(self, offset):
return self._default_to_pandas(pandas.DataFrame.first, offset)
def first_valid_index(self):
"""Return index for first non-NA/null value.
Returns:
scalar: type of index
"""
return self._query_compiler.first_valid_index()
def floordiv(self, other, axis="columns", level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.floordiv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.floordiv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
@classmethod
def from_csv(
cls,
path,
header=0,
sep=",",
index_col=0,
parse_dates=True,
encoding=None,
tupleize_cols=None,
infer_datetime_format=False,
):
from .io import read_csv
return read_csv(
path,
header=header,
sep=sep,
index_col=index_col,
parse_dates=parse_dates,
encoding=encoding,
tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format,
)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None):
ErrorMessage.default_to_pandas()
return from_pandas(pandas.DataFrame.from_dict(data, orient=orient, dtype=dtype))
@classmethod
def from_items(cls, items, columns=None, orient="columns"):
ErrorMessage.default_to_pandas()
return from_pandas(
pandas.DataFrame.from_items(items, columns=columns, orient=orient)
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
):
ErrorMessage.default_to_pandas()
return from_pandas(
pandas.DataFrame.from_records(
data,
index=index,
exclude=exclude,
columns=columns,
coerce_float=coerce_float,
nrows=nrows,
)
)
def ge(self, other, axis="columns", level=None):
"""Checks element-wise that this is greater than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.ge, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.ge(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def get(self, key, default=None):
"""Get item from object for given key (DataFrame column, Panel
slice, etc.). Returns default value if not found.
Args:
key (DataFrame column, Panel slice) : the key for which value
to get
Returns:
value (type of items contained in object) : A value that is
stored at the key
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
result = self.dtypes.value_counts()
result.index = result.index.map(lambda x: str(x))
return result
def get_ftype_counts(self):
"""Get the counts of ftypes in this object.
Returns:
The counts of ftypes in this object.
"""
return self.ftypes.value_counts().sort_index()
def get_value(self, index, col, takeable=False):
return self._default_to_pandas(
pandas.DataFrame.get_value, index, col, takeable=takeable
)
def get_values(self):
return self._default_to_pandas(pandas.DataFrame.get_values)
def gt(self, other, axis="columns", level=None):
"""Checks element-wise that this is greater than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.gt, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.gt(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def head(self, n=5):
"""Get the first n rows of the DataFrame.
Args:
n (int): The number of rows to return.
Returns:
A new DataFrame with the first n rows of the DataFrame.
"""
if n >= len(self.index):
return self.copy()
return DataFrame(query_compiler=self._query_compiler.head(n))
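# Illustrative usage sketch (hypothetical df = DataFrame({"a": range(100)})):
#   df.head()     # first 5 rows
#   df.head(10)   # first 10 rows; a full copy is returned when n >= len(df.index)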
def hist(
self,
column=None,
by=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
sharex=False,
sharey=False,
figsize=None,
layout=None,
bins=10,
**kwargs
):
return self._default_to_pandas(
pandas.DataFrame.hist,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
bins=bins,
**kwargs
)
def idxmax(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the max value of the axis.
Args:
axis (int): Identify the max over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each maximum value for the axis
specified.
"""
if not all(d != np.dtype("O") for d in self.dtypes):
raise TypeError("reduction operation 'argmax' not allowed for this dtype")
return self._query_compiler.idxmax(axis=axis, skipna=skipna)
def idxmin(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the min value of the axis.
Args:
axis (int): Identify the min over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each minimum value for the axis
specified.
"""
if not all(d != np.dtype("O") for d in self.dtypes):
raise TypeError("reduction operation 'argmin' not allowed for this dtype")
return self._query_compiler.idxmin(axis=axis, skipna=skipna)
def infer_objects(self):
return self._default_to_pandas(pandas.DataFrame.infer_objects)
def info(
self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
):
"""Print a concise summary of a DataFrame, which includes the index
dtype and column dtypes, non-null values and memory usage.
Args:
verbose (bool, optional): Whether to print the full summary. Defaults
to true
buf (writable buffer): Where to send output. Defaults to sys.stdout
max_cols (int, optional): When to switch from verbose to truncated
output. By default, this is 100.
memory_usage (bool, str, optional): Specifies whether the total memory
usage of the DataFrame elements (including index) should be displayed.
True always show memory usage. False never shows memory usage. A value
of 'deep' is equivalent to "True with deep introspection". Memory usage
is shown in human-readable units (base-2 representation). Without deep
introspection a memory estimation is made based in column dtype and
number of rows assuming values consume the same memory amount for
corresponding dtypes. With deep memory introspection, a real memory
usage calculation is performed at the cost of computational resources.
Defaults to True.
null_counts (bool, optional): Whether to show the non-null counts. By
default, this is shown only when the frame is smaller than 100 columns
and 1690785 rows. A value of True always shows the counts and False
never shows the counts.
Returns:
Prints the summary of a DataFrame and returns None.
"""
# We will default to pandas because it will be faster than doing two passes
# over the data
buf = sys.stdout if not buf else buf
import io
with io.StringIO() as tmp_buf:
self._default_to_pandas(
pandas.DataFrame.info,
verbose=verbose,
buf=tmp_buf,
max_cols=max_cols,
memory_usage=memory_usage,
null_counts=null_counts,
)
result = tmp_buf.getvalue()
result = result.replace(
"pandas.core.frame.DataFrame", "modin.pandas.dataframe.DataFrame"
)
buf.write(result)
return None
index = self.index
columns = self.columns
dtypes = self.dtypes
# Set up default values
verbose = True if verbose is None else verbose
buf = sys.stdout if not buf else buf
max_cols = 100 if not max_cols else max_cols
memory_usage = True if memory_usage is None else memory_usage
if not null_counts:
if len(columns) < 100 and len(index) < 1690785:
null_counts = True
else:
null_counts = False
# Determine if actually verbose
actually_verbose = True if verbose and max_cols > len(columns) else False
if type(memory_usage) == str and memory_usage == "deep":
memory_usage_deep = True
else:
memory_usage_deep = False
# Start putting together output
# Class denoted in info() output
class_string = "<class 'modin.pandas.dataframe.DataFrame'>\n"
# Create the Index info() string by parsing self.index
index_string = index.summary() + "\n"
if null_counts:
counts = self._query_compiler.count()
if memory_usage:
memory_usage_data = self._query_compiler.memory_usage(
deep=memory_usage_deep, index=True
)
if actually_verbose:
# Create string for verbose output
col_string = "Data columns (total {0} columns):\n".format(len(columns))
for col, dtype in zip(columns, dtypes):
col_string += "{0}\t".format(col)
if null_counts:
col_string += "{0} non-null ".format(counts[col])
col_string += "{0}\n".format(dtype)
else:
# Create string for not verbose output
col_string = "Columns: {0} entries, {1} to {2}\n".format(
len(columns), columns[0], columns[-1]
)
# A summary of the dtypes in the dataframe
dtypes_string = "dtypes: "
for dtype, count in dtypes.value_counts().iteritems():
dtypes_string += "{0}({1}),".format(dtype, count)
dtypes_string = dtypes_string[:-1] + "\n"
# Create memory usage string
memory_string = ""
if memory_usage:
if memory_usage_deep:
memory_string = "memory usage: {0} bytes".format(memory_usage_data)
else:
memory_string = "memory usage: {0}+ bytes".format(memory_usage_data)
# Combine all the components of the info() output
result = "".join(
[class_string, index_string, col_string, dtypes_string, memory_string]
)
# Write to specified output buffer
buf.write(result)
def insert(self, loc, column, value, allow_duplicates=False):
"""Insert column into DataFrame at specified location.
Args:
loc (int): Insertion index. Must verify 0 <= loc <= len(columns).
column (hashable object): Label of the inserted column.
value (int, Series, or array-like): The values to insert.
allow_duplicates (bool): Whether to allow duplicate column names.
"""
if isinstance(value, (DataFrame, pandas.DataFrame)):
if len(value.columns) != 1:
raise ValueError("Wrong number of items passed 2, placement implies 1")
value = value.iloc[:, 0]
if len(self.index) == 0:
try:
value = pandas.Series(value)
except (TypeError, ValueError, IndexError):
raise ValueError(
"Cannot insert into a DataFrame with no defined index "
"and a value that cannot be converted to a "
"Series"
)
new_index = value.index.copy()
new_columns = self.columns.insert(loc, column)
new_query_compiler = DataFrame(
value, index=new_index, columns=new_columns
)._query_compiler
else:
if not is_list_like(value):
value = np.full(len(self.index), value)
if not isinstance(value, pandas.Series) and len(value) != len(self.index):
raise ValueError("Length of values does not match length of index")
if not allow_duplicates and column in self.columns:
raise ValueError("cannot insert {0}, already exists".format(column))
if loc > len(self.columns):
raise IndexError(
"index {0} is out of bounds for axis 0 with size {1}".format(
loc, len(self.columns)
)
)
if loc < 0:
raise ValueError("unbounded slice")
new_query_compiler = self._query_compiler.insert(loc, column, value)
self._update_inplace(new_query_compiler=new_query_compiler)
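# Illustrative usage sketch (hypothetical df = DataFrame({"a": [1, 2, 3]})):
#   df.insert(1, "b", [4, 5, 6])   # in-place: adds column "b" at position 1
#   df.insert(0, "c", 0)           # scalars are broadcast to the column length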
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction="forward",
downcast=None,
**kwargs
):
return self._default_to_pandas(
pandas.DataFrame.interpolate,
method=method,
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
downcast=downcast,
**kwargs
)
def iterrows(self):
"""Iterate over DataFrame rows as (index, Series) pairs.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A generator that iterates over the rows of the frame.
"""
index_iter = iter(self.index)
def iterrow_builder(df):
df.columns = self.columns
df.index = [next(index_iter)]
return df.iterrows()
partition_iterator = PartitionIterator(self._query_compiler, 0, iterrow_builder)
for v in partition_iterator:
yield v
def items(self):
"""Iterator over (column name, Series) pairs.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A generator that iterates over the columns of the frame.
"""
col_iter = iter(self.columns)
def items_builder(df):
df.columns = [next(col_iter)]
df.index = self.index
return df.items()
partition_iterator = PartitionIterator(self._query_compiler, 1, items_builder)
for v in partition_iterator:
yield v
def iteritems(self):
"""Iterator over (column name, Series) pairs.
Note:
Returns the same thing as .items()
Returns:
A generator that iterates over the columns of the frame.
"""
return self.items()
def itertuples(self, index=True, name="Pandas"):
"""Iterate over DataFrame rows as namedtuples.
Args:
index (boolean, default True): If True, return the index as the
first element of the tuple.
name (string, default "Pandas"): The name of the returned
namedtuples or None to return regular tuples.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A tuple representing row data. See args for varying tuples.
"""
index_iter = iter(self.index)
def itertuples_builder(df):
df.columns = self.columns
df.index = [next(index_iter)]
return df.itertuples(index=index, name=name)
partition_iterator = PartitionIterator(
self._query_compiler, 0, itertuples_builder
)
for v in partition_iterator:
yield v
def join(self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False):
"""Join two or more DataFrames, or a DataFrame with a collection.
Args:
other: What to join this DataFrame with.
on: A column name to use from the left for the join.
how: What type of join to conduct.
lsuffix: The suffix to add to column names that match on left.
rsuffix: The suffix to add to column names that match on right.
sort: Whether or not to sort.
Returns:
The joined DataFrame.
"""
if on is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.join,
other,
on=on,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
if isinstance(other, pandas.Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
# Joining the empty DataFrames with either index or columns is
# fast. It gives us proper error checking for the edge cases that
# would otherwise require a lot more logic.
pandas.DataFrame(columns=self.columns).join(
pandas.DataFrame(columns=other.columns),
lsuffix=lsuffix,
rsuffix=rsuffix,
).columns
return DataFrame(
query_compiler=self._query_compiler.join(
other._query_compiler,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
)
else:
# This constraint carried over from Pandas.
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
# See note above about error checking with an empty join.
pandas.DataFrame(columns=self.columns).join(
[pandas.DataFrame(columns=obj.columns) for obj in other],
lsuffix=lsuffix,
rsuffix=rsuffix,
).columns
return DataFrame(
query_compiler=self._query_compiler.join(
[obj._query_compiler for obj in other],
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
)
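# Illustrative usage sketch (hypothetical frames sharing an index; not part of
# the original source):
#   left = DataFrame({"a": [1, 2]}, index=["x", "y"])
#   right = DataFrame({"b": [3, 4]}, index=["x", "y"])
#   left.join(right, how="inner", lsuffix="_l", rsuffix="_r")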
def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.kurt,
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs
)
def kurtosis(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.kurtosis,
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs
)
def last(self, offset):
return self._default_to_pandas(pandas.DataFrame.last, offset)
def last_valid_index(self):
"""Return index for last non-NA/null value.
Returns:
scalar: type of index
"""
return self._query_compiler.last_valid_index()
def le(self, other, axis="columns", level=None):
"""Checks element-wise that this is less than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the le over.
level: The Multilevel index level to apply le over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.le, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.le(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def lookup(self, row_labels, col_labels):
return self._default_to_pandas(pandas.DataFrame.lookup, row_labels, col_labels)
def lt(self, other, axis="columns", level=None):
"""Checks element-wise that this is less than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the lt over.
level: The Multilevel index level to apply lt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.lt, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.lt(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def mad(self, axis=None, skipna=None, level=None):
return self._default_to_pandas(
pandas.DataFrame.mad, axis=axis, skipna=skipna, level=level
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
raise_on_error=None,
):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mask,
cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
errors=errors,
try_cast=try_cast,
raise_on_error=raise_on_error,
)
def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Perform max across the DataFrame.
Args:
axis (int): The axis to take the max on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The max of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_min_max(axis, numeric_only)
return self._query_compiler.max(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Computes mean across the DataFrame.
Args:
axis (int): The axis to take the mean on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The mean of the DataFrame. (Pandas series)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.mean(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Computes median across the DataFrame.
Args:
axis (int): The axis to take the median on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The median of the DataFrame. (Pandas series)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if numeric_only is not None and not numeric_only:
self._validate_dtypes(numeric_only=True)
return self._query_compiler.median(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
):
return self._default_to_pandas(
pandas.DataFrame.melt,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
)
def memory_usage(self, index=True, deep=False):
"""Returns the memory usage of each column in bytes
Args:
index (bool): Whether to include the memory usage of the DataFrame's
index in returned Series. Defaults to True
deep (bool): If True, introspect the data deeply by interrogating
objects dtypes for system-level memory consumption. Defaults to False
Returns:
A Series where the index are the column names and the values are
the memory usage of each of the columns in bytes. If `index=True`,
then the first value of the Series will be 'Index' with its memory usage.
"""
result = self._query_compiler.memory_usage(index=index, deep=deep)
result.index = self.columns
if index:
index_value = self.index.memory_usage(deep=deep)
return pandas.Series(index_value, index=["Index"]).append(result)
return result
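# Illustrative usage sketch (hypothetical df = DataFrame({"a": [1, 2], "b": ["x", "y"]})):
#   df.memory_usage()            # per-column bytes, preceded by an "Index" entry
#   df.memory_usage(deep=True)   # introspect object columns for actual usage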
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
):
"""Database style join, where common columns in "on" are merged.
Args:
right: The DataFrame to merge against.
how: What type of join to use.
on: The common column name(s) to join on. If None, and left_on and
right_on are also None, will default to all commonly named
columns.
left_on: The column(s) on the left to use for the join.
right_on: The column(s) on the right to use for the join.
left_index: Use the index from the left as the join keys.
right_index: Use the index from the right as the join keys.
sort: Sort the join keys lexicographically in the result.
suffixes: Add this suffix to the common names not in the "on".
copy: Does nothing in our implementation
indicator: Adds a column named _merge to the DataFrame with
metadata from the merge about each row.
validate: Checks if merge is a specific type.
Returns:
A merged Dataframe
"""
if not isinstance(right, DataFrame):
raise ValueError(
"can not merge DataFrame with instance of type "
"{}".format(type(right))
)
if left_index is False or right_index is False:
if isinstance(right, DataFrame):
right = right._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.merge,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
if left_index and right_index:
return self.join(
right, how=how, lsuffix=suffixes[0], rsuffix=suffixes[1], sort=sort
)
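# Illustrative usage sketch (hypothetical frames with a shared key column; note
# that, as coded above, only left_index=True and right_index=True merges stay in
# this implementation -- other merges default to pandas):
#   a = DataFrame({"key": [1, 2], "x": [10, 20]})
#   b = DataFrame({"key": [2, 3], "y": [30, 40]})
#   a.merge(b, on="key", how="inner")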
def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Perform min across the DataFrame.
Args:
axis (int): The axis to take the min on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The min of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_min_max(axis, numeric_only)
return self._query_compiler.min(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def mod(self, other, axis="columns", level=None, fill_value=None):
"""Mods this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the mod against this.
axis: The axis to mod over.
level: The Multilevel index level to apply mod over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Mod applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mod,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.mod(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def mode(self, axis=0, numeric_only=False):
"""Perform mode across the DataFrame.
Args:
axis (int): The axis to take the mode on.
numeric_only (bool): if True, only apply to numeric columns.
Returns:
DataFrame: The mode of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.mode(
axis=axis, numeric_only=numeric_only
)
)
def mul(self, other, axis="columns", level=None, fill_value=None):
"""Multiplies this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mul,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.mul(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def multiply(self, other, axis="columns", level=None, fill_value=None):
"""Synonym for mul.
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied.
"""
return self.mul(other, axis, level, fill_value)
def ne(self, other, axis="columns", level=None):
"""Checks element-wise that this is not equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the ne over.
level: The Multilevel index level to apply ne over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.ne, other, axis=axis, level=level
)
other = self._validate_other(other, axis)
new_query_compiler = self._query_compiler.ne(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def nlargest(self, n, columns, keep="first"):
return self._default_to_pandas(pandas.DataFrame.nlargest, n, columns, keep=keep)
def notna(self):
"""Perform notna across the DataFrame.
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return DataFrame(query_compiler=self._query_compiler.notna())
def notnull(self):
"""Perform notnull across the DataFrame.
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return DataFrame(query_compiler=self._query_compiler.notnull())
def nsmallest(self, n, columns, keep="first"):
return self._default_to_pandas(
pandas.DataFrame.nsmallest, n, columns, keep=keep
)
def nunique(self, axis=0, dropna=True):
"""Return Series with number of distinct
observations over requested axis.
Args:
axis : {0 or 'index', 1 or 'columns'}, default 0
dropna : boolean, default True
Returns:
nunique : Series
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return self._query_compiler.nunique(axis=axis, dropna=dropna)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.pct_change,
periods=periods,
fill_method=fill_method,
limit=limit,
freq=freq,
**kwargs
)
def pipe(self, func, *args, **kwargs):
"""Apply func(self, *args, **kwargs)
Args:
func: function to apply to the df.
args: positional arguments passed into ``func``.
kwargs: a dictionary of keyword arguments passed into ``func``.
Returns:
object: the return type of ``func``.
"""
return com._pipe(self, func, *args, **kwargs)
def pivot(self, index=None, columns=None, values=None):
return self._default_to_pandas(
pandas.DataFrame.pivot, index=index, columns=columns, values=values
)
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
):
return self._default_to_pandas(
pandas.DataFrame.pivot_table,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
)
@property
def plot(
self,
x=None,
y=None,
kind="line",
ax=None,
subplots=False,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwargs
):
return to_pandas(self).plot
def pop(self, item):
"""Pops an item from this DataFrame and returns it.
Args:
item (str): Column label to be popped
Returns:
A Series containing the popped values. Also modifies this
DataFrame.
"""
result = self[item]
del self[item]
return result
def pow(self, other, axis="columns", level=None, fill_value=None):
"""Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.pow,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.pow(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def prod(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=1,
**kwargs
):
"""Return the product of the values for the requested axis
Args:
axis : {index (0), columns (1)}
skipna : boolean, default True
level : int or level name, default None
numeric_only : boolean, default None
min_count : int, default 1
Returns:
prod : Series or DataFrame (if level specified)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=True)
return self._query_compiler.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def product(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=1,
**kwargs
):
"""Return the product of the values for the requested axis
Args:
axis : {index (0), columns (1)}
skipna : boolean, default True
level : int or level name, default None
numeric_only : boolean, default None
min_count : int, default 1
Returns:
product : Series or DataFrame (if level specified)
"""
return self.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
"""Return values at the given quantile over requested axis,
a la numpy.percentile.
Args:
q (float): 0 <= q <= 1, the quantile(s) to compute
axis (int): 0 or 'index' for row-wise,
1 or 'columns' for column-wise
interpolation: {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Specifies which interpolation method to use
Returns:
quantiles : Series or DataFrame
If q is an array, a DataFrame will be returned where the
index is q, the columns are the columns of self, and the
values are the quantiles.
If q is a float, a Series will be returned where the
index is the columns of self and the values
are the quantiles.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
def check_dtype(t):
return is_numeric_dtype(t) or is_datetime_or_timedelta_dtype(t)
if not numeric_only:
# If not numeric_only and columns, then check all columns are either
# numeric, timestamp, or timedelta
if not axis and not all(check_dtype(t) for t in self.dtypes):
raise TypeError("can't multiply sequence by non-int of type 'float'")
# If over rows, then make sure that all dtypes are equal for not
# numeric_only
elif axis:
for i in range(1, len(self.dtypes)):
pre_dtype = self.dtypes[i - 1]
curr_dtype = self.dtypes[i]
if not is_dtype_equal(pre_dtype, curr_dtype):
raise TypeError(
"Cannot compare type '{0}' with type '{1}'".format(
pre_dtype, curr_dtype
)
)
else:
# Normally pandas returns this near the end of the quantile, but we
# can't afford the overhead of running the entire operation before
# we error.
if not any(is_numeric_dtype(t) for t in self.dtypes):
raise ValueError("need at least one array to concatenate")
# check that all qs are between 0 and 1
pandas.DataFrame()._check_percentile(q)
axis = pandas.DataFrame()._get_axis_number(axis)
if isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list)):
return DataFrame(
query_compiler=self._query_compiler.quantile_for_list_of_values(
q=q,
axis=axis,
numeric_only=numeric_only,
interpolation=interpolation,
)
)
else:
return self._query_compiler.quantile_for_single_value(
q=q, axis=axis, numeric_only=numeric_only, interpolation=interpolation
)
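# Illustrative usage sketch (hypothetical df = DataFrame({"a": [1, 2, 3, 4]})):
#   df.quantile(0.5)            # Series: the median of each numeric column
#   df.quantile([0.25, 0.75])   # DataFrame indexed by the requested quantiles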
def query(self, expr, inplace=False, **kwargs):
"""Queries the Dataframe with a boolean expression
Returns:
A new DataFrame if inplace=False
"""
ErrorMessage.non_verified_udf()
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.query(expr, **kwargs)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def radd(self, other, axis="columns", level=None, fill_value=None):
return self.add(other, axis, level, fill_value)
def rank(
self,
axis=0,
method="average",
numeric_only=None,
na_option="keep",
ascending=True,
pct=False,
):
"""
Compute numerical data ranks (1 through n) along axis.
Equal values are assigned a rank that is the [method] of
the ranks of those values.
Args:
axis (int): 0 or 'index' for row-wise,
1 or 'columns' for column-wise
method: {'average', 'min', 'max', 'first', 'dense'}
Specifies which method to use for equal vals
numeric_only (boolean)
Include only float, int, boolean data.
na_option: {'keep', 'top', 'bottom'}
Specifies how to handle NA options
ascending (boolean):
Decides ranking order
pct (boolean):
Computes percentage ranking of data
Returns:
A new DataFrame
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.rank(
axis=axis,
method=method,
numeric_only=numeric_only,
na_option=na_option,
ascending=ascending,
pct=pct,
)
)
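# Illustrative usage sketch (hypothetical df = DataFrame({"a": [3, 1, 2, 2]})):
#   df.rank()                                 # average rank for ties
#   df.rank(method="dense", ascending=False)  # dense, descending ranks
#   df.rank(pct=True)                         # percentage ranks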
def rdiv(self, other, axis="columns", level=None, fill_value=None):
"""Div this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the div against this.
axis: The axis to div over.
level: The Multilevel index level to apply div over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the rdiv applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rdiv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.rdiv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def reindex(
self,
labels=None,
index=None,
columns=None,
axis=None,
method=None,
copy=True,
level=None,
fill_value=np.nan,
limit=None,
tolerance=None,
):
if level is not None:
return self._default_to_pandas(
pandas.DataFrame.reindex,
labels=labels,
index=index,
columns=columns,
axis=axis,
method=method,
copy=copy,
level=level,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis == 0 and labels is not None:
index = labels
elif labels is not None:
columns = labels
if index is not None:
new_query_compiler = self._query_compiler.reindex(
0,
index,
method=method,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
else:
new_query_compiler = self._query_compiler
if columns is not None:
final_query_compiler = new_query_compiler.reindex(
1,
columns,
method=method,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
else:
final_query_compiler = new_query_compiler
return self._create_dataframe_from_compiler(final_query_compiler, not copy)
def reindex_axis(
self,
labels,
axis=0,
method=None,
level=None,
copy=True,
limit=None,
fill_value=np.nan,
):
return self._default_to_pandas(
pandas.DataFrame.reindex_axis,
labels,
axis=axis,
method=method,
level=level,
copy=copy,
limit=limit,
fill_value=fill_value,
)
def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.reindex_like,
other,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
def rename(
self,
mapper=None,
index=None,
columns=None,
axis=None,
copy=True,
inplace=False,
level=None,
):
"""Alters axes labels.
Args:
mapper, index, columns: Transformations to apply to the axis's
values.
axis: Axis to target with mapper.
copy: Also copy underlying data.
inplace: If True, perform the rename in place rather than returning a new DataFrame.
level: Only rename a specific level of a MultiIndex.
Returns:
If inplace is False, a new DataFrame with the updated axes.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# We have to do this with the args because of how rename handles
# kwargs. It doesn't ignore None values passed in, so we have to filter
# them ourselves.
args = locals()
kwargs = {k: v for k, v in args.items() if v is not None and k != "self"}
# inplace should always be true because this is just a copy, and we
# will use the results after.
kwargs["inplace"] = True
df_to_rename = pandas.DataFrame(index=self.index, columns=self.columns)
df_to_rename.rename(**kwargs)
if inplace:
obj = self
else:
obj = self.copy()
obj.index = df_to_rename.index
obj.columns = df_to_rename.columns
if not inplace:
return obj
def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
axes_is_columns = axis == 1 or axis == "columns"
renamed = self if inplace else self.copy()
if axes_is_columns:
renamed.columns.name = mapper
else:
renamed.index.name = mapper
if not inplace:
return renamed
def _set_axis_name(self, name, axis=0, inplace=False):
"""Alter the name or names of the axis.
Args:
name: Name for the Index, or list of names for the MultiIndex
axis: 0 or 'index' for the index; 1 or 'columns' for the columns
inplace: Whether to modify `self` directly or return a copy
Returns:
Type of caller or None if inplace=True.
"""
axes_is_columns = axis == 1 or axis == "columns"
renamed = self if inplace else self.copy()
if axes_is_columns:
renamed.columns = renamed.columns.set_names(name)
else:
renamed.index = renamed.index.set_names(name)
if not inplace:
return renamed
def reorder_levels(self, order, axis=0):
return self._default_to_pandas(
pandas.DataFrame.reorder_levels, order, axis=axis
)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
return self._default_to_pandas(
pandas.DataFrame.replace,
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
def resample(
self,
rule,
how=None,
axis=0,
fill_method=None,
closed=None,
label=None,
convention="start",
kind=None,
loffset=None,
limit=None,
base=0,
on=None,
level=None,
):
return self._default_to_pandas(
pandas.DataFrame.resample,
rule,
how=how,
axis=axis,
fill_method=fill_method,
closed=closed,
label=label,
convention=convention,
kind=kind,
loffset=loffset,
limit=limit,
base=base,
on=on,
level=level,
)
def reset_index(
self, level=None, drop=False, inplace=False, col_level=0, col_fill=""
):
"""Reset this index to default and create column from current index.
Args:
level: Only remove the given levels from the index. Removes all
levels by default
drop: Do not try to insert index into DataFrame columns. This
resets the index to the default integer index.
inplace: Modify the DataFrame in place (do not create a new object)
col_level : If the columns have multiple levels, determines which
level the labels are inserted into. By default it is inserted
into the first level.
col_fill: If the columns have multiple levels, determines how the
other levels are named. If None then the index name is
repeated.
Returns:
A new DataFrame if inplace is False, None otherwise.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# TODO Implement level
if level is not None:
new_query_compiler = self._default_to_pandas(
pandas.DataFrame.reset_index,
level=level,
drop=drop,
inplace=inplace,
col_level=col_level,
col_fill=col_fill,
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
# Error checking for matching Pandas. Pandas does not allow you to
# insert a dropped index into a DataFrame if these columns already
# exist.
if (
not drop
and not isinstance(self.index, pandas.MultiIndex)
and all(n in self.columns for n in ["level_0", "index"])
):
raise ValueError("cannot insert level_0, already exists")
new_query_compiler = self._query_compiler.reset_index(drop=drop, level=level)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
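# Illustrative usage sketch (hypothetical df with a labeled index):
#   df = DataFrame({"a": [1, 2]}, index=["x", "y"])
#   df.reset_index()            # moves the old index into a new column
#   df.reset_index(drop=True)   # discards the old index entirely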
def rfloordiv(self, other, axis="columns", level=None, fill_value=None):
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rfloordiv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.rfloordiv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rmod(self, other, axis="columns", level=None, fill_value=None):
"""Mod this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the mod against this.
axis: The axis to mod over.
level: The Multilevel index level to apply mod over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the rmod applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rmod,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.rmod(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rmul(self, other, axis="columns", level=None, fill_value=None):
return self.mul(other, axis, level, fill_value)
def rolling(
self,
window,
min_periods=None,
freq=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
):
return self._default_to_pandas(
pandas.DataFrame.rolling,
window,
min_periods=min_periods,
freq=freq,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
def round(self, decimals=0, *args, **kwargs):
"""Round each element in the DataFrame.
Args:
decimals: The number of decimals to round to.
Returns:
A new DataFrame.
"""
return DataFrame(
query_compiler=self._query_compiler.round(decimals=decimals, **kwargs)
)
def rpow(self, other, axis="columns", level=None, fill_value=None):
"""Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rpow,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
# Check to make sure integers are not raised to negative integer powers
if (
is_integer_dtype(type(other))
and other < 0
and all(is_integer_dtype(t) for t in self.dtypes)
):
raise ValueError("Integers to negative integer powers are not allowed.")
new_query_compiler = self._query_compiler.rpow(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rsub(self, other, axis="columns", level=None, fill_value=None):
"""Subtract a DataFrame/Series/scalar from this DataFrame.
Args:
other: The object to use to apply the subtraction to this.
axis: The axis to apply the subtraction over.
level: The Multilevel index level to subtract over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the subtraction applied.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rsub,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_or_time_only=True)
new_query_compiler = self._query_compiler.rsub(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rtruediv(self, other, axis="columns", level=None, fill_value=None):
return self.truediv(other, axis, level, fill_value)
def sample(
self,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
):
"""Returns a random sample of items from an axis of object.
Args:
n: Number of items from axis to return. Cannot be used with frac.
Default = 1 if frac = None.
frac: Fraction of axis items to return. Cannot be used with n.
replace: Sample with or without replacement. Default = False.
weights: Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index.
Index values in weights not found in sampled object will be
ignored and index values in sampled object not in weights will
be assigned weights of zero. If called on a DataFrame, will
accept the name of a column when axis = 0. Unless weights are
a Series, weights must be same length as axis being sampled.
If weights do not sum to 1, they will be normalized to sum
to 1. Missing values in the weights column will be treated as
zero. inf and -inf values not allowed.
random_state: Seed for the random number generator (if int), or
numpy RandomState object.
axis: Axis to sample. Accepts axis number or name.
Returns:
A new Dataframe
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
axis_labels = self.columns
axis_length = len(axis_labels)
else:
# Getting rows requires indices instead of labels. RangeIndex provides this.
axis_labels = pandas.RangeIndex(len(self.index))
axis_length = len(axis_labels)
if weights is not None:
# Index of the weights Series should correspond to the index of the
# Dataframe in order to sample
if isinstance(weights, pandas.Series):
weights = weights.reindex(self.axes[axis])
# If weights arg is a string, the weights used for sampling will
# be the values in the column corresponding to that string
if isinstance(weights, string_types):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError("String passed to weights not a valid column")
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
weights = pandas.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError(
"Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector may not include negative values")
# weights cannot be NaN when sampling, so we must set all nan
# values to 0
weights = weights.fillna(0)
# If passed in weights are not equal to 1, renormalize them
# otherwise numpy sampling function will error
weights_sum = weights.sum()
if weights_sum != 1:
if weights_sum != 0:
weights = weights / weights_sum
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
if n is None and frac is None:
# default to n = 1 if n and frac are both None (in accordance with
# Pandas specification)
n = 1
elif n is not None and frac is None and n % 1 != 0:
# n must be an integer
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
# compute the number of samples based on frac
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
# Pandas specification does not allow both n and frac to be passed
# in
raise ValueError("Please enter a value for `frac` OR `n`, not both")
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
if n == 0:
# An Empty DataFrame is returned if the number of samples is 0.
# The Empty Dataframe should have either columns or index specified
# depending on which axis is passed in.
return DataFrame(
columns=[] if axis == 1 else self.columns,
index=self.index if axis == 1 else [],
)
if random_state is not None:
# Get a random number generator depending on the type of
# random_state that is passed in
if isinstance(random_state, int):
random_num_gen = np.random.RandomState(random_state)
elif isinstance(random_state, np.random.RandomState):
random_num_gen = random_state
else:
# random_state must be an int or a numpy RandomState object
raise ValueError(
"Please enter an `int` OR a "
"np.random.RandomState for random_state"
)
# choose random numbers and then get corresponding labels from
# chosen axis
sample_indices = random_num_gen.choice(
np.arange(0, axis_length), size=n, replace=replace, p=weights
)
samples = axis_labels[sample_indices]
else:
# randomly select labels from chosen axis
samples = np.random.choice(
a=axis_labels, size=n, replace=replace, p=weights
)
if axis:
query_compiler = self._query_compiler.getitem_column_array(samples)
return DataFrame(query_compiler=query_compiler)
else:
query_compiler = self._query_compiler.getitem_row_array(samples)
return DataFrame(query_compiler=query_compiler)
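# --- Illustrative usage (added for clarity; not part of the original source) ---
# A minimal sketch of how the parameters documented above interact, assuming a
# hypothetical DataFrame `df` with a numeric column "score":
#
#   df.sample(n=3, random_state=42)        # three rows, reproducible
#   df.sample(frac=0.25, replace=True)     # roughly a quarter of the rows, with replacement
#   df.sample(n=5, weights="score")        # selection probability proportional to "score"
#   df.sample(n=2, axis=1)                 # sample columns instead of rows
#
# Passing both `n` and `frac`, a negative `n`, or weights containing inf values
# raises ValueError, as implemented above.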
def select(self, crit, axis=0):
return self._default_to_pandas(pandas.DataFrame.select, crit, axis=axis)
def select_dtypes(self, include=None, exclude=None):
# Validates arguments for whether both include and exclude are None or
# if they are disjoint. Also invalidates string dtypes.
pandas.DataFrame().select_dtypes(include, exclude)
if include and not | is_list_like(include) | pandas.core.dtypes.common.is_list_like |
import rba
import copy
import pandas
import time
import numpy
import seaborn
import matplotlib.pyplot as plt
from .rba_Session import RBA_Session
from sklearn.linear_model import LinearRegression
# import matplotlib.pyplot as plt
def find_ribosomal_proteins(rba_session, model_processes=['TranslationC', 'TranslationM'], external_annotations=None):
out = []
for i in model_processes:
out += [rba_session.ModelStructure.ProteinInfo.Elements[j]['ProtoID']
for j in list(rba_session.ModelStructure.ProcessInfo.Elements[i]['Composition'].keys()) if j in rba_session.ModelStructure.ProteinInfo.Elements.keys()]
if external_annotations is not None:
out += list(external_annotations['ID'])
return(list(set(out)))
def build_model_compartment_map(rba_session):
out = {rba_session.ModelStructure.ProteinInfo.Elements[i]['ProtoID']: rba_session.ModelStructure.ProteinInfo.Elements[i]['Compartment'] for i in list(
rba_session.ModelStructure.ProteinInfo.Elements.keys())}
return(out)
def build_compartment_annotations(Compartment_Annotations_external, model_protein_compartment_map):
for i in Compartment_Annotations_external.index:
if Compartment_Annotations_external.loc[i, 'ID'] in list(model_protein_compartment_map.keys()):
Compartment_Annotations_external.loc[i, 'modelproteinannotation'] = 1
else:
Compartment_Annotations_external.loc[i, 'modelproteinannotation'] = 0
Compartment_Annotations_internal = pandas.DataFrame()
Compartment_Annotations_internal['ID'] = list(model_protein_compartment_map.keys())
Compartment_Annotations_internal['ModelComp'] = list(model_protein_compartment_map.values())
Compartment_Annotations = pandas.concat(
[Compartment_Annotations_internal, Compartment_Annotations_external.loc[Compartment_Annotations_external['modelproteinannotation'] == 0, ['ID', 'ModelComp']]], axis=0)
return(Compartment_Annotations)
def build_dataset_annotations(input, ID_column, Uniprot, Compartment_Annotations, model_protein_compartment_map, ribosomal_proteins):
print('riboprots-----------------')
print(ribosomal_proteins)
out = pandas.DataFrame()
for g in list(input[ID_column]):
out.loc[g, 'ID'] = g
matches = [i for i in list(Uniprot.loc[pandas.isna(
Uniprot['Gene names']) == False, 'Gene names']) if g in i]
mass_prot = numpy.nan
if len(matches) > 0:
mass_prot = len(Uniprot.loc[Uniprot['Gene names'] == matches[0], 'Sequence'].values[0])
out.loc[g, 'AA_residues'] = mass_prot
if g in list(Compartment_Annotations['ID']):
out.loc[g, 'Location'] = Compartment_Annotations.loc[Compartment_Annotations['ID']
== g, 'ModelComp'].values[0]
in_model = 0
if g in model_protein_compartment_map.keys():
in_model = 1
is_ribosomal = 0
if g in ribosomal_proteins:
is_ribosomal = 1
out.loc[g, 'InModel'] = in_model
out.loc[g, 'IsRibosomal'] = is_ribosomal
return(out)
def build_full_annotations_from_dataset_annotations(annotations_list):
out = pandas.concat(annotations_list, axis=0)
index = out.index
is_duplicate = index.duplicated(keep="first")
not_duplicate = ~is_duplicate
out = out[not_duplicate]
return(out)
def infer_copy_numbers_from_reference_copy_numbers(fold_changes, absolute_data, matching_column_in_fold_change_data, matching_column_in_absolute_data, conditions_in_fold_change_data_to_restore):
out = pandas.DataFrame()
for i in list(absolute_data['Gene']):
if i in list(fold_changes['Gene']):
FoldChange_match = fold_changes.loc[fold_changes['Gene']
== i, matching_column_in_fold_change_data].values[0]
CopyNumber_match = absolute_data.loc[absolute_data['Gene']
== i, matching_column_in_absolute_data].values[0]
if not pandas.isna(FoldChange_match):
if not pandas.isna(CopyNumber_match):
out.loc[i, 'ID'] = i
out.loc[i, 'Absolute_Reference'] = CopyNumber_match/(2**FoldChange_match)
for gene in list(out['ID']):
Abs_Ref = out.loc[gene, 'Absolute_Reference']
for condition in conditions_in_fold_change_data_to_restore:
out.loc[gene, condition] = Abs_Ref * \
(2**fold_changes.loc[fold_changes['Gene'] == gene, condition].values[0])
return(out)
def add_annotations_to_proteome(input, ID_column, annotations):
for i in input.index:
if input.loc[i, ID_column] in annotations.index:
input.loc[i, 'AA_residues'] = annotations.loc[input.loc[i, ID_column], 'AA_residues']
input.loc[i, 'Location'] = annotations.loc[input.loc[i, ID_column], 'Location']
input.loc[i, 'InModel'] = annotations.loc[input.loc[i, ID_column], 'InModel']
input.loc[i, 'IsRibosomal'] = annotations.loc[input.loc[i, ID_column], 'IsRibosomal']
return(input)
def determine_compartment_occupation(Data, Condition, mass_col='AA_residues', only_in_model=False, compartments_to_ignore=['DEF'], compartments_no_original_PG=[], ribosomal_proteins_as_extra_compartment=True):
for i in compartments_to_ignore:
Data = Data.loc[Data['Location'] != i]
for i in compartments_no_original_PG:
Data = Data.loc[(Data['Location'] != i) | (Data['InModel'] == 1)]
if only_in_model:
Data = Data.loc[Data['InModel'] >= 1]
if ribosomal_proteins_as_extra_compartment:
Data_R = Data.loc[Data['IsRibosomal'] == 1].copy()
Data = Data.loc[Data['IsRibosomal'] == 0]
Data_R_df = Data_R.loc[:, [Condition, mass_col, 'Location']]
Data_R_df[Condition] = Data_R_df[Condition]*Data_R_df[mass_col]
Ribosomal_sum = Data_R_df[Condition].sum()
df = Data.loc[:, [Condition, mass_col, 'Location']]
df[Condition] = df[Condition]*df[mass_col]
out = pandas.DataFrame(df.groupby('Location').sum())
if ribosomal_proteins_as_extra_compartment:
out.loc['Ribosomes', Condition] = Ribosomal_sum
out.loc['Total', Condition] = out[Condition].sum()
out.loc[:, 'original_protein_fraction'] = out[Condition]/out.loc['Total', Condition]
out.rename(columns={Condition: 'original_amino_acid_occupation'}, inplace=True)
out.drop(columns=['AA_residues'], inplace=True)
return(out)
def build_proteome_overview(input, condition, compartments_to_ignore=['DEF', 'DEFA', 'Def'], compartments_no_original_PG=['n', 'Secreted'], ribosomal_proteins_as_extra_compartment=True):
out = determine_compartment_occupation(Data=input, Condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=ribosomal_proteins_as_extra_compartment, only_in_model=False)
out_in_model = determine_compartment_occupation(Data=input, Condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=ribosomal_proteins_as_extra_compartment, only_in_model=True)
out['original_PG_fraction'] = 1-out_in_model['original_amino_acid_occupation'] / \
out['original_amino_acid_occupation']
return(out)
def determine_correction_factor_A(fractions_entirely_replaced_with_expected_value):
expected_fraction_sum = 0
for i in fractions_entirely_replaced_with_expected_value.keys():
expected_fraction_sum += fractions_entirely_replaced_with_expected_value[i]
factor = 1/(1-expected_fraction_sum)
return(factor)
def determine_correction_factor_B(imposed_compartment_fractions):
expected_fractions = 0
for i in imposed_compartment_fractions.keys():
expected_fractions += imposed_compartment_fractions[i]
factor = 1-expected_fractions
return(factor)
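# --- Worked example (added for clarity; the fractions are hypothetical) ---
# With fractions_entirely_replaced_with_expected_value = {'Ribosomes': 0.2},
# determine_correction_factor_A returns 1/(1 - 0.2) = 1.25, i.e. the remaining
# compartments are scaled up to compensate for the replaced fraction.
# With imposed_compartment_fractions = {'n': 0.1, 'Secreted': 0.05},
# determine_correction_factor_B returns 1 - (0.1 + 0.05) = 0.85, i.e. the share
# of the proteome left for the directly corrected compartments.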
def determine_correction_factor_C(input, condition, reference_condition):
return(input.loc[input['ID'] == 'Total_protein', condition].values[0]/input.loc[input['ID'] == 'Total_protein', reference_condition].values[0])
def correct_protein_fractions(input, factors, directly_corrected_compartments, imposed_compartment_fractions):
out = input.copy()
for c in out.index:
if c in directly_corrected_compartments:
out.loc[c, 'new_protein_fraction'] = out.loc[c,
'original_protein_fraction']*factors['A']*factors['B']
elif c in imposed_compartment_fractions.keys():
out.loc[c, 'new_protein_fraction'] = imposed_compartment_fractions[c]
return(out)
def correct_PG_fraction(input, factors, compartments_no_original_PG, merged_compartments):
out = input.copy()
for c in out.index:
if c == 'Total':
continue
else:
if c in compartments_no_original_PG:
original_fraction = out.loc[c, 'original_protein_fraction']
out.loc[c, 'new_PG_fraction'] = 1 - ((factors['A']*factors['B']*original_fraction) /
out.loc[c, 'new_protein_fraction'])
elif c in merged_compartments.keys():
out.loc[c, 'new_PG_fraction'] = out.loc[c, 'original_PG_fraction']*out.loc[c, 'original_protein_fraction']/(
out.loc[c, 'original_protein_fraction']+out.loc[merged_compartments[c], 'original_protein_fraction'])
else:
out.loc[c, 'new_PG_fraction'] = out.loc[c, 'original_PG_fraction']
return(out)
def merge_compartments(input, merged_compartments):
out = input.copy()
for c in merged_compartments.keys():
out.loc[c, 'new_protein_fraction'] = out.loc[c, 'new_protein_fraction'] + \
out.loc[merged_compartments[c], 'new_protein_fraction']
return(out)
def calculate_new_total_PG_fraction(input):
out = input.copy()
fraction = 0
for c in out.index:
if c not in ['Total', 'Ribosomes']:
fraction += out.loc[c, 'new_protein_fraction']*out.loc[c, 'new_PG_fraction']
out.loc['Total', 'new_PG_fraction'] = fraction
out.loc['Total', 'new_protein_fraction'] = 1
return(out)
def determine_apparent_process_efficiencies(growth_rate, input, rba_session, proteome_summary, protein_data, condition, gene_id_col):
process_efficiencies = pandas.DataFrame()
for i in input.index:
process_ID = input.loc[i, 'Process_ID']
process_name = input.loc[i, 'Process_Name']
process_client_compartments = input.loc[i, 'Client_Compartments'].split(' , ')
constituting_proteins = {rba_session.ModelStructure.ProteinInfo.Elements[i]['ProtoID']: rba_session.ModelStructure.ProteinInfo.Elements[
i]['AAnumber'] for i in rba_session.ModelStructure.ProcessInfo.Elements[process_name]['Composition'].keys()}
Total_client_fraction = sum([proteome_summary.loc[i, 'new_protein_fraction']
for i in process_client_compartments])
n_AAs_in_machinery = 0
machinery_size = 0
for i in constituting_proteins.keys():
if i in protein_data['ID']:
protein_data.loc[protein_data['ID'] == i, ]
n_AAs_in_machinery += protein_data.loc[protein_data['ID'] == i, condition].values[0] * \
protein_data.loc[protein_data['ID'] == i, 'AA_residues'].values[0]
machinery_size += constituting_proteins[i]
# right reference amount?
if n_AAs_in_machinery > 0:
relative_Protein_fraction_of_machinery = n_AAs_in_machinery / \
proteome_summary.loc['Total', 'original_amino_acid_occupation']
specific_capacity = growth_rate*Total_client_fraction/relative_Protein_fraction_of_machinery
apparent_capacity = specific_capacity*machinery_size
# process_ID[process_name] = apparent_capacity
process_efficiencies.loc[process_name, 'Process'] = process_ID
process_efficiencies.loc[process_name, 'Parameter'] = str(
process_ID+'_apparent_efficiency')
process_efficiencies.loc[process_name, 'Value'] = apparent_capacity
return(process_efficiencies)
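# --- Worked example (added for clarity; the numbers are hypothetical) ---
# If the translation machinery accounts for 5 % of the measured amino-acid mass
# (relative_Protein_fraction_of_machinery = 0.05), its client compartments hold
# 60 % of the proteome (Total_client_fraction = 0.6), the growth rate is 0.1 1/h
# and the machinery comprises 1.5e6 amino acids (machinery_size), then
#   specific_capacity = 0.1 * 0.6 / 0.05 = 1.2
#   apparent_capacity = 1.2 * 1.5e6      = 1.8e6
# which is the value written to the 'Value' column of process_efficiencies.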
def correction_pipeline(input, condition, compartments_to_ignore, compartments_no_original_PG, fractions_entirely_replaced_with_expected_value, imposed_compartment_fractions, directly_corrected_compartments, merged_compartments):
out = build_proteome_overview(input=input, condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=True)
factor_A = determine_correction_factor_A(fractions_entirely_replaced_with_expected_value={
i: imposed_compartment_fractions[i] for i in fractions_entirely_replaced_with_expected_value})
factor_B = determine_correction_factor_B(
imposed_compartment_fractions=imposed_compartment_fractions)
out = correct_protein_fractions(input=out, factors={
'A': factor_A, 'B': factor_B}, directly_corrected_compartments=directly_corrected_compartments, imposed_compartment_fractions=imposed_compartment_fractions)
out = correct_PG_fraction(input=out, factors={
'A': factor_A, 'B': factor_B}, compartments_no_original_PG=compartments_no_original_PG, merged_compartments=merged_compartments)
out = merge_compartments(input=out, merged_compartments=merged_compartments)
out = calculate_new_total_PG_fraction(input=out)
out.to_csv(str('Correction_overview_'+condition+'.csv'))
return({'Summary': out, 'Correction_factors': {'A': factor_A, 'B': factor_B}})
def build_input_for_default_kapp_estimation(input):
out = pandas.DataFrame(columns=['Compartment_ID', 'Density', 'PG_fraction'])
for i in input['Summary'].index:
if i not in ['Total', 'Ribosomes']:
out.loc[i, 'Compartment_ID'] = i
out.loc[i, 'Density'] = input['Summary'].loc[i, 'new_protein_fraction']
out.loc[i, 'PG_fraction'] = input['Summary'].loc[i, 'new_PG_fraction']
return(out)
def flux_bounds_from_input(input, condition, specific_exchanges=None):
flux_mean_df = input.loc[input['Type'] == 'ExchangeFlux_Mean', :]
flux_mean_SE = input.loc[input['Type'] == 'ExchangeFlux_StandardError', :]
out = pandas.DataFrame(columns=['Reaction_ID', 'LB', 'UB'])
if specific_exchanges is None:
exchanges_to_set = list(flux_mean_df['ID'])
else:
exchanges_to_set = specific_exchanges
for rx in exchanges_to_set:
mean_val = flux_mean_df.loc[flux_mean_df['ID'] == rx, condition].values[0]
if not pandas.isna(mean_val):
SE_val = flux_mean_SE.loc[flux_mean_SE['ID'] == str(rx+'_SE'), condition].values[0]
out.loc[rx, 'Reaction_ID'] = rx
if not pandas.isna(SE_val):
lb = mean_val-SE_val
ub = mean_val+SE_val
if mean_val < 0:
out.loc[rx, 'LB'] = lb
if ub > 0:
out.loc[rx, 'UB'] = 0
else:
out.loc[rx, 'UB'] = ub
elif mean_val > 0:
out.loc[rx, 'UB'] = ub
if lb < 0:
out.loc[rx, 'LB'] = 0
else:
out.loc[rx, 'LB'] = lb
else:
out.loc[rx, 'LB'] = lb
out.loc[rx, 'UB'] = ub
else:
out.loc[rx, 'LB'] = mean_val
out.loc[rx, 'UB'] = mean_val
flux_dir_df = input.loc[input['Type'] == 'Flux_Direction', :]
if specific_exchanges is None:
exchanges_to_set = list(flux_dir_df['ID'])
else:
exchanges_to_set = specific_exchanges
for rx in exchanges_to_set:
out.loc[rx, 'Reaction_ID'] = rx
if flux_dir_df.loc[flux_dir_df['ID'] == rx, condition].values[0] == 1:
out.loc[rx, 'LB'] = 0
elif flux_dir_df.loc[flux_dir_df['ID'] == rx, condition].values[0] == -1:
out.loc[rx, 'UB'] = 0
elif flux_dir_df.loc[flux_dir_df['ID'] == rx, condition].values[0] == 0:
out.loc[rx, 'LB'] = 0
out.loc[rx, 'UB'] = 0
flux_upper_df = input.loc[input['Type'] == 'Flux_Upper_Bound', :]
for rx in list(flux_upper_df['ID']):
out.loc[rx, 'Reaction_ID'] = rx
out.loc[rx, 'UB'] = flux_upper_df.loc[flux_upper_df['ID'] == rx, condition].values[0]
flux_lower_df = input.loc[input['Type'] == 'Flux_Lower_Bound', :]
for rx in list(flux_lower_df['ID']):
out.loc[rx, 'Reaction_ID'] = rx
out.loc[rx, 'LB'] = flux_lower_df.loc[flux_lower_df['ID'] == rx, condition].values[0]
return(out)
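# --- Expected input sketch for flux_bounds_from_input (added for clarity) ---
# The definition file is assumed to hold rows like the following (only the
# 'Type', 'ID' and condition columns are read above; the values are invented):
#
#   Type                         ID                  Hackett_C005
#   ExchangeFlux_Mean            R_EX_glc__D_e       -1.5
#   ExchangeFlux_StandardError   R_EX_glc__D_e_SE     0.1
#   Flux_Direction               R_EX_etoh_e          1
#
# For the mean/SE pair this yields LB = -1.6 and UB = -1.4 (mean +/- SE, with a
# bound clipped to 0 whenever it would cross zero); the Flux_Direction row of 1
# sets LB = 0 for the ethanol exchange.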
def growth_Rate_from_input(input, condition):
return(input.loc[input['Type'] == 'Growth_Rate', condition].values[0])
def proteome_fractions_from_input(input, condition):
df = input.loc[input['Type'] == 'Expected_ProteomeFraction', :]
return(dict(zip(list(df['ID']), list(df[condition]))))
def medium_concentrations_from_input(input, condition):
df = input.loc[input['Type'] == 'Medium_Concentration', :]
return(dict(zip(list(df['ID']), list(df[condition]))))
def build_input_proteome_for_specific_kapp_estimation(proteomics_data, condition):
out = pandas.DataFrame()
out['ID'] = proteomics_data['ID']
out['copy_number'] = proteomics_data[condition]
return(out)
def inject_estimated_efficiencies_into_model(rba_session, specific_kapps=None, default_kapps=None, process_efficiencies=None, round_to_digits=0):
"""
Parameters
----------
specific_kapps : pandas.DataFrame(columns=['Enzyme_ID','Kapp'])
default_kapps : {'default_kapp':value,'default_transporter_kapp':value}
process_efficiencies : pandas.DataFrame(columns=['Process','Parameter','Value'])
"""
if specific_kapps is not None:
parameterized = []
for enz in list(specific_kapps['Enzyme_ID']):
if not pandas.isna(specific_kapps.loc[specific_kapps['Enzyme_ID'] == enz, 'Kapp'].values[0]):
if enz not in parameterized:
all_enzs = rba_session.ModelStructure.EnzymeInfo.Elements[enz]['Isozymes']
all_enzs.append(enz)
parameterized += all_enzs
if len(all_enzs) == 1:
proto_enz = all_enzs[0]
else:
proto_enz = [i for i in all_enzs if not '_duplicate_' in i][0]
val = round(specific_kapps.loc[specific_kapps['Enzyme_ID']
== enz, 'Kapp'].values[0], round_to_digits)
const = rba.xml.parameters.Function(
str(proto_enz + '_kapp__constant'), 'constant', parameters={'CONSTANT': val}, variable=None)
if str(proto_enz + '_kapp__constant') not in rba_session.model.parameters.functions._elements_by_id.keys():
rba_session.model.parameters.functions.append(const)
else:
rba_session.model.parameters.functions._elements_by_id[const.id] = const
count = 0
for e in rba_session.model.enzymes.enzymes:
if e.id in all_enzs:
count += 1
e.forward_efficiency = str(proto_enz + '_kapp__constant')
e.backward_efficiency = str(proto_enz + '_kapp__constant')
if count == len(all_enzs):
break
if default_kapps is not None:
if type(default_kapps) is dict:
rba_session.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value = default_kapps['default_kapp']
rba_session.model.parameters.functions._elements_by_id['default_transporter_efficiency'].parameters._elements_by_id[
'CONSTANT'].value = default_kapps['default_transporter_kapp']
if process_efficiencies is not None:
for i in process_efficiencies.index:
if process_efficiencies.loc[i, 'Process'] in rba_session.model.processes.processes._elements_by_id.keys():
if not pandas.isna(process_efficiencies.loc[i, 'Value']):
rba_session.model.processes.processes._elements_by_id[process_efficiencies.loc[i,
'Process']].machinery.capacity.value = process_efficiencies.loc[i, 'Parameter']
const = rba.xml.parameters.Function(process_efficiencies.loc[i, 'Parameter'], 'constant', parameters={
'CONSTANT': process_efficiencies.loc[i, 'Value']}, variable=None)
if process_efficiencies.loc[i, 'Parameter'] not in rba_session.model.parameters.functions._elements_by_id.keys():
rba_session.model.parameters.functions.append(const)
else:
rba_session.model.parameters.functions._elements_by_id[const.id] = const
rba_session.rebuild_from_model()
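# --- Input construction sketch for inject_estimated_efficiencies_into_model ---
# (added for clarity; the enzyme and process IDs below are hypothetical, the
# default kapp values are the ones used further down for Hackett_C005)
#
#   example_specific_kapps = pandas.DataFrame({'Enzyme_ID': ['R_PGI_enzyme'],
#                                              'Kapp': [12345.0]})
#   example_default_kapps = {'default_kapp': 39673,
#                            'default_transporter_kapp': 396730}
#   example_process_efficiencies = pandas.DataFrame(
#       {'Process': ['P_TA'], 'Parameter': ['P_TA_apparent_efficiency'],
#        'Value': [1.0e6]}, index=['TranslationC'])
#
#   inject_estimated_efficiencies_into_model(
#       rba_session, specific_kapps=example_specific_kapps,
#       default_kapps=example_default_kapps,
#       process_efficiencies=example_process_efficiencies)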
def calibration_workflow(proteome,
condition,
reference_condition,
gene_ID_column,
definition_file,
rba_session,
process_efficiency_estimation_input=None,
default_kapps_provided=None):
t0 = time.time()
correction_results = correction_pipeline(input=proteome,
condition=condition,
compartments_to_ignore=['DEF', 'DEFA', 'Def'],
compartments_no_original_PG=['n', 'Secreted'],
fractions_entirely_replaced_with_expected_value=[
'Ribosomes'],
imposed_compartment_fractions=proteome_fractions_from_input(
input=definition_file, condition=condition),
directly_corrected_compartments=[
'c', 'cM', 'erM', 'gM', 'm', 'mIM', 'mIMS', 'mOM', 'vM', 'x'],
merged_compartments={'c': 'Ribosomes'})
# mumax0 = rba_session.findMaxGrowthRate()
rba_session.setMedium(medium_concentrations_from_input(
input=definition_file, condition=condition))
# mumax1 = rba_session.findMaxGrowthRate()
if process_efficiency_estimation_input is not None:
process_efficiencies = determine_apparent_process_efficiencies(growth_rate=growth_Rate_from_input(
input=definition_file, condition=condition), input=process_efficiency_estimation_input, rba_session=rba_session, protein_data=proteome, proteome_summary=correction_results['Summary'], condition=condition, gene_id_col=gene_ID_column)
inject_estimated_efficiencies_into_model(
rba_session, specific_kapps=None, default_kapps=None, process_efficiencies=process_efficiencies)
else:
process_efficiencies = None
protein_scaling_coefficient = 1000 * determine_correction_factor_C(input=definition_file, condition=condition, reference_condition=reference_condition) * \
correction_results['Correction_factors']['A'] * \
correction_results['Correction_factors']['B']/6.022e23
# protein_scaling_coefficient = 1000 * correction_results['Correction_factors']['A'] * correction_results['Correction_factors']['B']/6.022e23
proteome[condition] *= protein_scaling_coefficient
Specific_Kapps = rba_session.estimate_specific_Kapps(proteomicsData=build_input_proteome_for_specific_kapp_estimation(proteome, condition),
flux_bounds=flux_bounds_from_input(
input=definition_file, condition=condition, specific_exchanges=None),
mu=growth_Rate_from_input(
input=definition_file, condition=condition),
biomass_function=None,
target_biomass_function=True)
# Specific_Kapps.loc[(Specific_Kapps['Kapp'] <= 1000000) &
# (Specific_Kapps['Kapp'] >= 1), 'Kapp'].hist()
# plt.show()
# mumax2 = rba_session.findMaxGrowthRate()
if default_kapps_provided is None:
Default_Kapps = rba_session.estimate_default_Kapps(target_mu=growth_Rate_from_input(input=definition_file, condition=condition), compartment_densities_and_PGs=build_input_for_default_kapp_estimation(
correction_results), flux_bounds=flux_bounds_from_input(input=definition_file, condition=condition, specific_exchanges=None), mu_approximation_precision=0.01)
inject_estimated_efficiencies_into_model(rba_session, specific_kapps=None, default_kapps={
'default_kapp': Default_Kapps.iloc[-1, 2], 'default_transporter_kapp': Default_Kapps.iloc[-1, 3]}, process_efficiencies=None)
else:
inject_estimated_efficiencies_into_model(
rba_session, specific_kapps=None, default_kapps=default_kapps_provided, process_efficiencies=None)
Default_Kapps = default_kapps_provided
inject_estimated_efficiencies_into_model(
rba_session, specific_kapps=Specific_Kapps, default_kapps=None, process_efficiencies=None)
# mumax3 = rba_session.findMaxGrowthRate()
compartment_densities_and_PGs = build_input_for_default_kapp_estimation(correction_results)
for comp in list(compartment_densities_and_PGs['Compartment_ID']):
rba_session.model.parameters.functions._elements_by_id[str(
'fraction_protein_'+comp)].parameters._elements_by_id['CONSTANT'].value = compartment_densities_and_PGs.loc[compartment_densities_and_PGs['Compartment_ID'] == comp, 'Density']
rba_session.model.parameters.functions._elements_by_id[str(
'fraction_non_enzymatic_protein_'+comp)].parameters._elements_by_id['CONSTANT'].value = compartment_densities_and_PGs.loc[compartment_densities_and_PGs['Compartment_ID'] == comp, 'PG_fraction']
rba_session.rebuild_from_model()
rba_session.addExchangeReactions()
rba_session.setMedium(medium_concentrations_from_input(
input=definition_file, condition=condition))
# FBs = flux_bounds_from_input(
# input=definition_file, condition=condition, specific_exchanges=None)
#rba_session.Problem.setLB(dict(zip(list(FBs['Reaction_ID']), list(FBs['LB']))))
# rba_session.Problem.setLB({FBs.loc[i, 'Reaction_ID']: FBs.loc[i, 'LB']
# for i in FBs.index if not pandas.isna(FBs.loc[i, 'LB'])})
# rba_session.Problem.setLB({FBs.loc[i, 'Reaction_ID']: FBs.loc[i, 'UB']
# for i in FBs.index if not pandas.isna(FBs.loc[i, 'UB'])})
#rba_session.Problem.setUB(dict(zip(list(FBs['Reaction_ID']), list(FBs['UB']))))
rba_session.Problem.setLB({'R_EX_cys__L_e': 0, 'R_EX_met__L_e': 0})
rba_session.Problem.setUB({'R_EX_cys__L_e': 0, 'R_EX_met__L_e': 0})
mumax4 = rba_session.findMaxGrowthRate()
rba_session.recordResults('Prokaryotic')
prok_results = copy.deepcopy(rba_session.Results)
rba_session2 = copy.copy(rba_session)
rba_session2.eukaryoticDensities4(CompartmentRelationships=False)
mumax5 = rba_session2.findMaxGrowthRate()
rba_session2.recordResults('Eukaryotic')
# print([Default_Kapps.iloc[-1, 2], Default_Kapps.iloc[-1, 3]])
# print([growth_Rate_from_input(input=definition_file,
# condition=condition), mumax0, mumax1, mumax2, mumax3, mumax4, mumax5])
print(time.time() - t0)
return({'Simulation_Results': prok_results, 'Simulation_Results_Euk': copy.deepcopy(rba_session2.Results), 'Proteome': build_input_proteome_for_specific_kapp_estimation(proteome, condition), 'Correction_Results': correction_results, 'Default_Kapps': Default_Kapps, 'Specific_Kapps': Specific_Kapps, 'Process_Efficiencies': process_efficiencies})
# seaborn.violinplot(x=Specific_Kapps.loc[Specific_Kapps['Kapp'] <= 400000, 'Kapp'])
# Specific_Kapps.loc[(Specific_Kapps['Kapp'] <= 1000000) &
# (Specific_Kapps['Kapp'] >= 1), 'Kapp']).hist()
# plt.show()
# Test predictions
# Given medium predict Mu, Exchanges and Proteome
# Prokaryotic
# Eukaryotic
# 1. import model and uniprot-file and compartment-annotation
## external_annotations for ribosomal-proteins!!! ##
## process-efficiency estimation input ##
## parse input-data properly and add Lahtvee information ##
print('---------------------START----------------------')
Input_Data = pandas.read_csv(
'DataSetsYeastRBACalibration/Calibration_InputDefinition.csv', sep=';', decimal=',', index_col=0)
Process_Efficiency_Estimation_Input = pandas.read_csv(
'DataSetsYeastRBACalibration/Process_Efficiency_Estimation_Input.csv', sep=';', decimal=',')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Uniprot = pandas.read_csv('Yeast_iMM904_RBA_model/uniprot.csv', sep='\t')
Compartment_Annotations_external = pandas.read_csv(
'DataSetsYeastRBACalibration/Manually_curated_Protein_Locations_for_Calibration.csv', index_col=None, sep=';')
Ribosomal_Proteins_Uniprot = pandas.read_csv(
'DataSetsYeastRBACalibration/uniprot_ribosomal_proteins.csv', index_col=None, sep=';')
Hackett_Clim_FCs = pandas.read_csv('DataSetsYeastRBACalibration/Hacket_Clim_ProteinFCs.csv')
Lahtvee_REF = pandas.read_csv('DataSetsYeastRBACalibration/LahtveeRefProteomicsData.csv')
picogram_togram_coefficient = 1e12
Lahtvee_REF['Lahtvee_REF'] *= picogram_togram_coefficient
Lahtvee_REF = Lahtvee_REF.loc[pandas.isna(Lahtvee_REF['Lahtvee_REF']) == False]
ribosomal_proteins = find_ribosomal_proteins(rba_session=Simulation, model_processes=[
'TranslationC', 'TranslationM'], external_annotations=Ribosomal_Proteins_Uniprot)
model_protein_compartment_map = build_model_compartment_map(rba_session=Simulation)
Compartment_Annotations = build_compartment_annotations(
Compartment_Annotations_external=Compartment_Annotations_external, model_protein_compartment_map=model_protein_compartment_map)
print('Annotations to data')
annotations_Lahtvee = build_dataset_annotations(input=Lahtvee_REF, ID_column='Gene', Uniprot=Uniprot,
Compartment_Annotations=Compartment_Annotations, model_protein_compartment_map=model_protein_compartment_map, ribosomal_proteins=ribosomal_proteins)
annotations_Hackett = build_dataset_annotations(input=Hackett_Clim_FCs, ID_column='Gene', Uniprot=Uniprot,
Compartment_Annotations=Compartment_Annotations, model_protein_compartment_map=model_protein_compartment_map, ribosomal_proteins=ribosomal_proteins)
full_annotations = build_full_annotations_from_dataset_annotations(
annotations_list=[annotations_Lahtvee, annotations_Hackett])
####### Bootstrapping-loop starts here #######
restored_Hackett_Data = infer_copy_numbers_from_reference_copy_numbers(fold_changes=Hackett_Clim_FCs, absolute_data=Lahtvee_REF, matching_column_in_fold_change_data='Hackett_C01',
matching_column_in_absolute_data='Lahtvee_REF', conditions_in_fold_change_data_to_restore=['Hackett_C005', 'Hackett_C01', 'Hackett_C016', 'Hackett_C022', 'Hackett_C03'])
restored_Hackett_Data = add_annotations_to_proteome(
input=restored_Hackett_Data, ID_column='ID', annotations=full_annotations)
Lahtvee_REF = add_annotations_to_proteome(
input=Lahtvee_REF, ID_column='Gene', annotations=full_annotations)
# default_kapps_provided={'default_kapp':39673 , 'default_transporter_kapp':396730 }
# default_kapps_provided={'default_kapp':85449 , 'default_transporter_kapp':854490 }
# default_kapps_provided={'default_kapp':128174 , 'default_transporter_kapp':1281740 }
# default_kapps_provided={'default_kapp':280762 , 'default_transporter_kapp':2807620 }
# default_kapps_provided = {'default_kapp': 268555, 'default_transporter_kapp': 2685550}
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C005 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C005', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 39673, 'default_transporter_kapp': 396730})
print('0.05')
print('')
print('')
print('')
print('')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C01 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C01', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 85449, 'default_transporter_kapp': 854490})
print('0.1')
print('')
print('')
print('')
print('')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C016 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C016', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 128174, 'default_transporter_kapp': 1281740})
print('0.16')
print('')
print('')
print('')
print('')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C022 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C022', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 280762, 'default_transporter_kapp': 2807620})
print('0.22')
print('')
print('')
print('')
print('')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C03 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C03', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 280762, 'default_transporter_kapp': 2807620})
print('0.3')
specKapps_005 = pandas.DataFrame(index=list(
Calibration_Hackett_C005['Specific_Kapps']['Enzyme_ID']))
specKapps_005['Hackett_C005'] = list(Calibration_Hackett_C005['Specific_Kapps']['Kapp'])
specKapps_01 = pandas.DataFrame(index=list(Calibration_Hackett_C01['Specific_Kapps']['Enzyme_ID']))
specKapps_01['Hackett_C01'] = list(Calibration_Hackett_C01['Specific_Kapps']['Kapp'])
specKapps_016 = pandas.DataFrame(index=list(
Calibration_Hackett_C016['Specific_Kapps']['Enzyme_ID']))
specKapps_016['Hackett_C016'] = list(Calibration_Hackett_C016['Specific_Kapps']['Kapp'])
specKapps_022 = pandas.DataFrame(index=list(
Calibration_Hackett_C022['Specific_Kapps']['Enzyme_ID']))
specKapps_022['Hackett_C022'] = list(Calibration_Hackett_C022['Specific_Kapps']['Kapp'])
specKapps_03 = pandas.DataFrame(index=list(Calibration_Hackett_C03['Specific_Kapps']['Enzyme_ID']))
specKapps_03['Hackett_C03'] = list(Calibration_Hackett_C03['Specific_Kapps']['Kapp'])
all_spec_Kapps = pandas.concat(
[specKapps_005, specKapps_01, specKapps_016, specKapps_022, specKapps_03], axis=1)
all_spec_Kapps['ID'] = all_spec_Kapps.index
all_spec_Kapps.to_csv('Specific_Kapps_out.csv', sep=';', decimal=',')
process_efficiencies_005 = pandas.DataFrame(index=list(
Calibration_Hackett_C005['Process_Efficiencies']['Process']))
process_efficiencies_005['Hackett_C005'] = list(
Calibration_Hackett_C005['Process_Efficiencies']['Value'])
process_efficiencies_01 = pandas.DataFrame(index=list(
Calibration_Hackett_C01['Process_Efficiencies']['Process']))
process_efficiencies_01['Hackett_C01'] = list(
Calibration_Hackett_C01['Process_Efficiencies']['Value'])
process_efficiencies_016 = pandas.DataFrame(index=list(
Calibration_Hackett_C016['Process_Efficiencies']['Process']))
process_efficiencies_016['Hackett_C016'] = list(
Calibration_Hackett_C016['Process_Efficiencies']['Value'])
process_efficiencies_022 = pandas.DataFrame(index=list(
Calibration_Hackett_C022['Process_Efficiencies']['Process']))
process_efficiencies_022['Hackett_C022'] = list(
Calibration_Hackett_C022['Process_Efficiencies']['Value'])
process_efficiencies_03 = pandas.DataFrame(index=list(
Calibration_Hackett_C03['Process_Efficiencies']['Process']))
process_efficiencies_03['Hackett_C03'] = list(
Calibration_Hackett_C03['Process_Efficiencies']['Value'])
all_process_efficiencies = pandas.concat(
[process_efficiencies_005, process_efficiencies_01, process_efficiencies_016, process_efficiencies_022, process_efficiencies_03], axis=1)
all_process_efficiencies['ID'] = all_process_efficiencies.index
all_process_efficiencies.to_csv('Process_efficiencies_out.csv', sep=';', decimal=',')
########
########
Mus_o2 = [0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.28, 0.3, 0.35, 0.4]
O2_J = [0.8, 1.3, 2.5, 3.9, 5.3, 7, 7.4, 6.1, 5.1, 3.7]
Glc_J = [0.3, 0.6, 1.1, 1.7, 2.3, 2.8, 3.4, 4.5, 8.6, 11.1]
CO2_J = [0.8, 1.4, 2.7, 4.2, 5.7, 7.5, 8, 8.8, 14.9, 18.9]
EtOH_J = [0, 0, 0, 0, 0, 0, 0.11, 2.3, 9.5, 13.9]
Ac_J = [0, 0, 0, 0, 0, 0, 0.08, 0.41, 0.62, 0.6]
Glyc_J = [0, 0, 0, 0, 0, 0, 0, 0, 0.05, 0.15]
## Hackett#
Mu_Hackett = [0.0498630244, 0.1054314572, 0.154377453333333, 0.2126503108, 0.293841410333333]
Glc_Hackett = [0.7367, 1.5462, 2.1722, 5.1571, 9.5962]
EtOH_Hackett = [0.0127, 0.0529, 0.1084, 4.6066, 14.0672]
Ac_Hackett = [0.0017, 0.0031, 0.0052, 0.4433, 0.8851]
Glyc_Hackett = [0.0035, 0.0077, 0.0065, 0.0579, 0.1699]
conditions = ['Hackett_C005', 'Hackett_C01', 'Hackett_C016', 'Hackett_C022', 'Hackett_C03']
Mus_predicted = [Calibration_Hackett_C005['Simulation_Results']['Mu'].loc['Mu', 'Prokaryotic'],
Calibration_Hackett_C01['Simulation_Results']['Mu'].loc['Mu', 'Prokaryotic'],
Calibration_Hackett_C016['Simulation_Results']['Mu'].loc['Mu', 'Prokaryotic'],
Calibration_Hackett_C022['Simulation_Results']['Mu'].loc['Mu', 'Prokaryotic'],
Calibration_Hackett_C03['Simulation_Results']['Mu'].loc['Mu', 'Prokaryotic']]
Mus_predicted_euk = [Calibration_Hackett_C005['Simulation_Results_Euk']['Mu'].loc['Mu', 'Eukaryotic'],
Calibration_Hackett_C01['Simulation_Results_Euk']['Mu'].loc['Mu', 'Eukaryotic'],
Calibration_Hackett_C016['Simulation_Results_Euk']['Mu'].loc['Mu', 'Eukaryotic'],
Calibration_Hackett_C022['Simulation_Results_Euk']['Mu'].loc['Mu', 'Eukaryotic'],
Calibration_Hackett_C03['Simulation_Results_Euk']['Mu'].loc['Mu', 'Eukaryotic']]
Glc_Exchange_predicted = [abs(Calibration_Hackett_C005['Simulation_Results']['ExchangeFluxes'].loc['M_glc__D', 'Prokaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results']
['ExchangeFluxes'].loc['M_glc__D', 'Prokaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results']
['ExchangeFluxes'].loc['M_glc__D', 'Prokaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results']
['ExchangeFluxes'].loc['M_glc__D', 'Prokaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results']['ExchangeFluxes'].loc['M_glc__D', 'Prokaryotic'])]
EtOH_Exchange_predicted = [abs(Calibration_Hackett_C005['Simulation_Results']['ExchangeFluxes'].loc['M_etoh', 'Prokaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results']
['ExchangeFluxes'].loc['M_etoh', 'Prokaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results']
['ExchangeFluxes'].loc['M_etoh', 'Prokaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results']
['ExchangeFluxes'].loc['M_etoh', 'Prokaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results']['ExchangeFluxes'].loc['M_etoh', 'Prokaryotic'])]
Ac_Exchange_predicted = [abs(Calibration_Hackett_C005['Simulation_Results']['ExchangeFluxes'].loc['M_ac', 'Prokaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results']
['ExchangeFluxes'].loc['M_ac', 'Prokaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results']
['ExchangeFluxes'].loc['M_ac', 'Prokaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results']
['ExchangeFluxes'].loc['M_ac', 'Prokaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results']['ExchangeFluxes'].loc['M_ac', 'Prokaryotic'])]
O2_Exchange_predicted = [abs(Calibration_Hackett_C005['Simulation_Results']['ExchangeFluxes'].loc['M_o2', 'Prokaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results']
['ExchangeFluxes'].loc['M_o2', 'Prokaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results']
['ExchangeFluxes'].loc['M_o2', 'Prokaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results']
['ExchangeFluxes'].loc['M_o2', 'Prokaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results']['ExchangeFluxes'].loc['M_o2', 'Prokaryotic'])]
Glycerol_Exchange_predicted = [abs(Calibration_Hackett_C005['Simulation_Results']['ExchangeFluxes'].loc['M_glyc', 'Prokaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results']
['ExchangeFluxes'].loc['M_glyc', 'Prokaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results']
['ExchangeFluxes'].loc['M_glyc', 'Prokaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results']
['ExchangeFluxes'].loc['M_glyc', 'Prokaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results']['ExchangeFluxes'].loc['M_glyc', 'Prokaryotic'])]
###
Glc_Exchange_predicted_euk = [abs(Calibration_Hackett_C005['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_glc__D', 'Eukaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_glc__D', 'Eukaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_glc__D', 'Eukaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_glc__D', 'Eukaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_glc__D', 'Eukaryotic'])]
EtOH_Exchange_predicted_euk = [abs(Calibration_Hackett_C005['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_etoh', 'Eukaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_etoh', 'Eukaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_etoh', 'Eukaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_etoh', 'Eukaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_etoh', 'Eukaryotic'])]
Ac_Exchange_predicted_euk = [abs(Calibration_Hackett_C005['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_ac', 'Eukaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_ac', 'Eukaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_ac', 'Eukaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_ac', 'Eukaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_ac', 'Eukaryotic'])]
O2_Exchange_predicted_euk = [abs(Calibration_Hackett_C005['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_o2', 'Eukaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_o2', 'Eukaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_o2', 'Eukaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_o2', 'Eukaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_o2', 'Eukaryotic'])]
Glycerol_Exchange_predicted_euk = [abs(Calibration_Hackett_C005['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_glyc', 'Eukaryotic']),
abs(Calibration_Hackett_C01['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_glyc', 'Eukaryotic']),
abs(Calibration_Hackett_C016['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_glyc', 'Eukaryotic']),
abs(Calibration_Hackett_C022['Simulation_Results_Euk']
['ExchangeFluxes'].loc['M_glyc', 'Eukaryotic']),
abs(Calibration_Hackett_C03['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_glyc', 'Eukaryotic'])]
###
fig, axs = plt.subplots(2, 3, figsize=(28, 7), sharex=True)
# plt.figure()
axs[0, 0].plot(Mu_Hackett, Mu_Hackett, color='lightgreen')
axs[0, 0].scatter(Mu_Hackett, Mus_predicted, color='black')
axs[0, 0].scatter(Mu_Hackett, Mus_predicted_euk, color='red')
axs[0, 0].legend(['Hackett', 'Prok.', 'Euk.'])
axs[0, 0].set_title('Predicted vs measured growth-rate')
axs[0, 0].set_ylabel('$\mu$ [$h^{-1}$]')
axs[0, 0].set_xlabel('$\mu$ [$h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
# plt.figure()
axs[0, 1].plot(Mus_o2, Glc_J, color='lightblue')
axs[0, 1].plot(Mu_Hackett, Glc_Hackett, color='lightgreen')
axs[0, 1].scatter(Mus_predicted, Glc_Exchange_predicted, color='black', alpha=0.8)
axs[0, 1].scatter(Mus_predicted, Glc_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_glc__D',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_glc__D',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[0, 1].legend(['van Hoek', 'Hackett', 'Prok.', 'Euk.'])
axs[0, 1].set_title('Glucose-uptake rate')
axs[0, 1].set_xlabel('$\mu$ [$h^{-1}$]')
axs[0, 1].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
# plt.figure()
axs[0, 2].plot(Mus_o2, O2_J, color='lightblue')
# plt.plot(Mu_Hackett,Glc_Hackett,color='lightgreen')
axs[0, 2].scatter(Mus_predicted, O2_Exchange_predicted, color='black', alpha=0.8)
axs[0, 2].scatter(Mus_predicted, O2_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_glc__D',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_glc__D',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[0, 2].legend(['van Hoek', 'Prok.', 'Euk.'])
axs[0, 2].set_title('Oxygen-uptake rate')
axs[0, 2].set_xlabel('$\mu$ [$h^{-1}$]')
axs[0, 2].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
# plt.figure()
axs[1, 0].plot(Mus_o2, EtOH_J, color='lightblue')
axs[1, 0].plot(Mu_Hackett, EtOH_Hackett, color='lightgreen')
axs[1, 0].scatter(Mus_predicted, EtOH_Exchange_predicted, color='black', alpha=0.8)
axs[1, 0].scatter(Mus_predicted, EtOH_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_glc__D',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_glc__D',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[1, 0].legend(['van Hoek', 'Hackett', 'Prok.', 'Euk.'])
axs[1, 0].set_title('Ethanol-excretion rate')
axs[1, 0].set_xlabel('$\mu$ [$h^{-1}$]')
axs[1, 0].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
# plt.figure()
axs[1, 1].plot(Mus_o2, Ac_J, color='lightblue')
axs[1, 1].plot(Mu_Hackett, Ac_Hackett, color='lightgreen')
axs[1, 1].scatter(Mus_predicted, Ac_Exchange_predicted, color='black', alpha=0.8)
axs[1, 1].scatter(Mus_predicted, Ac_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_ac',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_ac',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[1, 1].legend(['van Hoek', 'Hackett', 'Prok.', 'Euk.'])
axs[1, 1].set_title('Acetate-excretion rate')
axs[1, 1].set_xlabel('$\mu$ [$h^{-1}$]')
axs[1, 1].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
axs[1, 2].plot(Mus_o2, Glyc_J, color='lightblue')
axs[1, 2].plot(Mu_Hackett, Glyc_Hackett, color='lightgreen')
axs[1, 2].scatter(Mus_predicted, Glycerol_Exchange_predicted, color='black', alpha=0.8)
axs[1, 2].scatter(Mus_predicted, Glycerol_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_ac',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_ac',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[1, 2].legend(['van Hoek', 'Hackett', 'Prok.', 'Euk.'])
axs[1, 2].set_title('Glycerol-excretion rate')
axs[1, 2].set_xlabel('$\mu$ [$h^{-1}$]')
axs[1, 2].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
plt.show()
protein_comparison_005 = pandas.DataFrame()
for i in list(set(list(Calibration_Hackett_C005['Simulation_Results']['ProtoProteins'].index)+list(Calibration_Hackett_C005['Proteome']['ID']))):
protein_comparison_005.loc[i, 'ID'] = i
if i in list(Calibration_Hackett_C005['Simulation_Results']['ProtoProteins'].index):
protein_comparison_005.loc[i, 'Predicted'] = 6.023e20 * \
Calibration_Hackett_C005['Simulation_Results']['ProtoProteins'].loc[i].values[0]
if i in list(Calibration_Hackett_C005['Proteome']['ID']):
protein_comparison_005.loc[i, 'Measured'] = 6.023e20 * \
Calibration_Hackett_C005['Proteome'].loc[Calibration_Hackett_C005['Proteome']
['ID'] == i, 'copy_number'].values[0]
protein_comparison_01 = pandas.DataFrame()
for i in list(set(list(Calibration_Hackett_C01['Simulation_Results']['ProtoProteins'].index)+list(Calibration_Hackett_C01['Proteome']['ID']))):
protein_comparison_01.loc[i, 'ID'] = i
if i in list(Calibration_Hackett_C01['Simulation_Results']['ProtoProteins'].index):
protein_comparison_01.loc[i, 'Predicted'] = 6.023e20 * \
Calibration_Hackett_C01['Simulation_Results']['ProtoProteins'].loc[i].values[0]
if i in list(Calibration_Hackett_C01['Proteome']['ID']):
protein_comparison_01.loc[i, 'Measured'] = 6.023e20 * \
Calibration_Hackett_C01['Proteome'].loc[Calibration_Hackett_C01['Proteome']
['ID'] == i, 'copy_number'].values[0]
protein_comparison_016 = pandas.DataFrame()
for i in list(set(list(Calibration_Hackett_C016['Simulation_Results']['ProtoProteins'].index)+list(Calibration_Hackett_C016['Proteome']['ID']))):
protein_comparison_016.loc[i, 'ID'] = i
if i in list(Calibration_Hackett_C016['Simulation_Results']['ProtoProteins'].index):
protein_comparison_016.loc[i, 'Predicted'] = 6.023e20 * \
Calibration_Hackett_C016['Simulation_Results']['ProtoProteins'].loc[i].values[0]
if i in list(Calibration_Hackett_C016['Proteome']['ID']):
protein_comparison_016.loc[i, 'Measured'] = 6.023e20 * \
Calibration_Hackett_C016['Proteome'].loc[Calibration_Hackett_C016['Proteome']
['ID'] == i, 'copy_number'].values[0]
protein_comparison_022 = pandas.DataFrame()
for i in list(set(list(Calibration_Hackett_C022['Simulation_Results']['ProtoProteins'].index)+list(Calibration_Hackett_C022['Proteome']['ID']))):
protein_comparison_022.loc[i, 'ID'] = i
if i in list(Calibration_Hackett_C022['Simulation_Results']['ProtoProteins'].index):
protein_comparison_022.loc[i, 'Predicted'] = 6.023e20 * \
Calibration_Hackett_C022['Simulation_Results']['ProtoProteins'].loc[i].values[0]
if i in list(Calibration_Hackett_C022['Proteome']['ID']):
protein_comparison_022.loc[i, 'Measured'] = 6.023e20 * \
Calibration_Hackett_C022['Proteome'].loc[Calibration_Hackett_C022['Proteome']
['ID'] == i, 'copy_number'].values[0]
protein_comparison_03 = pandas.DataFrame()
for i in list(set(list(Calibration_Hackett_C03['Simulation_Results']['ProtoProteins'].index)+list(Calibration_Hackett_C03['Proteome']['ID']))):
protein_comparison_03.loc[i, 'ID'] = i
if i in list(Calibration_Hackett_C03['Simulation_Results']['ProtoProteins'].index):
protein_comparison_03.loc[i, 'Predicted'] = 6.023e20 * \
Calibration_Hackett_C03['Simulation_Results']['ProtoProteins'].loc[i].values[0]
if i in list(Calibration_Hackett_C03['Proteome']['ID']):
protein_comparison_03.loc[i, 'Measured'] = 6.023e20 * \
Calibration_Hackett_C03['Proteome'].loc[Calibration_Hackett_C03['Proteome']
['ID'] == i, 'copy_number'].values[0]
fig, axs = plt.subplots(2, 3, figsize=(28, 7), sharex=True)
predidcted_proteins = protein_comparison_005.loc[(pandas.isna(protein_comparison_005['Predicted']) == False) & (
pandas.isna(protein_comparison_005['Measured']) == False), 'Predicted']
quantified_proteins = protein_comparison_005.loc[(pandas.isna(protein_comparison_005['Predicted']) == False) & (
pandas.isna(protein_comparison_005['Measured']) == False), 'Measured']
x_reg = numpy.reshape(numpy.array(list(predidcted_proteins)), (len(list(predidcted_proteins)), 1))
y_reg = numpy.reshape(numpy.array(list(quantified_proteins)), (len(list(quantified_proteins)), 1))
regressor = LinearRegression(fit_intercept=False)
regressor.fit(x_reg, y_reg)
predictions = regressor.predict(x_reg)
axs[0, 0].scatter(numpy.log10(protein_comparison_005['Predicted']),
numpy.log10(protein_comparison_005['Measured']))
axs[0, 0].plot([11, 17], [11, 17], color='green')
axs[0, 0].plot(numpy.log10(x_reg), numpy.log10(predictions), color='red', linewidth=2)
axs[0, 0].legend(['Identity', str(regressor.coef_), 'Data'])
axs[0, 0].set_title('Protein-Protein (Log10) 0.05')
axs[0, 0].set_xlabel('Predicted')
axs[0, 0].set_ylabel('Measured')
predidcted_proteins = protein_comparison_01.loc[(pandas.isna(protein_comparison_01['Predicted']) == False) & (
pandas.isna(protein_comparison_01['Measured']) == False), 'Predicted']
quantified_proteins = protein_comparison_01.loc[(pandas.isna(protein_comparison_01['Predicted']) == False) & (
pandas.isna(protein_comparison_01['Measured']) == False), 'Measured']
x_reg = numpy.reshape(numpy.array(list(predidcted_proteins)), (len(list(predidcted_proteins)), 1))
y_reg = numpy.reshape(numpy.array(list(quantified_proteins)), (len(list(quantified_proteins)), 1))
regressor = LinearRegression(fit_intercept=False)
regressor.fit(x_reg, y_reg)
predictions = regressor.predict(x_reg)
axs[0, 1].scatter(numpy.log10(protein_comparison_01['Predicted']),
numpy.log10(protein_comparison_01['Measured']))
axs[0, 1].plot([11, 17], [11, 17], color='green')
axs[0, 1].plot(numpy.log10(x_reg), numpy.log10(predictions), color='red', linewidth=2)
axs[0, 1].legend(['Identity', str(regressor.coef_), 'Data'])
axs[0, 1].set_title('Protein-Protein (Log10) 0.1')
axs[0, 1].set_xlabel('Predicted')
axs[0, 1].set_ylabel('Measured')
predidcted_proteins = protein_comparison_016.loc[(pandas.isna(protein_comparison_016['Predicted']) == False) & (
pandas.isna(protein_comparison_016['Measured']) == False), 'Predicted']
quantified_proteins = protein_comparison_016.loc[(pandas.isna(protein_comparison_016['Predicted']) == False) & (
pandas.isna(protein_comparison_016['Measured']) == False), 'Measured']
x_reg = numpy.reshape(numpy.array(list(predidcted_proteins)), (len(list(predidcted_proteins)), 1))
y_reg = numpy.reshape(numpy.array(list(quantified_proteins)), (len(list(quantified_proteins)), 1))
regressor = LinearRegression(fit_intercept=False)
regressor.fit(x_reg, y_reg)
predictions = regressor.predict(x_reg)
axs[0, 2].scatter(numpy.log10(protein_comparison_016['Predicted']),
numpy.log10(protein_comparison_016['Measured']))
axs[0, 2].plot([11, 17], [11, 17], color='green')
axs[0, 2].plot(numpy.log10(x_reg), numpy.log10(predictions), color='red', linewidth=2)
axs[0, 2].legend(['Identity', str(regressor.coef_), 'Data'])
axs[0, 2].set_title('Protein-Protein (Log10) 0.16')
axs[0, 2].set_xlabel('Predicted')
axs[0, 2].set_ylabel('Measured')
predidcted_proteins = protein_comparison_022.loc[(pandas.isna(protein_comparison_022['Predicted']) == False) & (
pandas.isna(protein_comparison_022['Measured']) == False), 'Predicted']
quantified_proteins = protein_comparison_022.loc[(pandas.isna(protein_comparison_022['Predicted']) == False) & (
| pandas.isna(protein_comparison_022['Measured']) | pandas.isna |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os,sys
# In[2]:
sys.path.insert(0,"./../") #so we can import our modules properly
# In[3]:
get_ipython().run_line_magic('matplotlib', 'notebook')
#auto reload changed modules
from IPython import get_ipython
ipython = get_ipython()
ipython.magic("pylab")
ipython.magic("load_ext autoreload")
ipython.magic("autoreload 2")
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
import pandas as pd
from src.const import * # defines many of the variables used below
from src.db import *
from src.utils import *
from src.preprocessing import *
from pathlib import Path
from mysql.connector import MySQLConnection, Error
# In[4]:
if PUBLIC:
cursor = None
else:
config = read_db_config('./../program_config.ini', section='mysql_nonshiftable')
conn = {}
cursor = {}
conn = MySQLConnection(**config)
cursor = conn.cursor()
# ## House A
# In[5]:
filteredSensorListDB = ['_additional_power', '_dishwasher_power', '_exp_power', '_hp_power', '_imp_power', '_sauna_power', '_stove_power', '_washing_machine_power']
dfsA = []
filteredSensorListA = []
capPeriodsA = []
getRawData(dfsA, filteredSensorListA, capPeriodsA,rawDataBaseDir, startDate=startDateA, endDate=endDateA,cursor=cursor, key=keyA, cip=cipA, filteredSensorListDB=filteredSensorListDB)
filteredSensorListA_OG = filteredSensorListA.copy()
# further preprocessing (indexing, rounding, interpolating, all in one df)
# In[6]:
alreadyProcessed = []
#capPeriodsA = [getCapturePeriodForSensorName(cursor, name) for name in filteredSensorListA_OG]
roundDuplicateEliminateInterpolateResample(dfsA,filteredSensorListA, alreadyProcessed,capPeriodsA)
# In[7]:
dfA_300s = combineDfs(dfsA,filteredSensorListA,startDateA,endDateA,"300s",300,capPeriodsA)
# rename 'A_imp_power' to 'A_total_cons_power' for consistency
# In[8]:
dfA_300s.rename(columns={'A_imp_power' : 'A_total_cons_power'}, inplace=True)
# In[9]:
pathA5min = "datasets/dfA_300s.hdf"
if os.path.exists(pathA5min):
dfA_300s = | pd.read_hdf(pathA5min, 'data') | pandas.read_hdf |
from Bio import AlignIO, SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
from Bio.Alphabet import SingleLetterAlphabet
import numpy as np
import pandas as pd
import os
import pickle
from treetime import TreeTime
from treetime.utils import parse_dates
########## File and directory location ##########
DATA_FOLDER = "data/"
INTERMEDIATE_FOLDER = "intermediate_files/"
ALIGNMENT_FOLDER = DATA_FOLDER + "alignments/"
RAW_DATA = DATA_FOLDER + "raw/pol.fasta"
RAW_SUB_SAMPLED = DATA_FOLDER + "raw/pol_subsampled.fasta"
REFERENCE_HXB2 = DATA_FOLDER + "reference/HXB2.fasta"
RAW_ALIGNMENT = ALIGNMENT_FOLDER + "raw/pol.fasta"
HXB2_ALIGNMENT = ALIGNMENT_FOLDER + "to_HXB2/pol.fasta"
HXB2_ALIGNMENT_META = ALIGNMENT_FOLDER + "to_HXB2/pol_metadata.csv"
HXB2_ALIGNMENT_TREE = ALIGNMENT_FOLDER + "to_HXB2/pol_tree.nwk"
########## What I used to create / filter the data ##########
def sub_sample_raw_data():
"""
Subsample the raw fasta file for easier analysis. Saves the sub_sampled file in the same directory.
Adds HXB2 sequence as the first sequence.
"""
nb_sample = 1000
# This is not random so it contains mainly sequences from the same time
# record = list(SeqIO.parse(DATA_FOLDER + "raw/pol.fasta", "fasta"))
# sub_sampled = record[:nb_sample]
# This is random
os.system(f"seqtk sample -s100 {RAW_DATA} {nb_sample} > {RAW_SUB_SAMPLED}")
sub_sampled = list(SeqIO.parse(RAW_SUB_SAMPLED, "fasta"))
sub_sampled = insert_sequence(sub_sampled, REFERENCE_HXB2)
SeqIO.write(sub_sampled, RAW_SUB_SAMPLED, "fasta")
def align_subsampled_data():
"""
Uses mafft to align the sequences in the subsampled data.
"""
os.system(f"mafft {RAW_SUB_SAMPLED} > {RAW_ALIGNMENT}")
def MSA_pol_HXB2():
"""
Uses the raw subsampled sequences, aligns them to HXB2 and removes regions that correspond to gaps in HXB2.
Saves the newly obtained MultipleSeqAlignment in fasta format.
"""
alignment = AlignIO.read(RAW_ALIGNMENT, 'fasta')
alignment = remove_gaps(alignment, ref_row=0)
alignment = get_pol_region(alignment)
AlignIO.write([alignment], HXB2_ALIGNMENT, "fasta")
def make_metadata():
"""
Creates the metadata file from the names in the alignment. Saves it to the same folder as the alignment.
"""
alignment = AlignIO.read(HXB2_ALIGNMENT, "fasta")
df = metadata_from_names(alignment)
df.to_csv(HXB2_ALIGNMENT_META, index=False)
def make_FastTree():
"""
Uses Treetime on created the alignment.
"""
os.system(f"fasttree -nt {HXB2_ALIGNMENT} > {HXB2_ALIGNMENT_TREE}")
def make_TreeTime():
"""
Runs treetime and saves the results.
"""
dates = parse_dates(HXB2_ALIGNMENT_META)
tree = TreeTime(gtr="Jukes-Cantor", tree=HXB2_ALIGNMENT_TREE,
precision=1, aln=HXB2_ALIGNMENT, verbose=2, dates=dates)
result = tree.run(root='best', infer_gtr=True, relaxed_clock=False, max_iter=2,
branch_length_mode='input', n_iqd=3, resolve_polytomies=True,
Tc='skyline', time_marginal="assign", vary_rate=True)
assert result, "Error while running the tree."
return tree
########## Helper functions #########
def insert_sequence(record, sequence_file):
"Insert the sequence sequence at the beginning of the file."
record.insert(0, SeqIO.read(sequence_file, "fasta"))
return record
def remove_gaps(alignment, ref_row=0):
"""
Removes the column where the reference has gaps and return the obtained alignment.
"""
alignment_array = np.array(alignment)
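# keep only the alignment columns where the reference row has a nucleotide (i.e. no '-' gap)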
idxs = np.where(alignment_array[ref_row, :] != "-")[0]
alignment_array = alignment_array[:, idxs]
seq_list = []
for ii in range(alignment_array.shape[0]):
seq = "".join(alignment_array[ii, :])
seq_list += [SeqRecord(Seq(seq, SingleLetterAlphabet()), id=alignment[ii].id,
name=alignment[ii].name, description="")]
alignment = MultipleSeqAlignment(seq_list)
return alignment
def get_pol_region(alignment):
"""
Slices the alignment to get the pol region only. Uses HXB2 coordinatese for the selection.
"""
HXB2_start = 2084
HXB2_stop = 5095
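# HXB2_start/HXB2_stop are positions in HXB2 coordinates (the alignment was collapsed to HXB2 columns above);
# the +1 below makes the stop column inclusive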
return alignment[:, HXB2_start:HXB2_stop + 1]
def metadata_from_names(alignment):
"""
Creates a metadata tsv file from the MSA using the names of the sequences.
"""
columns = ["subtype", "country", "date", "name"]
raw_names = [seq.name for seq in alignment]
df = | pd.DataFrame(data=None, index=None, columns=columns) | pandas.DataFrame |
# Copyright (c) 2018-2020, NVIDIA CORPORATION.
import copy
import itertools
import os
from io import BytesIO, StringIO
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.tests.utils import DATETIME_TYPES, NUMERIC_TYPES, assert_eq
def make_numeric_dataframe(nrows, dtype):
df = | pd.DataFrame() | pandas.DataFrame |
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col].convert_objects(convert_numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['var'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
reader = StataReader(path)
parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
assert reader.data_label == data_label
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path), InvalidColumnName)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ['x', 'y']
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
columns=columns)
original.index.name = 'index_not_written'
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
tm.assertRaises(KeyError,
lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
s1 = Series(['a', 'A longer string'])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = | Series([1, 2 ** 63 - 1], dtype=np.int64) | pandas.core.frame.Series |
#! /usr/bin/env python
# -*- encoding: utf-8 -*-
import pandas as pd
import numpy as np
print(pd.__version__) # 0.23.4
def np_array():
deftype = ([('date', np.str_, 10), ('close', np.float32), ('vol', np.uint32)])
stock = np.array([('2019-01-11', 11.01, 1300000),
('2019-01-12', 12.11, 1200000),
('2019-01-13', 15.01, 1500000),
('2019-01-14', 13.01, 1600000,)], dtype=deftype)
print(stock)
"""
[('2019-01-11', 11.01, 1300000) ('2019-01-12', 12.11, 1200000)
('2019-01-13', 15.01, 1500000) ('2019-01-14', 13.01, 1600000)]
"""
# 4.2 Creating and accessing a Series
def pd_code1():
# Creating a Series #
# data = list
s_list = pd.Series([-1.55666192, 0.127451231, "str-AA", -1.37775038],
index=['2019-01-11', '2019-01-12', '2019-01-13', '2019-01-14'])
print(s_list) # the list mixes several data types
# data = ndarray
s_ndarray = pd.Series(np.arange(4), index=['2019-01-11', '2019-01-12', '2019-01-13', '2019-01-14'])
print(s_ndarray)
"""
2019-01-11 0
2019-01-12 1
2019-01-13 2
2019-01-14 3
dtype: int64
"""
# data = scalar value
s_scalar = pd.Series(5., index=['2019-01-11', '2019-01-12', '2019-01-13', '2019-01-14'], dtype='int8')
print(s_scalar) # dtype coerces the elements to 'int8'
"""
2019-01-11 5
2019-01-12 5
2019-01-13 5
2019-01-14 5
dtype: int8
"""
# data = dict
s_dict = pd.Series({'2019-01-11': 0., '2019-01-12': 1., '2019-01-13': 2., '2019-01-14': 3.})
print(s_dict)
"""
2019-01-11 0.0
2019-01-12 1.0
2019-01-13 2.0
2019-01-14 3.0
dtype: float64
"""
# data = dict
s_dict = pd.Series({'2019-01-11': 0., '2019-01-12': 1.},
index=['2019-01-11', '2019-01-12', '2019-01-13', '2019-01-14'])
print(s_dict) # fewer values than index labels, so the missing positions become NaN
"""
2019-01-11 0.0
2019-01-12 1.0
2019-01-13 NaN
2019-01-14 NaN
dtype: float64
"""
# Accessing a Series
# Create the Series object to be accessed
series_access = pd.Series([10.23, 11.24, 12.25, 13.26],
index=['2019-01-11', '2019-01-12', '2019-01-13', '2019-01-14'])
print(series_access)
"""
2019-01-11 10.23
2019-01-12 11.24
2019-01-13 12.25
2019-01-14 13.26
dtype: float64
"""
# Access all element values of the Series
print(series_access.values)
# [10.23 11.24 12.25 13.26]
# Access all index values of the Series
print(series_access.index)
# Index(['2019-01-11', '2019-01-12', '2019-01-13', '2019-01-14'], dtype='object')
# Access the element at index '2019-01-11'
print(series_access['2019-01-11'])
# 10.23
# Access the elements at indexes '2019-01-11' and '2019-01-13'
print(series_access[['2019-01-11', '2019-01-13']])
"""
2019-01-11 10.23
2019-01-13 12.25
dtype: float64
"""
# Access the first two entries
print(series_access[:2])
"""
2019-01-11 10.23
2019-01-12 11.24
dtype: float64
"""
# 4.3 Creating and accessing a DataFrame
def pd_code2():
# Creating a DataFrame
# Create a DataFrame from a dict of lists
df_list_dict = pd.DataFrame({'Close': [1., 2., 3., 5], 'Open': [1., 2., 3., 4.]},
index=['2019-01-11', '2019-01-12', '2019-01-13', '2019-01-14'])
print(df_list_dict) # builds a 4-row by 2-column table
"""
Close Open
2019-01-11 1.0 1.0
2019-01-12 2.0 2.0
2019-01-13 3.0 3.0
2019-01-14 5.0 4.0
"""
# Create a DataFrame from nested lists
df_list_list = pd.DataFrame([[1., 2., 3., 5], [1., 2., 3., 4.]],
index=['2019-01-11', '2019-01-12'],
columns=['Close', 'Open', 'Low', 'High'])
print(df_list_list)
"""
Close Open Low High
2019-01-11 1.0 2.0 3.0 5.0
2019-01-12 1.0 2.0 3.0 4.0
"""
# Create a DataFrame from a structured (record) ndarray
ndarray_data = np.zeros((2), dtype=[('Close', 'i4'), ('Open', 'f4'), ('Low', 'a10')]) # integer, float and string fields
print(ndarray_data)
"""
[(0, 0., b'') (0, 0., b'')]
"""
ndarray_data[:] = [(1, 2., '11.2'), (2, 3., "12.3")]
df_ndarray = pd.DataFrame(data=ndarray_data,
index=['2019-01-11', '2019-01-12']) # uses the default column order; passing columns= would arrange the fields in the given order
print(df_ndarray)
"""
Close Open Low
2019-01-11 1 2.0 b'11.2'
2019-01-12 2 3.0 b'12.3'
"""
# Create a DataFrame from a dict of Series
series_data = {'Close': pd.Series([1., 2., 3.], index=['2019-01-11', '2019-01-12', '2019-01-13']),
'Open': pd.Series([1., 2., 3., 4.],
index=['2019-01-11', '2019-01-12', '2019-01-13', '2019-01-14'])}
df_series = pd.DataFrame(series_data)
print(df_series)
"""
Close Open
2019-01-11 1.0 1.0
2019-01-12 2.0 2.0
2019-01-13 3.0 3.0
2019-01-14 NaN 4.0
"""
df_dict_list = pd.DataFrame([{'Close': 1, 'Open': 2}, {'Close': 5, 'Open': 10, 'High': 20}],
index=['2019-01-11', '2019-01-12'])
# if no row index is given, the DataFrame adds a default integer index automatically
print(df_dict_list)
"""
Close High Open
2019-01-11 1 NaN 2
2019-01-12 5 20.0 10
"""
# Create the DataFrame object to be accessed
series_data = {'Close': pd.Series([10.51, 10.52, 10.53, 10.54],
index=['2019-01-11', '2019-01-12', '2019-01-13', '2019-01-14']),
'Open': pd.Series([12.31, 12.32, 12.33, 12.34],
index=['2019-01-11', '2019-01-12', '2019-01-13', '2019-01-14'])}
df_access = pd.DataFrame(series_data)
print(df_access)
"""
Close Open
2019-01-11 10.51 12.31
2019-01-12 10.52 12.32
2019-01-13 10.53 12.33
2019-01-14 10.54 12.34
"""
# Accessing a DataFrame
print("*********************** access all elements / one row or column *******************")
# Access all row index values of the DataFrame
print(df_access.index)
# Index(['2019-01-11', '2019-01-12', '2019-01-13', '2019-01-14'], dtype='object')
# Access all column index values of the DataFrame
print(df_access.columns)
# Index(['Close', 'Open'], dtype='object')
# Access both the row and the column index of the DataFrame
print(df_access.axes)
# [Index(['2019-01-11', '2019-01-12', '2019-01-13', '2019-01-14'], dtype='object'), Index(['Close', 'Open'], dtype='object')]
# Access all element values of the DataFrame
print(df_access.values)
"""
[[10.51 12.31]
[10.52 12.32]
[10.53 12.33]
[10.54 12.34]]
"""
# Access the contents of one column
print(df_access['Open'])
print(df_access.Open)
"""
2019-01-11 12.31
2019-01-12 12.32
2019-01-13 12.33
2019-01-14 12.34
Name: Open, dtype: float64
"""
print(type(df_access['Open'])) # check the type of a single column
# <class 'pandas.core.series.Series'>
# Access the contents of one row
print(df_access[0:1])
"""
Close Open
2019-01-11 10.51 12.31
"""
print(type(df_access[0:1])) # check the type of a row slice
# <class 'pandas.core.frame.DataFrame'>
print("***************************DataFrame.iloc***************************")
# Select the 'Close' and 'Open' columns of the row labelled '2019-01-11'
print(df_access.loc[['2019-01-11', ], ['Close', 'Open']])
"""
Close Open
2019-01-11 10.51 12.31
"""
# Select all rows for the columns labelled 'Close' and 'Open'
print(df_access.loc[:, ['Close', 'Open']])
"""
Close Open
2019-01-11 10.51 12.31
2019-01-12 10.52 12.32
2019-01-13 10.53 12.33
2019-01-14 10.54 12.34
"""
# Access the elements of the row labelled '2019-01-11'
print(df_access.loc['2019-01-11'])
"""
Close 10.51
Open 12.31
Name: 2019-01-11, dtype: float64
"""
# Select the first two rows and the first column
print(df_access.iloc[0:2, 0:1])
"""
Close
2019-01-11 10.51
2019-01-12 10.52
"""
# Select the first two rows, all columns
print(df_access.iloc[0:2])
"""
Close Open
2019-01-11 10.51 12.31
2019-01-12 10.52 12.32
"""
# Besides slicing a contiguous range, arbitrary row and column positions can be combined: here rows 0 and 2 with columns 0 and 1
print(df_access.iloc[[0, 2], [0, 1]])
"""
Close Open
2019-01-11 10.51 12.31
2019-01-13 10.53 12.33
"""
# Mixed label/position access: fetch the 0th and 2nd elements of the 'Open' column
# print(df_access.ix[[0, 2], ['Open']])
"""
Open
2019-01-11 12.31
2019-01-13 12.33
"""
print(df_access.index[[0, 2]])
# Index(['2019-01-11', '2019-01-13'], dtype='object')
print(df_access.loc[df_access.index[[0, 2]], ['Open']])
"""
Open
2019-01-11 12.31
2019-01-13 12.33
"""
print(df_access.columns.get_indexer(['Open'])) # [1]
print(df_access.columns.get_loc('Open')) # 1
print(df_access.iloc[[0, 2], df_access.columns.get_indexer(['Open'])])
"""
Open
2019-01-11 12.31
2019-01-13 12.33
"""
print(df_access.index.get_loc('2019-01-12')) # 1
print("***************************条件表达式访问元素***************************")
print(df_access.Open > df_access.Open.mean())
"""
2019-01-11 False
2019-01-12 False
2019-01-13 True
2019-01-14 True
Name: Open, dtype: bool
"""
print(df_access[df_access.Open > df_access.Open.mean()])
"""
Close Open
2019-01-13 10.53 12.33
2019-01-14 10.54 12.34
"""
print(df_access.loc[df_access.Open > df_access.Open.mean(), 'Close'])
"""
2019-01-13 10.53
2019-01-14 10.54
Name: Close, dtype: float64
"""
# 4.4 Generating time series with Python/Pandas
def pd_code3():
# Using the Python datetime module
from datetime import date, time, datetime, timedelta
print("*****example-4.35*****")
# date.resolution: the smallest unit a date object can represent
print(f'date.resolution: {date.resolution}')
# time.resolution: the smallest unit a time object can represent
print(f'time.resolution: {time.resolution}')
# datetime.resolution: the smallest unit a datetime object can represent
print(f'datetime.resolution: {datetime.resolution}')
"""
date.resolution: 1 day, 0:00:00
time.resolution: 0:00:00.000001
datetime.resolution: 0:00:00.000001
"""
print("*********************\n")
print("*****example-4.36*****")
# date.max / date.min: the largest and smallest dates a date object can represent
print(f'date.max: {date.max} and date.min: {date.min}')
# time.max / time.min: the largest and smallest times a time object can represent
print(f'time.max: {time.max} and time.min: {time.min}')
# datetime.max / datetime.min: the largest and smallest datetimes a datetime object can represent
print(f'datetime.max: {datetime.max} and datetime.min: {datetime.min}')
"""
date.max: 9999-12-31 and date.min: 0001-01-01
time.max: 23:59:59.999999 and time.min: 00:00:00
datetime.max: 9999-12-31 23:59:59.999999 and datetime.min: 0001-01-01 00:00:00
"""
print("*********************\n")
print("*****example-4.37*****")
# Construct a datetime instance
# datetime (year, month, day[ , hour[ , minute[ , second[ , microsecond[ , tzinfo]]]]])
datetime_obj = datetime(2016, 10, 26, 10, 23, 15, 1)
print(f'datetime: {datetime_obj}')
# datetime: 2016-10-26 10:23:15.000001
print("*********************\n")
print("*****example-4.38*****")
# replace() returns a new datetime with the attributes given as keyword arguments substituted
re_datetime_obj = datetime_obj.replace(day=27, hour=20)
print(f'datetime: {re_datetime_obj}')
# .isoformat(): returns the time as a string of the form "YYYY-MM-DD HH:MM:SS"
print(f'datetime.isoformat(): {datetime_obj.isoformat()}')
# .strftime(fmt): format the time as a custom string (%m is the month; %M would be the minutes)
print(f'strftime():{datetime_obj.strftime("%Y-%m-%d %X")}')
"""
datetime: 2016-10-27 20:23:15.000001
datetime.isoformat(): 2016-10-26T10:23:15.000001
strftime():2016-10-26 10:23:15
"""
print("*********************\n")
print("*****example-4.39*****")
print(f'datetime.strptime():{datetime.strptime("2016-10-26", "%Y-%m-%d")}')
print(f'fromtimestamp():{datetime.fromtimestamp(1429417200.0)}')
print(f'utcfromtimestamp():{datetime.utcfromtimestamp(1429417200.0)}')
print(f'datetime.now():{datetime.now()}')
"""
datetime.strptime():2016-10-26 00:00:00
fromtimestamp():2015-04-19 12:20:00
utcfromtimestamp():2015-04-19 04:20:00
datetime.now():2019-10-20 13:49:20.402097
"""
print("*********************\n")
print("*****example-4.40*****")
delta_obj = datetime.strptime("2019-10-18 04:20:00", "%Y-%m-%d %X") - datetime.strptime("2019-10-01 04:20:00",
"%Y-%m-%d %X")
print(type(delta_obj), delta_obj)
print(delta_obj.days, delta_obj.total_seconds())
"""
<class 'datetime.timedelta'> 17 days, 0:00:00
17 1468800.0
"""
print("*********************\n")
print("*****example-4.41*****")
dt = datetime.now()
dt1 = dt + timedelta(days=1, hours=1) # tomorrow, one hour later
dt2 = dt + timedelta(days=-1) # yesterday
dt3 = dt - timedelta(days=1) # yesterday
print(f"{dt1}\n{dt2}\n{dt3}\n")
"""
2019-10-21 14:49:20.402735
2019-10-19 13:49:20.402735
2019-10-19 13:49:20.402735
"""
print("*********************\n")
print("*****example-4.42*****")
ts = pd.Timestamp(2019, 1, 1, 2, 3, 4)
print(f'pd.Timestamp()-1:{ts}')
ts = pd.Timestamp(datetime(2019, 1, 1, hour=2, minute=3, second=4))
print(f'pd.Timestamp()-2:{ts}')
ts = pd.Timestamp("2019-1-1 2:3:4")
print(f'pd.Timestamp()-3:{ts}')
print(f'pd.Timestamp()-type:{type(ts)}')
"""
pd.Timestamp()-1:2019-01-01 02:03:04
pd.Timestamp()-2:2019-01-01 02:03:04
pd.Timestamp()-3:2019-01-01 02:03:04
pd.Timestamp()-type:<class 'pandas._libs.tslibs.timestamps.Timestamp'>
"""
print("*********************\n")
print("*****example-4.43*****")
dt = pd.to_datetime(datetime(2019, 1, 1, hour=0, minute=1, second=1))
print(f'pd.to_datetime()-1:{dt}')
dt = pd.to_datetime("2019-1-1 0:1:1")
print(f'pd.to_datetime()-2:{dt}')
print(f'pd.to_datetime()-type:{type(dt)}')
"""
pd.to_datetime()-1:2019-01-01 00:01:01
pd.to_datetime()-2:2019-01-01 00:01:01
pd.to_datetime()-type:<class 'pandas._libs.tslibs.timestamps.Timestamp'>
"""
print("*********************\n")
print("*****example-4.44*****")
# pd.to_datetime can also build a custom DatetimeIndex from a list of date strings
dtlist = pd.to_datetime(["2019-1-1 0:1:1", "2019-2-1 0:1:1", "2019-3-1 0:1:1"])
print(f'pd.to_datetime()-list:{dtlist}')
"""
pd.to_datetime()-list:DatetimeIndex(['2019-01-01 00:01:01', '2019-02-01 00:01:01',
'2019-03-01 00:01:01'],
dtype='datetime64[ns]', freq=None)
"""
print("*********************\n")
print("*****example-4.45*****")
dt_0 = pd.to_datetime(datetime(2019, 1, 1, hour=0, minute=0, second=0))
dt_1 = dt_0 + pd.Timedelta(days=5, minutes=50, seconds=20)
print(f'datetime-1:{dt_0}\ndatetime-2:{dt_1}')
"""
datetime-1:2019-01-01 00:00:00
datetime-2:2019-01-06 00:50:20
"""
print("*********************\n")
print("*****example-4.46*****")
date_rng = | pd.date_range('2019-01-01', freq='M', periods=12) | pandas.date_range |
import numpy as np
import pandas as pd
import scanpy as sc
from termcolor import colored
import time
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import euclidean_distances
import umap
import phate
import seaborn as sns
from pyVIA.core import *
def cellrank_Human(ncomps=80, knn=30, v0_random_seed=7):
import scvelo as scv
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC', 'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
string_ = 'ncomp =' + str(ncomps) + ' knn=' + str(knn) + ' randseed=' + str(v0_random_seed)
# print('ncomp =', ncomps, ' knn=', knn, ' randseed=', v0_random_seed)
print(colored(string_, 'blue'))
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
ad = scv.read_loom('/home/shobi/Downloads/Human Hematopoietic Profiling homo_sapiens 2019-11-08 16.12.loom')
print(ad)
# ad = sc.read('/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# ad.obs['nover_label'] = nover_labels
print('start cellrank pipeline', time.ctime())
# scv.utils.show_proportions(ad)
scv.pl.proportions(ad)
scv.pp.filter_and_normalize(ad, min_shared_counts=20, n_top_genes=2000)
sc.tl.pca(ad, n_comps=ncomps)
n_pcs = ncomps
print('npcs', n_pcs, 'knn', knn)
sc.pp.neighbors(ad, n_pcs=n_pcs, n_neighbors=knn)
sc.tl.louvain(ad, key_added='clusters', resolution=1)
scv.pp.moments(ad, n_pcs=n_pcs, n_neighbors=knn)
scv.tl.velocity(ad)
scv.tl.velocity_graph(ad)
scv.pl.velocity_embedding_stream(ad, basis='umap', color='nover_label')
def adata_preprocess(adata, n_top_genes=1000, log=True):
# this is a lot like the steps for scvelo.pp.filter_and_normalize() which also allows selection of top genes (see Pancreas)
sc.pp.filter_genes(adata, min_counts=1) # only keep genes with at least 1 count
# print(adata)
sc.pp.normalize_per_cell( # normalize with total UMI count per cell #same as normalize_total()
adata, key_n_counts='n_counts_all'
)
# select highly-variable genes
filter_result = sc.pp.filter_genes_dispersion(adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)
adata = adata[:, filter_result.gene_subset] # subset the genes
sc.pp.normalize_per_cell(adata) # renormalize after filtering
if log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
'''
total = adata.X
total = total.sum(axis=0).transpose()
total = pd.DataFrame(total.transpose())
print('total')
print(total.shape)
#total = total.sum(axis=0).transpose()
total.columns = [i for i in adata.var_names]
print(total)
total.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/library_counts_500hvg.csv')
sc.pp.scale(adata, max_value=10)
from sklearn.decomposition import PCA
pca = PCA(n_components=499) # estimate only 2 PCs
X_new = pca.fit_transform(adata.X)
print('variance explained')
print(pca.explained_variance_ratio_)
print('pca.components_ shape ncomp x nfeat')
print()
df = pd.DataFrame(abs(pca.components_))
df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_500hvg.csv')
print('done saving')
'''
# sc.pp.scale(adata, max_value=10)zheng scales after the log, but this doesnt work well and is also not used in scvelo.pp.filter_and_normalize
return adata
def main_Human(ncomps=80, knn=30, v0_random_seed=7, run_palantir_func=False):
'''
df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_500hvg.csv')
print(df)
df = df.set_index('Unnamed: 0')
print(df)
df = df.sort_values(by='totals', axis=1, ascending = False)
df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_sorted_500hvg.csv')
print('saved')
'''
import random
random.seed(100)
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC (cDC)',
'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
# NOTE: Myeloid DCs are now called Conventional Dendritic Cells cDCs
string_ = 'ncomp =' + str(ncomps) + ' knn=' + str(knn) + ' randseed=' + str(v0_random_seed)
# print('ncomp =', ncomps, ' knn=', knn, ' randseed=', v0_random_seed)
print(colored(string_, 'blue'))
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
df_nover = pd.DataFrame(nover_labels)
# df_nover.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/noverLabelsforMonocle.csv')
print('save nover')
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
parc53_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_Parc53_set1.csv')[
'x'].values.tolist()
parclabels_all = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels_all_set1.csv')[
'parc'].values.tolist()
parc_dict_nover = {}
for i, c in enumerate(parc53_labels):
parc_dict_nover[i] = dict_abb[c]
parclabels_all = [parc_dict_nover[ll] for ll in parclabels_all]
# print('all', len(parclabels_all))
ad = sc.read(
'/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# 5780 cells x 14651 genes Human Replicate 1. Male african american, 38 years
print('h5ad ad size', ad)
colors = pd.Series(ad.uns['cluster_colors'])
colors['10'] = '#0b128f'
ct_colors = pd.Series(ad.uns['ct_colors'])
list_var_names = ad.var_names
# print(list_var_names)
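# use the cell with the smallest Palantir pseudotime as the root cell for the downstream pseudotime methods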
ad.uns['iroot'] = np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0]
print('iroot', np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0])
tsne = pd.DataFrame(ad.obsm['tsne'], index=ad.obs_names, columns=['x', 'y'])
tsnem = ad.obsm['tsne']
palantir_tsne_df = pd.DataFrame(tsnem)
# palantir_tsne_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/palantir_tsne.csv')
revised_clus = ad.obs['clusters'].values.tolist().copy()
loc_DCs = [i for i in range(5780) if ad.obs['clusters'].values.tolist()[i] == '7']
for loc_i in loc_DCs:
if ad.obsm['palantir_branch_probs'][loc_i, 5] > ad.obsm['palantir_branch_probs'][
loc_i, 2]: # if prob that cDC > pDC, then relabel as cDC
revised_clus[loc_i] = '10'
revised_clus = [int(i) for i in revised_clus]
# magic_df = ad.obsm['MAGIC_imputed_data']
# ad.X: Filtered, normalized and log transformed count matrix
# ad.raw.X: Filtered raw count matrix
# print('before extra filtering' ,ad.shape)
# sc.pp.filter_genes(ad, min_cells=10)
# print('after extra filtering', ad.shape)
adata_counts = sc.AnnData(ad.X)
print(ad.raw.X.shape)
# df_X = pd.DataFrame(ad.raw.X.todense(), columns = ad.var_names)
# df_X.columns = [i for i in ad.var_names]
# print('starting to save .X')
# df_X.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/expression_matrix_raw.csv")
print('finished save .X')
# (ad.X) # ad.X is filtered, lognormalized,scaled// ad.raw.X is the filtered but not pre-processed
adata_counts.obs_names = ad.obs_names
adata_counts.var_names = ad.var_names
adata_counts_raw = sc.AnnData(ad.raw.X)
adata_counts_raw.var_names = [i for i in ad.var_names]
# adata_counts_raw = adata_preprocess(adata_counts_raw, n_top_genes=500, log=True) # when using HVG and no PCA
# sc.tl.pca(adata_counts_raw,svd_solver='arpack', n_comps=ncomps)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
import colorcet as cc
# tsnem = TSNE().fit_transform(adata_counts.obsm['X_pca'])
'''
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
line = np.linspace(0, 1, len(set(revised_clus)))
for color, group in zip(line, set(revised_clus)):
where = np.where(np.array(revised_clus) == group)[0]
ax1.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend()
ax1.set_title('Palantir Phenograph Labels')
import colorcet as cc
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
line_nover = np.linspace(0, 1, len(set(nover_labels)))
col_i = 0
for color, group in zip(line_nover, set(nover_labels)):
where = np.where(np.array(nover_labels) == group)[0]
marker_x = marker[random.randint(0, 5)]
# ax2.scatter(tsnem[where, 0],tsnem[where, 1], label=group, c=plt.cm.nipy_spectral(color), marker = marker_x, alpha=0.5)
ax2.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], marker=marker_x,
alpha=0.5)
col_i = col_i + 1
ax2.legend(fontsize=6)
ax2.set_title('Novershtern Corr. Labels')
line = np.linspace(0, 1, len(set(parclabels_all)))
col_i = 0
for color, group in zip(line, set(parclabels_all)):
where = np.where(np.array(parclabels_all) == group)[0]
ax3.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], alpha=0.5)
col_i = col_i + 1
ax3.legend()
ax3.set_title('Parc53 Nover Labels')
# plt.show()
'''
'''
plt.figure(figsize=[5, 5])
plt.title('palantir, ncomps = ' + str(ncomps) + ' knn' + str(knn))
for group in set(revised_clus):
loc_group = np.where(np.asarray(revised_clus) == group)[0]
plt.scatter(tsnem[loc_group, 0], tsnem[loc_group, 1], s=5, color=colors[group], label=group)
ax = plt.gca()
ax.set_axis_off()
ax.legend(fontsize=6)
'''
gene_list = [
'ITGAX'] # ['GATA1', 'GATA2', 'ITGA2B', 'CSF1R', 'MPO', 'CD79B', 'SPI1', 'IRF8', 'CD34', 'IL3RA', 'ITGAX', 'IGHD',
# 'CD27', 'CD14', 'CD22', 'ITGAM', 'CLC', 'MS4A3', 'FCGR3A', 'CSF1R']
true_label = nover_labels # revised_clus
root_user = [4823]
print('v0 random seed', v0_random_seed)
# df_temp_write = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:200])
# df_temp_write.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/Human_CD34_200PCA.csv")
Xin = adata_counts.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts_raw.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts_raw.X.todense()
print(time.ctime())
print(time.ctime())
v0 = VIA(Xin, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.2,
root_user=root_user, dataset='humanCD34', preserve_disconnected=True, random_seed=v0_random_seed,
do_impute_bool=True, is_coarse=True, pseudotime_threshold_TS=10,
neighboring_terminal_states_threshold=3) # *.4 root=1,
v0.run_VIA()
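# coarse pass: v0 clusters the cells and proposes terminal states; its labels and terminal clusters seed the finer v1 run below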
v0.make_JSON(filename='scRNA_Hema_temp.js')
super_labels = v0.labels
print('starting to save selected genes')
genes_save = ['ITGAX', 'GATA1', 'GATA2', 'ITGA2B', 'CSF1R', 'MPO', 'CD79B', 'SPI1', 'IRF8', 'CD34', 'IL3RA',
'ITGAX', 'IGHD',
'CD27', 'CD14', 'CD22', 'ITGAM', 'CLC', 'MS4A3', 'FCGR3A', 'CSF1R']
df_selected_genes = pd.DataFrame(adata_counts.X, columns=[cc for cc in adata_counts.var_names])
df_selected_genes = df_selected_genes[genes_save]
# df_selected_genes.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/selected_genes.csv")
df_ = pd.DataFrame(ad.X)
df_.columns = [i for i in ad.var_names]
print('start magic')
gene_list_magic = ['IL3RA', 'IRF8', 'GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B', 'SPI1', 'CD34', 'CSF1R', 'ITGAX']
df_magic = v0.do_impute(df_, magic_steps=3, gene_list=gene_list_magic)
df_magic_cluster = df_magic.copy()
df_magic_cluster['parc'] = v0.labels
df_magic_cluster = df_magic_cluster.groupby('parc', as_index=True).mean()
print('end magic', df_magic.shape)
f, ((ax, ax1)) = plt.subplots(1, 2, sharey=True)
v0.draw_piechart_graph(ax, ax1, type_pt='gene', gene_exp=df_magic_cluster['GATA1'].values, title='GATA1')
plt.show()
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v0, np.arange(0, len(true_label)))
draw_trajectory_gams(tsnem, super_clus_ds_PCA_loc, super_labels, super_labels, v0.edgelist_maxout,
v0.x_lazy, v0.alpha_teleport, v0.single_cell_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v0.terminal_clusters,
sub_terminal_clusters=v0.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=str(Xin.shape[1]))
plt.show()
print('super labels', set(super_labels))
ad.obs['via0_label'] = [str(i) for i in super_labels]
magic_ad = ad.obsm['MAGIC_imputed_data']
magic_ad = sc.AnnData(magic_ad)
magic_ad.obs_names = ad.obs_names
magic_ad.var_names = ad.var_names
magic_ad.obs['via0_label'] = [str(i) for i in super_labels]
marker_genes = {"ERY": ['GATA1', 'GATA2', 'ITGA2B'], "BCell": ['IGHD', 'CD22'],
"DC": ['IRF8', 'IL3RA', 'IRF4', 'CSF2RA', 'ITGAX'],
"MONO": ['CD14', 'SPI1', 'MPO', 'IL12RB1', 'IL13RA1', 'C3AR1', 'FCGR3A'], 'HSC': ['CD34']}
sc.pl.matrixplot(magic_ad, marker_genes, groupby='via0_label', dendrogram=True)
'''
sc.tl.rank_genes_groups(ad, groupby='via0_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.rank_genes_groups_heatmap(ad, n_genes=10, groupby="via0_label", show_gene_labels=True, use_raw=False)
sc.pl.rank_genes_groups_tracksplot(ad, groupby='via0_label', n_genes = 3) # plot the result
'''
loaded_magic_df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100_subset.csv')
# loaded_magic_df.head()
for gene_name in ['ITGA2B', 'IL3RA',
'IRF8',
'MPO', 'CSF1R', 'GATA2', 'CD79B',
'CD34', 'GATA1', 'IL3RA']: # ,'SPI1', 'CD34','CSF1R','IL3RA','IRF4', 'CSF2RA','ITGAX']:
print('gene name', gene_name)
# DC markers https://www.cell.com/pb-assets/products/nucleus/nucleus-phagocytes/rnd-systems-dendritic-cells-br.pdf
gene_name_dict = {'GATA1': 'GATA1', 'GATA2': 'GATA2', 'ITGA2B': 'CD41 (Mega)', 'MPO': 'MPO (Mono)',
'CD79B': 'CD79B (B)', 'IRF8': 'IRF8 (DC)', 'SPI1': 'PU.1', 'CD34': 'CD34',
'CSF1R': 'CSF1R (cDC Up. Up then Down in pDC)', 'IL3RA': 'CD123 (pDC)', 'IRF4': 'IRF4 (pDC)',
'ITGAX': 'ITGAX (cDCs)', 'CSF2RA': 'CSF2RA (cDC)'}
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
magic_ad = ad.obsm['MAGIC_imputed_data'][:, loc_gata]
magic_ad = loaded_magic_df[gene_name]
# subset_ = magic_ad
subset_ = df_magic[gene_name].values
print(subset_.shape)
# print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
# v1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
v0.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name])
plt.show()
super_edges = v0.edgelist_maxout # v0.edgelist
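# get_loc_terminal_states is expected to return, for each coarse terminal cluster, the single cell nearest
# that cluster in PCA space (compare the commented-out block further down); these seed super_terminal_cells for v1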
tsi_list = get_loc_terminal_states(v0, Xin)
v1 = VIA(Xin, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.05, super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user,
x_lazy=0.95, alpha_teleport=0.99, dataset='humanCD34', preserve_disconnected=True,
super_terminal_clusters=v0.terminal_clusters, is_coarse=False, full_neighbor_array=v0.full_neighbor_array,
ig_full_graph=v0.ig_full_graph, full_distance_array=v0.full_distance_array,
csr_array_locally_pruned=v0.csr_array_locally_pruned,
random_seed=v0_random_seed, pseudotime_threshold_TS=10) # *.4super_terminal_cells = tsi_list #3root=1,
v1.run_VIA()
labels = v1.labels
v1.make_JSON(filename='scRNA_Hema_via1_temp.js')
df_magic_cluster = df_magic.copy()
df_magic_cluster['via1'] = v1.labels
df_magic_cluster = df_magic_cluster.groupby('via1', as_index=True).mean()
# print('df_magic_cluster', df_magic_cluster)
'''
#Get the clustsergraph gene expression on topology
for gene_i in gene_list_magic:
f, ((ax, ax1)) = plt.subplots(1, 2, sharey=True)
v1.draw_piechart_graph(ax,ax1,type_pt='gene', gene_exp = df_magic_cluster[gene_i].values, title = gene_i)
plt.show()
'''
ad.obs['parc1_label'] = [str(i) for i in labels]
'''
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster
for tsi in v1.revised_super_terminal_clusters:
loc_i = np.where(super_labels == tsi)[0]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = v0.knn_struct.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
sc.tl.rank_genes_groups(ad, groupby='parc1_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.matrixplot(ad, marker_genes, groupby='parc1_label', use_raw=False)
sc.pl.rank_genes_groups_heatmap(ad, n_genes=3, groupby="parc1_label", show_gene_labels=True, use_raw=False)
'''
label_df = pd.DataFrame(labels, columns=['parc'])
# label_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels.csv', index=False)
gene_ids = adata_counts.var_names
obs = ad.raw.X.toarray()
print('shape obs', obs.shape)
obs = pd.DataFrame(obs, columns=gene_ids)
# obs['parc']=v1.labels
obs['louvain'] = revised_clus
# obs_average = obs.groupby('parc', as_index=True).mean()
obs_average = obs.groupby('louvain', as_index=True).mean()
# print(obs_average.head())
# obs_average.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.csv', index=False)
ad_obs = sc.AnnData(obs_average)
ad_obs.var_names = gene_ids
ad_obs.obs['parc'] = [i for i in range(len(set(revised_clus)))] # v1.labels instaed of revised_clus
# sc.write('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.h5ad',ad_obs)
# fig_0, ax_0 = plt.subplots()
loaded_magic_df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100_subset.csv')
# loaded_magic_df.head()
for gene_name in ['ITGA2B', 'IL3RA',
'IRF8',
'MPO', 'CSF1R', 'GATA2', 'CD79B',
'CD34']: # ['GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B','IRF8','SPI1', 'CD34','CSF1R','IL3RA','IRF4', 'CSF2RA','ITGAX']:
print('gene name', gene_name)
# DC markers https://www.cell.com/pb-assets/products/nucleus/nucleus-phagocytes/rnd-systems-dendritic-cells-br.pdf
gene_name_dict = {'GATA1': 'GATA1', 'GATA2': 'GATA2', 'ITGA2B': 'CD41 (Mega)', 'MPO': 'MPO (Mono)',
'CD79B': 'CD79B (B)', 'IRF8': 'IRF8 (DC)', 'SPI1': 'PU.1', 'CD34': 'CD34',
'CSF1R': 'CSF1R (cDC Up. Up then Down in pDC)', 'IL3RA': 'CD123 (pDC)', 'IRF4': 'IRF4 (pDC)',
'ITGAX': 'ITGAX (cDCs)', 'CSF2RA': 'CSF2RA (cDC)'}
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
magic_ad = ad.obsm['MAGIC_imputed_data'][:, loc_gata]
magic_ad = loaded_magic_df[gene_name]
# subset_ = magic_ad
subset_ = df_magic[gene_name].values
print(subset_.shape)
# print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
# v1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
v1.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name])
# v0.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name] + 'VIA MAGIC')
print('start tsne')
n_downsample = 4000
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=4000)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=5780, replace=False, p=None)
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
print('labels p1', len(labels), set(labels))
true_label = list(np.asarray(true_label)[idx])
sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov)[idx])
# graph_hnsw = v0.knngraph_visual()
embedding = tsnem[idx, :] # TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
# phate_op = phate.PHATE()
# embedding = phate_op.fit_transform(adata_counts.obsm['X_pca'][:, 0:20])
# embedding = embedding[idx, :]
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:20])
print('size of downsampled embedding', embedding.shape)
else:
embedding = tsnem # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][:,0:20])
idx = np.random.randint(len(labels), size=len(labels))
print('end tsne')
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
# DRAW EVOLUTION PATHS
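# make_knn_embeddedspace presumably builds a knn index over the 2-D embedding so the simulated lineage paths
# can be projected back onto the tSNE for plotting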
knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx)
plt.show()
def main_Toy_comparisons(ncomps=10, knn=30, random_seed=42, dataset='Toy3', root_user='M1',
foldername="/home/shobi/Trajectory/Datasets/Toy3/"):
print('dataset, ncomps, knn, seed', dataset, ncomps, knn, random_seed)
# root_user = ["T1_M1", "T2_M1"] # "M1" # #'M1' # "T1_M1", "T2_M1"] #"T1_M1"
if dataset == "Toy3":
print('dataset Toy3')
df_counts = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000.csv",
delimiter=",")
#df_counts = pd.read_csv(foldername + "Toy3_noise_100genes_thinfactor8.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000.csv")
print('df_ids', df_ids.columns)
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C107'
if dataset == "Toy4": # 2 disconnected components
df_counts = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "Toy4_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000_ids_with_truetime.csv", delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000.csv")
print(df_counts.shape, 'df_counts shape')
root_user = ['T1_M1', 'T2_M1'] # 'T1_M1'
paga_root = 'T2_M1'
palantir_root = 'C107'
if dataset == "Connected":
df_counts = pd.read_csv(foldername + "ToyConnected_M9_n2000d1000.csv", delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyConnected_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "ToyConnected_M9_n2000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyConnected/ToyConnected_M9_n2000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C1'
if dataset == "Connected2":
df_counts = pd.read_csv(foldername + "Connected2_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyConnected2_noise_500genes.csv", 'rt',delimiter=",")
df_ids = pd.read_csv(foldername + "Connected2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyConnected2/Connected2_n1000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C11'
# suggest to use visual jaccard pruning of 1 (this doesnt alter underlying graph, just the display. can also use "M2" as the starting root,
if dataset == "ToyMultiM11":
df_counts = pd.read_csv(foldername + "Toymulti_M11_n3000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyMulti_M11_noised.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "Toymulti_M11_n3000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv( "/home/shobi/Trajectory/Datasets/ToyMultifurcating_M11/Toymulti_M11_n3000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C1005'
if dataset == "Cyclic": # 4 milestones
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_noise_100genes_thinfactor3.csv",
delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C1'
if dataset == "Cyclic2": # 4 milestones
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/ToyCyclic2_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C107'
if dataset == 'Bifurc2':
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/ToyBifurc2_noised.csv", delimiter=",")
df_ids = pd.read_csv( "/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000_ids_with_truetime.csv",delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C1006'
if dataset == 'Disconnected2':
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/ToyDisconnected2_noise_500genes.csv",
delimiter=",")
df_ids = pd.read_csv(
"/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000.csv")
root_user = ['T1_M1', 'T1_M2', 'T1_M3'] # 'T1_M1'
paga_root = 'T1_M1'
palantir_root = 'C125'
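# extract the numeric part of the cell IDs (e.g. 'C108' -> 108) so the rows can be sorted back into the original cell order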
df_ids['cell_id_num'] = [int(s[1::]) for s in df_ids['cell_id']]
print("shape", df_counts.shape, df_ids.shape)
df_counts = df_counts.drop('Unnamed: 0', axis=1)
df_ids = df_ids.sort_values(by=['cell_id_num'])
df_ids = df_ids.reset_index(drop=True)
# df_ids.to_csv("/home/shobi/Trajectory/Datasets/ToyConnected2/Connected2_n1000d1000_ids_sorted_with_truetime.csv")
# df_counts['group_id'] = df_ids['group_id']#to split Toy4
# df_counts['main_Traj'] = [i[0:2] for i in df_ids['group_id']]#to split Toy4
# df_ids['main_Traj'] = [i[0:2] for i in df_ids['group_id']]#to split Toy4
# df_counts = df_counts[df_counts['main_Traj']=='T2']#to split Toy4
# df_ids = df_ids[df_ids['main_Traj'] == 'T2']#to split Toy4
#true_time = df_ids['true_time']
true_label = df_ids['group_id'].tolist()
# df_counts = df_counts.drop('main_Traj', 1)#to split Toy4
# df_counts = df_counts.drop('group_id', 1)#to split Toy4
# df_ids = df_ids.reset_index(drop=True)#to split Toy4
# df_counts = df_counts.reset_index(drop=True)#to split Toy4
# true_label = df_ids['group_id'] #to split Toy4
print("shape", df_counts.index, df_ids.index)
adata_counts = sc.AnnData(df_counts, obs=df_ids)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
# comparisons
adata_counts.uns['iroot'] = np.flatnonzero(adata_counts.obs['group_id'] == paga_root)[0] # 'T1_M1'#'M1'
do_paga = False #
do_palantir = False #
# comparisons
if do_paga == True:
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X', ) # n_pcs=ncomps) # 4
sc.tl.draw_graph(adata_counts)
# sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') # force-directed layout
start_dfmap = time.time()
# sc.tl.diffmap(adata_counts, n_comps=ncomps)
sc.tl.diffmap(adata_counts, n_comps=200) # default retains n_comps = 15
print('time taken to get diffmap given knn', time.time() - start_dfmap)
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap') # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
sc.tl.leiden(adata_counts, resolution=1.0, random_state=10)
sc.tl.paga(adata_counts, groups='leiden')
# sc.pl.paga(adata_counts, color=['leiden','group_id'])
sc.tl.dpt(adata_counts, n_dcs=ncomps)
df_paga = pd.DataFrame()
df_paga['paga_dpt'] = adata_counts.obs['dpt_pseudotime'].values
correlation = df_paga['paga_dpt'].corr(df_ids['true_time'])
print('corr paga knn', knn, correlation)
sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'],
title=['leiden (knn:' + str(knn) + ' ncomps:' + str(ncomps) + ')',
'group_id (ncomps:' + str(ncomps) + ')', 'pseudotime (ncomps:' + str(ncomps) + ')'])
# X = df_counts.values
'''
# palantir
if do_palantir == True:
print(palantir.__file__) # location of palantir source code
str_true_label = true_label.tolist()
str_true_label = [(i[1:]) for i in str_true_label]
str_true_label = pd.Series(str_true_label, index=counts.index)
norm_df = counts # palantir.preprocess.normalize_counts(counts)
# pca_projections, _ = palantir.utils.run_pca(norm_df, n_components=ncomps) #normally use
pca_projections = counts
dm_res = palantir.utils.run_diffusion_maps(pca_projections, knn=knn,
n_components=300) ## n_components=ncomps, knn=knn)
ms_data = palantir.utils.determine_multiscale_space(dm_res) # n_eigs is determined using eigengap
tsne = palantir.utils.run_tsne(ms_data)
palantir.plot.plot_cell_clusters(tsne, str_true_label)
# C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
# c107 for T1_M1, C42 for T2_M1 disconnected
# C1 for M8_connected, C1005 for multi_M11 , 'C1006 for bifurc2'
pr_res = palantir.core.run_palantir(ms_data, early_cell=palantir_root, num_waypoints=500, knn=knn)
df_palantir = pd.read_csv(
'/home/shobi/Trajectory/Datasets/Toy3/palantir_pt.csv') # /home/shobi/anaconda3/envs/ViaEnv/lib/python3.7/site-packages/palantir
pt = df_palantir['pt']
correlation = pt.corr(true_time)
print('corr Palantir', correlation)
print('')
palantir.plot.plot_palantir_results(pr_res, tsne, n_knn=knn, n_comps=pca_projections.shape[1])
plt.show()
'''
# from sklearn.decomposition import PCA
# pca = PCA(n_components=ncomps)
# pc = pca.fit_transform(df_counts)
Xin = adata_counts.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts.X
if dataset == 'Toy4':
jac_std_global = .15 # .15
else:
jac_std_global = 0.15 # use 1 for Cyclic2, otherwise 0.15
#
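# coarse VIA pass (v0): clusters the cells at a coarse resolution (too_big_factor=0.3) and finds the terminal states and single-cell pseudotime used to seed the finer pass below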
v0 = VIA(Xin, true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
too_big_factor=0.3, root_user=root_user, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
random_seed=random_seed) # *.4 root=2,
v0.run_VIA()
super_labels = v0.labels
df_ids['pt'] = v0.single_cell_pt_markov
correlation = df_ids['pt'].corr(df_ids['true_time'])
print('corr via knn', knn, correlation)
super_edges = v0.edgelist
# v0.make_JSON(filename = 'Toy3_ViaOut_temp.js')
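# build an hnswlib approximate nearest-neighbour index on the PCA coordinates; it is used below to map each terminal cluster's mean location to an actual single cell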
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(50)
tsi_list = [] # find the single cell nearest to the average location of each terminal cluster in PCA space
for tsi in v0.terminal_clusters:
loc_i = np.where(np.asarray(v0.labels) == tsi)[0]
val_pt = [v0.single_cell_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
print('Granular VIA iteration')
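# fine-grained VIA pass (v1): a smaller too_big_factor gives finer clusters; the coarse labels, terminal cells and the knn graph already computed in v0 are passed in so they are not recomputed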
v1 = VIA(Xin, true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
too_big_factor=0.1,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, is_coarse=False,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
super_terminal_clusters=v0.terminal_clusters,
full_neighbor_array=v0.full_neighbor_array,
ig_full_graph=v0.ig_full_graph, full_distance_array=v0.full_distance_array,
csr_array_locally_pruned=v0.csr_array_locally_pruned, random_seed=random_seed) # root=1,
v1.run_VIA()
df_ids['pt1'] = v1.single_cell_pt_markov
correlation = df_ids['pt1'].corr(df_ids['true_time'])
print('corr via1 knn', knn, correlation)
labels = v1.labels
# v1 = PARC(adata_counts.obsm['X_pca'], true_label, jac_std_global=1, knn=5, too_big_factor=0.05, anndata= adata_counts, small_pop=2)
# v1.run_VIA()
# labels = v1.labels
print('start tsne')
n_downsample = 500
if len(labels) > n_downsample: # note: size=len(labels) below, so this only permutes the indices rather than actually downsampling
# idx = np.random.randint(len(labels), size=900)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=len(labels), replace=False, p=None)
print('len idx', len(idx))
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
true_label = list(np.asarray(true_label)[idx])
sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov)[idx])
# embedding = v0.run_umap_hnsw(adata_counts.obsm['X_pca'][idx, :], graph)
embedding = adata_counts.obsm['X_pca'][idx,
0:2] # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:5])
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
print('tsne downsampled size', embedding.shape)
else:
embedding = umap.UMAP().fit_transform(Xin) # (adata_counts.obsm['X_pca'])
print('tsne input size', adata_counts.obsm['X_pca'].shape)
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
idx = np.random.randint(len(labels), size=len(labels))
print('end tsne')
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
print('super terminal and sub terminal', v0.super_terminal_cells, v1.terminal_clusters)
knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, v0, v1, idx)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
'''
draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v0.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
'''
plt.show()
num_group = len(set(true_label))
line = np.linspace(0, 1, num_group)
f, (ax1, ax3) = plt.subplots(1, 2, sharey=True)
for color, group in zip(line, set(true_label)):
where = np.where(np.asarray(true_label) == group)[0]
ax1.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend(fontsize=6)
ax1.set_title('true labels')
ax3.set_title("Markov Sim PT ncomps:" + str(Xin.shape[1]) + '. knn:' + str(knn))
ax3.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
plt.show()
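# for the toy data, use the first 5 PCs as proxy 'genes' to illustrate how expression trends are plotted along the VIA pseudotime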
df_subset = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:5], columns=['Gene0', 'Gene1', 'Gene2', 'Gene3', 'Gene4'])
for gene_i in ['Gene0', 'Gene1', 'Gene2']: # , 'Gene3', 'Gene4']:
subset_ = df_subset[gene_i].values
print(subset_.shape)
# print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
# v1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
v1.get_gene_expression(subset_, title_gene=gene_i)
# knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx,
adata_counts.obsm['X_pca'][:, 0:ncomps])
plt.show()
def main_Toy(ncomps=10, knn=30, random_seed=41, dataset='Toy3', root_user=['M1'],
cluster_graph_pruning_std=1., foldername="/home/shobi/Trajectory/Datasets/"):
print('dataset, ncomps, knn, seed', dataset, ncomps, knn, random_seed)
if dataset == "Toy3":
df_counts = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000_ids_with_truetime.csv", delimiter=",")
root_user = ['M1']
paga_root = "M1"
if dataset == "Toy4": # 2 disconnected components
print('inside toy4')
df_counts = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000_ids_with_truetime.csv", delimiter=",")
root_user = ['T1_M1', 'T2_M1'] # 'T1_M1'
paga_root = 'T1_M1'
df_ids['cell_id_num'] = [int(s[1::]) for s in df_ids['cell_id']]
# print("shape", df_counts.shape, df_ids.shape)
df_counts = df_counts.drop('Unnamed: 0', axis=1)
df_ids = df_ids.sort_values(by=['cell_id_num'])
df_ids = df_ids.reset_index(drop=True)
true_label = df_ids['group_id'].tolist()
#true_time = df_ids['true_time']
adata_counts = sc.AnnData(df_counts, obs=df_ids)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
# true_label =['a' for i in true_label] #testing dummy true_label
adata_counts.uns['iroot'] = np.flatnonzero(adata_counts.obs['group_id'] == paga_root)[0] # 'T1_M1'#'M1'
# via_wrapper(adata_counts, true_label, embedding= adata_counts.obsm['X_pca'][:,0:2], root=[1], knn=30, ncomps=10,cluster_graph_pruning_std = 1)
# print('starting via wrapper disconn')
# via_wrapper_disconnected(adata_counts, true_label, embedding=adata_counts.obsm['X_pca'][:, 0:2], root=[23,902], preserve_disconnected=True, knn=10, ncomps=10, cluster_graph_pruning_std=1 ,random_seed=41)
# print('end via wrapper disconn')
if dataset == 'Toy4':
jac_std_global = 0.15 # 1
else:
jac_std_global = 0.15
import umap
embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][:, 0:10]) # 50
# embedding = adata_counts.obsm['X_pca'][:, 0:2]
# plt.scatter(embedding[:,0],embedding[:,1])
# plt.show()
print('root user', root_user)
v0 = VIA(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
cluster_graph_pruning_std=cluster_graph_pruning_std,
too_big_factor=0.3, root_user=root_user, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
random_seed=random_seed, piegraph_arrow_head_width=0.4,
piegraph_edgeweight_scalingfactor=1.0) # *.4 root=2,
v0.run_VIA()
super_labels = v0.labels
print('super labels', type(super_labels))
df_ids['pt'] = v0.single_cell_pt_markov
correlation = df_ids['pt'].corr(df_ids['true_time'])
print('corr via knn', knn, correlation)
super_edges = v0.edgelist
# v0.make_JSON(filename = 'Toy3_ViaOut_temp.js')
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(50)
tsi_list = [] # find the single cell nearest to the average location of each terminal cluster in PCA space
for tsi in v0.terminal_clusters:
loc_i = np.where(np.asarray(v0.labels) == tsi)[0]
val_pt = [v0.single_cell_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
print('Granular VIA iteration')
v1 = VIA(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
too_big_factor=0.1,
cluster_graph_pruning_std=cluster_graph_pruning_std,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, is_coarse=False,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
super_terminal_clusters=v0.terminal_clusters,
full_neighbor_array=v0.full_neighbor_array,
ig_full_graph=v0.ig_full_graph, full_distance_array=v0.full_distance_array,
csr_array_locally_pruned=v0.csr_array_locally_pruned, random_seed=random_seed) # root=1,
v1.run_VIA()
labels = v1.labels
df_ids['pt1'] = v1.single_cell_pt_markov
correlation = df_ids['pt1'].corr(df_ids['true_time'])
print('corr via knn', knn, correlation)
n_downsample = 50
if len(labels) > n_downsample: # just testing the downsampling and indices. Not actually downsampling
# idx = np.random.randint(len(labels), size=900)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=len(labels), replace=False, p=None)
print('len idx', len(idx))
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
true_label = list(np.asarray(true_label)[idx])
sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov)[idx])
embedding = embedding[idx, :]
# embedding = v0.run_umap_hnsw(adata_counts.obsm['X_pca'][idx, :], graph)
# embedding = adata_counts.obsm['X_pca'][idx, 0:2] # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:5])
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
print('super terminal and sub terminal', v0.super_terminal_cells, v1.terminal_clusters)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
num_group = len(set(true_label))
line = np.linspace(0, 1, num_group)
'''
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
for color, group in zip(line, set(true_label)):
where = np.where(np.asarray(true_label) == group)[0]
ax1.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend(fontsize=6)
ax1.set_title('true labels')
ax2.set_title("Markov Sim PT ncomps:" + str(ncomps) + '. knn:' + str(knn))
ax2.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
plt.show()
'''
df_subset = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:5], columns=['Gene0', 'Gene1', 'Gene2', 'Gene3', 'Gene4'])
for gene_i in ['Gene0', 'Gene1', 'Gene2']: # , 'Gene3', 'Gene4']:
subset_ = df_subset[gene_i].values
v1.get_gene_expression(subset_, title_gene=gene_i)
knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx)
plt.show()
def main_Bcell(ncomps=50, knn=20, random_seed=0, cluster_graph_pruning_std=.15, path='/home/shobi/Trajectory/Datasets/Bcell/'):
print('Input params: ncomp, knn, random seed', ncomps, knn, random_seed)
# https://github.com/STATegraData/STATegraData
def run_zheng_Bcell(adata, min_counts=3, n_top_genes=500, do_HVG=True):
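# Zheng17-style preprocessing: filter genes by minimum counts, library-size normalize, (optionally) select highly variable genes on log-transformed data, renormalize, and scale to unit variance / zero mean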
sc.pp.filter_genes(adata, min_counts=min_counts)
# sc.pp.filter_genes(adata, min_cells=3)# only consider genes with more than 1 count
'''
sc.pp.normalize_per_cell( # normalize with total UMI count per cell
adata, key_n_counts='n_counts_all')
'''
sc.pp.normalize_total(adata, target_sum=1e4)
if do_HVG == True:
sc.pp.log1p(adata)
'''
filter_result = sc.pp.filter_genes_dispersion( # select highly-variable genes
adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False )
adata = adata[:, filter_result.gene_subset] # subset the genes
'''
sc.pp.highly_variable_genes(adata, n_top_genes=n_top_genes, min_mean=0.0125, max_mean=3,
min_disp=0.5) # this function expects logarithmized data
print('len hvg ', sum(adata.var.highly_variable))
adata = adata[:, adata.var.highly_variable]
sc.pp.normalize_per_cell(adata) # renormalize after filtering
# if do_log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
if do_HVG == False: sc.pp.log1p(adata)
sc.pp.scale(adata, max_value=10) # scale to unit variance and shift to zero mean
return adata
'''
def run_palantir_func_Bcell(ad1, ncomps, knn, tsne_X, true_label):
ad = ad1.copy()
tsne = pd.DataFrame(tsne_X, index=ad.obs_names, columns=['x', 'y'])
norm_df_pal = pd.DataFrame(ad.X)
new = ['c' + str(i) for i in norm_df_pal.index]
norm_df_pal.columns = [i for i in ad.var_names]
# print('norm df', norm_df_pal)
norm_df_pal.index = new
pca_projections, _ = palantir.utils.run_pca(norm_df_pal, n_components=ncomps)
sc.tl.pca(ad, svd_solver='arpack')
dm_res = palantir.utils.run_diffusion_maps(pca_projections, n_components=ncomps, knn=knn)
ms_data = palantir.utils.determine_multiscale_space(dm_res) # n_eigs is determined using eigengap
print('ms data shape: determined using eigengap', ms_data.shape)
# tsne = pd.DataFrame(tsnem)#palantir.utils.run_tsne(ms_data)
tsne.index = new
# print(type(tsne))
str_true_label = pd.Series(true_label, index=norm_df_pal.index)
palantir.plot.plot_cell_clusters(tsne, str_true_label)
start_cell = 'c42' # '#C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
pr_res = palantir.core.run_palantir(ms_data, early_cell=start_cell, num_waypoints=1200, knn=knn)
palantir.plot.plot_palantir_results(pr_res, tsne, n_knn=knn, n_comps=ncomps)
imp_df = palantir.utils.run_magic_imputation(norm_df_pal, dm_res)
Bcell_marker_gene_list = ['Igll1', 'Myc', 'Ldha', 'Foxo1', 'Lig4'] # , 'Slc7a5']#,'Slc7a5']#,'Sp7','Zfp629']
gene_trends = palantir.presults.compute_gene_trends(pr_res, imp_df.loc[:, Bcell_marker_gene_list])
palantir.plot.plot_gene_trends(gene_trends)
plt.show()
'''
def run_paga_func_Bcell(adata_counts1, ncomps, knn, embedding):
# print('npwhere',np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0])
adata_counts = adata_counts1.copy()
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
adata_counts.uns['iroot'] = 33 # np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0]
sc.pp.neighbors(adata_counts, n_neighbors=knn, n_pcs=ncomps) # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') # force-directed layout
start_dfmap = time.time()
sc.tl.diffmap(adata_counts, n_comps=ncomps)
print('time taken to get diffmap given knn', time.time() - start_dfmap)
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap') # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
sc.tl.leiden(adata_counts, resolution=1.0)
sc.tl.paga(adata_counts, groups='leiden')
# sc.pl.paga(adata_counts, color=['louvain','group_id'])
sc.tl.dpt(adata_counts, n_dcs=ncomps)
sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'],
title=['leiden (knn:' + str(knn) + ' ncomps:' + str(ncomps) + ')',
'group_id (ncomps:' + str(ncomps) + ')', 'pseudotime (ncomps:' + str(ncomps) + ')'])
sc.pl.draw_graph(adata_counts, color='dpt_pseudotime', legend_loc='on data')
print('dpt format', adata_counts.obs['dpt_pseudotime'])
plt.scatter(embedding[:, 0], embedding[:, 1], c=adata_counts.obs['dpt_pseudotime'].values, cmap='viridis')
plt.title('PAGA DPT')
plt.show()
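# helper parsers for the B-cell column names, which appear to follow the pattern 'Ik<hours>h<replicate>_...': find_time_Bcell returns the hour, find_cellID_Bcell the replicate/cell identifier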
def find_time_Bcell(s):
start = s.find("Ik") + len("Ik")
end = s.find("h")
return int(s[start:end])
def find_cellID_Bcell(s):
start = s.find("h") + len("h")
end = s.find("_")
return s[start:end]
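# load the STATegra B-cell count table (genes x samples) together with the gene attribute table that supplies the gene_short_name annotation for each tracking_id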
Bcell = pd.read_csv(path + 'genes_count_table.txt', sep='\t')
gene_name = pd.read_csv(path + 'genes_attr_table.txt', sep='\t')
Bcell_columns = [i for i in Bcell.columns]
adata_counts = sc.AnnData(Bcell.values[:, 1:].T)
Bcell_columns.remove('tracking_id')
print(gene_name.shape, gene_name.columns)
Bcell['gene_short_name'] = gene_name['gene_short_name']
adata_counts.var_names = gene_name['gene_short_name']
adata_counts.obs['TimeCellID'] = Bcell_columns
time_list = [find_time_Bcell(s) for s in Bcell_columns]
print('time list set', set(time_list))
adata_counts.obs['TimeStamp'] = [str(tt) for tt in time_list]
ID_list = [find_cellID_Bcell(s) for s in Bcell_columns]
adata_counts.obs['group_id'] = [str(i) for i in time_list]
ID_dict = {}
color_dict = {}
for j, i in enumerate(list(set(ID_list))):
ID_dict.update({i: j})
print('timelist', list(set(time_list)))
for j, i in enumerate(list(set(time_list))):
color_dict.update({i: j})
print('shape of raw data', adata_counts.shape)
adata_counts_unfiltered = adata_counts.copy()
Bcell_marker_gene_list = ['Myc', 'Igll1', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4']
small_large_gene_list = ['Kit', 'Pcna', 'Ptprc', 'Il2ra', 'Vpreb1', 'Cd24a', 'Igll1', 'Cd79a', 'Cd79b', 'Mme',
'Spn']
list_var_names = [s for s in adata_counts_unfiltered.var_names]
matching = [s for s in list_var_names if "IgG" in s]
for gene_name in Bcell_marker_gene_list:
print('gene name', gene_name)
loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
for gene_name in small_large_gene_list:
print('looking at small-big list')
print('gene name', gene_name)
loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
# diff_list = [i for i in diff_list if i in list_var_names] #based on paper STable1 https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.2006506#pbio.2006506.s007
# adata_counts = adata_counts[:,diff_list] #if using these, then set do-HVG to False
print('adata counts difflisted', adata_counts.shape)
adata_counts = run_zheng_Bcell(adata_counts, n_top_genes=5000, min_counts=30,
do_HVG=True) # 5000 for better ordering
print('adata counts shape', adata_counts.shape)
# sc.pp.recipe_zheng17(adata_counts)
# (ncomp=50, knn=20 gives nice results. use 10PCs for visualizing)
marker_genes = {"small": ['Rag2', 'Rag1', 'Pcna', 'Myc', 'Ccnd2', 'Cdkn1a', 'Smad4', 'Smad3', 'Cdkn2a'],
# B220 = Ptprc, PCNA negative for non cycling
"large": ['Ighm', 'Kit', 'Ptprc', 'Cd19', 'Il2ra', 'Vpreb1', 'Cd24a', 'Igll1', 'Cd79a', 'Cd79b'],
"Pre-B2": ['Mme', 'Spn']} # 'Cd19','Cxcl13',,'Kit'
print('make the v0 matrix plot')
mplot_adata = adata_counts_unfiltered.copy() # mplot_adata is for heatmaps so that we keep all genes
mplot_adata = run_zheng_Bcell(mplot_adata, n_top_genes=25000, min_counts=1, do_HVG=False)
# mplot_adata.X[mplot_adata.X>10] =10
# mplot_adata.X[mplot_adata.X< -1] = -1
# sc.pl.matrixplot(mplot_adata, marker_genes, groupby='TimeStamp', dendrogram=True)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=200) # ncomps
# df_bcell_pc = pd.DataFrame(adata_counts.obsm['X_pca'])
# print('df_bcell_pc.shape',df_bcell_pc.shape)
# df_bcell_pc['time'] = [str(i) for i in time_list]
# df_bcell_pc.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_200PCs.csv')
# sc.pl.pca_variance_ratio(adata_counts, log=True)
jet = cm.get_cmap('viridis', len(set(time_list)))
cmap_ = jet(range(len(set(time_list))))
jet2 = cm.get_cmap('jet', len(set(ID_list)))
cmap2_ = jet2(range(len(set(ID_list))))
# color_dict = {"0": [0], "2": [1], "6": [2], "12": [3], "18": [4], "24": [5]}
# sc.pl.heatmap(mplot_adata, var_names = small_large_gene_list,groupby = 'TimeStamp', dendrogram = True)
embedding = umap.UMAP(random_state=42, n_neighbors=15, init='random').fit_transform(
adata_counts.obsm['X_pca'][:, 0:5])
df_umap = pd.DataFrame(embedding)
# df_umap.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_umap.csv')
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharey=True)
for i in list(set(time_list)):
loc = np.where(np.asarray(time_list) == i)[0]
ax4.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap_[color_dict[i]], alpha=1, label=str(i))
if i == 0:
for xx in range(len(loc)):
poss = loc[xx]
ax4.text(embedding[poss, 0], embedding[poss, 1], 'c' + str(xx))
ax4.legend()
ax1.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:, 'Pcna'].X.flatten(), alpha=1)
ax1.set_title('Pcna, cycling')
ax2.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:, 'Vpreb1'].X.flatten(), alpha=1)
ax2.set_title('Vpreb1')
ax3.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:, 'Cd24a'].X.flatten(), alpha=1)
ax3.set_title('Cd24a')
# ax2.text(embedding[i, 0], embedding[i, 1], str(i))
'''
for i, j in enumerate(list(set(ID_list))):
loc = np.where(np.asarray(ID_list) == j)
if 'r'in j: ax2.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap2_[i], alpha=1, label=str(j), edgecolors = 'black' )
else: ax2.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap2_[i], alpha=1, label=str(j))
'''
# plt.show()
true_label = time_list
# run_paga_func_Bcell(adata_counts, ncomps, knn, embedding)
#run_palantir_func_Bcell(adata_counts, ncomps, knn, embedding, true_label)
print('input has shape', adata_counts.obsm['X_pca'].shape)
input_via = adata_counts.obsm['X_pca'][:, 0:ncomps]
df_input = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:200])
df_annot = pd.DataFrame(['t' + str(i) for i in true_label])
# df_input.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_200PC_5000HVG.csv')
# df_annot.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_annots.csv')
root_user = [42]
v0 = VIA(input_via, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.3, dataset='bcell',
cluster_graph_pruning_std=cluster_graph_pruning_std,
root_user=root_user, preserve_disconnected=True, random_seed=random_seed,
do_impute_bool=True) # *.4#root_user = 34
v0.run_VIA()
super_labels = v0.labels
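# map each terminal cluster from the coarse pass to a representative single cell in PCA space; these cells seed the terminal states of the fine-grained pass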
tsi_list = get_loc_terminal_states(via0=v0, X_input=adata_counts.obsm['X_pca'][:, 0:ncomps])
v1 = VIA(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.05, is_coarse=False,
cluster_graph_pruning_std=cluster_graph_pruning_std,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, full_neighbor_array=v0.full_neighbor_array,
full_distance_array=v0.full_distance_array, ig_full_graph=v0.ig_full_graph,
csr_array_locally_pruned=v0.csr_array_locally_pruned,
x_lazy=0.99, alpha_teleport=0.99, preserve_disconnected=True, dataset='bcell',
super_terminal_clusters=v0.terminal_clusters, random_seed=random_seed)
v1.run_VIA()
labels = v1.labels
super_edges = v0.edgelist
# plot gene expression vs. pseudotime
Bcell_marker_gene_list = ['Igll1', 'Myc', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4', 'Sp7', 'Zfp629'] # irf4 down-up
df_ = pd.DataFrame(adata_counts_unfiltered.X) # no normalization, or scaling of the gene count values
df_.columns = [i for i in adata_counts_unfiltered.var_names]
df_Bcell_marker = df_[Bcell_marker_gene_list]
print(df_Bcell_marker.shape, 'df_Bcell_marker.shape')
df_Bcell_marker.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_markergenes.csv')
# v0 is run with do_impute_bool=True, hence it stores the full graph (in subsequent iterations we don't recompute and store the full unpruned knn graph)
df_magic = v0.do_impute(df_, magic_steps=3, gene_list=Bcell_marker_gene_list)
for gene_name in Bcell_marker_gene_list:
# loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
subset_ = df_magic[gene_name].values
v1.get_gene_expression(subset_, title_gene=gene_name)
# magic_ad = adata_counts_unfiltered.X[:, loc_gata]
# v1.get_gene_expression(magic_ad, gene_name)
n_downsample = 100
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=900)
np.random.seed(2357)
# idx = np.random.choice(a=np.arange(0, len(labels)), size=len(labels), replace=False, p=None)
idx = np.arange(0, len(labels))
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
true_label = list((np.asarray(true_label)[idx]))
sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov)[idx])
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
graph_embedding = v0.knngraph_visual(input_via[idx, 0:5], knn_umap=10, downsampled=True)
embedding_hnsw = v0.run_umap_hnsw(input_via[idx, 0:5], graph_embedding)
# embedding = embedding_hnsw
# loc0 = np.where(np.asarray(true_label)==0)[0]
# for item in loc0:
# print(item, 'at', embedding[item,:])
embedding = embedding[idx, :]
print('tsne downsampled size', embedding.shape)
else:
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][:,0:5]) # (adata_counts.obsm['X_pca'])
print('tsne input size', adata_counts.obsm['X_pca'].shape)
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
idx = np.arange(0, len(labels)) # np.random.randint(len(labels), size=len(labels))
sc_pt_markov = v1.single_cell_pt_markov
# embedding = umap.UMAP(random_state=42, n_neighbors=15, init=umap_init).fit_transform( adata_counts.obsm['X_pca'][:, 0:5])
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=ncomps)
plt.show()
knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx,
adata_counts.obsm['X_pca'][:, 0:ncomps])
plt.show()
def plot_EB():
# genes along lineage cluster path
df_groupby_p1 = pd.read_csv(
'/home/shobi/Trajectory/Datasets/EB_Phate/df_groupbyParc1_knn20_pc100_seed20_allgenes.csv')
path_clusters = [43, 38, 42, 56, 7,
3] # NC[43,41,16,2,3,6]#SMP[43,41,16,14,11,18]#C[43,41,16,14,12,15]#NS3[43,38,42,56,7,3]
target = "NS 3" # 'NC 6' #'SMP 18'#' Cardiac 15'
marker_genes_dict = {'Hermang': ['TAL1', 'HOXB4', 'SOX17', 'CD34', 'PECAM1'],
'NP': ['NES', 'MAP2'], 'NS': ['LHX2', 'NR2F1', 'DMRT3', 'LMX1A',
# 'KLF7', 'ISL1', 'DLX1', 'ONECUT1', 'ONECUT2', 'OLIG1','PAX6', 'ZBTB16','NPAS1', 'SOX1'
'NKX2-8', 'EN2'], 'NC': ['PAX3', 'FOXD3', 'SOX9', 'SOX10'],
'PostEn': ['CDX2', 'ASCL2', 'KLF5', 'NKX2-1'],
'EN': ['ARID3A', 'GATA3', 'SATB1', 'SOX15', 'SOX17', 'FOXA2'],
'Pre-NE': ['POU5F1', 'OTX2'], 'SMP': ['TBX18', 'SIX2', 'TBX15', 'PDGFRA'],
'Cardiac': ['TNNT2', 'HAND1', 'F3', 'CD82', 'LIFR'],
'EpiCard': ['WT1', 'TBX5', 'HOXD9', 'MYC', 'LOX'],
'PS/ME': ['T', 'EOMES', 'MIXL1', 'CER1', 'SATB1'],
'NE': ['GBX2', 'GLI3', 'LHX2', 'LHX5', 'SIX3', 'SIX6'],
# 'OLIG3','HOXD1', 'ZIC2', 'ZIC5','HOXA2','HOXB2'
'ESC': ['NANOG', 'POU5F1'], 'Lat-ME': ['TBX5', 'HOXD9', 'MYC']}
relevant_genes = []
relevant_keys = ['ESC', 'Pre-NE', 'NE', 'NP',
'NS'] # NC['ESC', 'Pre-NE', 'NE', 'NC']#SMP['ESC','PS/ME','Lat-ME','SMP']#NS['ESC', 'Pre-NE', 'NE', 'NP', 'NS']
dict_subset = {key: value for key, value in marker_genes_dict.items() if key in relevant_keys}
print('dict subset', dict_subset)
for key in relevant_keys:
relevant_genes.append(marker_genes_dict[key])
relevant_genes = [item for sublist in relevant_genes for item in sublist]
print(relevant_genes)
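# keep only the clusters along the chosen lineage path (rows) and the relevant marker genes (columns), then transpose so genes appear as rows in the heatmap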
df_groupby_p1 = df_groupby_p1.set_index('parc1')
df_groupby_p1 = df_groupby_p1.loc[path_clusters]
df_groupby_p1 = df_groupby_p1[relevant_genes]
df_groupby_p1 = df_groupby_p1.transpose()
# print( df_groupby_p1.head)
# print(df_groupby_p1)
ax = sns.heatmap(df_groupby_p1, vmin=-1, vmax=1, yticklabels=True)
ax.set_title('target ' + str(target))
plt.show()
# df_groupby_p1 = pd.concat([df_groupby_p1,df_groupby_p1])
# adata = sc.AnnData(df_groupby_p1)
# adata.var_names = df_groupby_p1.columns
# print(adata.var_names)
# adata.obs['parc1'] = ['43','38','42','56','7','3','43','38','42','56','7','3']
# print(adata.obs['parc1'])
# sc.pl.matrixplot(adata, dict_subset, groupby='parc1', vmax=1, vmin=-1, dendrogram=False)
def main_EB_clean(ncomps=30, knn=20, v0_random_seed=24, cluster_graph_pruning_std=.15,
foldername='/home/shobi/Trajectory/Datasets/EB_Phate/'):
marker_genes_dict = {'Hermang': ['TAL1', 'HOXB4', 'SOX17', 'CD34', 'PECAM1'],
'NP': ['NES', 'MAP2'],
'NS': ['KLF7', 'ISL1', 'DLX1', 'ONECUT1', 'ONECUT2', 'OLIG1', 'NPAS1', 'LHX2', 'NR2F1',
'NPAS1', 'DMRT3', 'LMX1A',
'NKX2-8', 'EN2', 'SOX1', 'PAX6', 'ZBTB16'], 'NC': ['PAX3', 'FOXD3', 'SOX9', 'SOX10'],
'PostEn': ['CDX2', 'ASCL2', 'KLF5', 'NKX2-1'],
'EN': ['ARID3A', 'GATA3', 'SATB1', 'SOX15', 'SOX17', 'FOXA2'], 'Pre-NE': ['POU5F1', 'OTX2'],
'SMP': ['TBX18', 'SIX2', 'TBX15', 'PDGFRA'],
'Cardiac': ['TNNT2', 'HAND1', 'F3', 'CD82', 'LIFR'],
'EpiCard': ['WT1', 'TBX5', 'HOXD9', 'MYC', 'LOX'],
'PS/ME': ['T', 'EOMES', 'MIXL1', 'CER1', 'SATB1'],
'NE': ['GBX2', 'OLIG3', 'HOXD1', 'ZIC2', 'ZIC5', 'GLI3', 'LHX2', 'LHX5', 'SIX3', 'SIX6',
'HOXA2', 'HOXB2'], 'ESC': ['NANOG', 'POU5F1', 'OTX2']}
marker_genes_list = []
for key in marker_genes_dict:
for item in marker_genes_dict[key]:
marker_genes_list.append(item)
v0_too_big = 0.3
v1_too_big = 0.05
n_var_genes = 'no filtering for HVG' # 15000
print('ncomps, knn, n_var_genes, v0big, p1big, randomseed, time', ncomps, knn, n_var_genes, v0_too_big, v1_too_big,
v0_random_seed, time.ctime())
# TI_pcs = pd.read_csv(foldername+'PCA_TI_200_final.csv')
# TI_pcs is PCA run on data that has been: filtered (remove cells with too large or small library count - can directly use all cells in EBdata.mat), library normed, sqrt transform, scaled to unit variance/zero mean
# TI_pcs = TI_pcs.values[:, 1:]
from scipy.io import loadmat
annots = loadmat(
foldername + 'EBdata.mat') # has been filtered but not yet normed (by library size) nor other subsequent pre-processing steps
# print('annots', annots)
data = annots['data'].toarray() # (16825, 17580) (cells and genes have been filtered)
# print('data min max', np.max(data), np.min(data), data[1, 0:20], data[5, 250:270], data[1000, 15000:15050])
loc_ = np.where((data < 1) & (data > 0))
temp = data[(data < 1) & (data > 0)]
# print('temp non int', temp)
time_labels = annots['cells'].flatten().tolist()
# df_timelabels = pd.DataFrame(time_labels, columns=['true_time_labels'])
# df_timelabels.to_csv(foldername+'EB_true_time_labels.csv')
gene_names_raw = annots['EBgenes_name'] # (17580, 1) genes
adata = sc.AnnData(data)
gene_names = []
for i in gene_names_raw:
gene_names.append(i[0][0])
adata.var_names = gene_names
adata.obs['time'] = ['Day' + str(i) for i in time_labels]
adata.X = sc.pp.normalize_total(adata, inplace=False)['X'] # normalize by library after filtering
adata.X = np.sqrt(adata.X) # follow the PHATE paper, which uses a sqrt() transform instead of log1p()
Y_phate = pd.read_csv(foldername + 'EB_phate_embedding.csv')
Y_phate = Y_phate.values
# phate_operator = phate.PHATE(n_jobs=-1)
# Y_phate = phate_operator.fit_transform(adata.X) # before scaling. as done in PHATE
scale = False # scaling mostly improves the cluster-graph heatmap of genes vs clusters; it doesn't sway VIA performance
if scale == True: # we scale before VIA. scaling not needed for PHATE
print('pp scaled')
adata.X = (adata.X - np.mean(adata.X, axis=0)) / np.std(adata.X, axis=0)
print('data max min after SCALED', np.max(adata.X), np.min(adata.X))
else:
print('not pp scaled')
sc.tl.pca(adata, svd_solver='arpack', n_comps=200, random_state=0)
# adata.obsm['X_pca'] = TI_pcs
input_data = adata.obsm['X_pca'][:, 0:ncomps]
print('do v0')
root_user = [1]
v0 = VIA(input_data, time_labels, jac_std_global=0.15, dist_std_local=1, knn=knn,
cluster_graph_pruning_std=cluster_graph_pruning_std,
too_big_factor=v0_too_big, root_user=root_user, dataset='EB', random_seed=v0_random_seed,
do_impute_bool=True, is_coarse=True, preserve_disconnected=True) # *.4 root=1,
v0.run_VIA()
tsi_list = get_loc_terminal_states(v0, input_data)
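# fine-grained VIA pass: reuse the coarse labels, the terminal-state cells and the knn graph from v0 rather than recomputing them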
v1 = VIA(input_data, time_labels, jac_std_global=0.15, dist_std_local=1, knn=knn,
cluster_graph_pruning_std=cluster_graph_pruning_std,
too_big_factor=v1_too_big, super_cluster_labels=v0.labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, is_coarse=False,
full_neighbor_array=v0.full_neighbor_array,
full_distance_array=v0.full_distance_array, ig_full_graph=v0.ig_full_graph,
csr_array_locally_pruned=v0.csr_array_locally_pruned,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='EB',
super_terminal_clusters=v0.terminal_clusters, random_seed=21)
v1.run_VIA()
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.scatter(Y_phate[:, 0], Y_phate[:, 1], c=time_labels, s=5, cmap='viridis', alpha=0.5)
ax2.scatter(Y_phate[:, 0], Y_phate[:, 1], c=v1.single_cell_pt_markov, s=5, cmap='viridis', alpha=0.5)
ax1.set_title('Embryoid: Annotated Days')
ax2.set_title('Embryoid VIA Pseudotime (Randomseed' + str(v0_random_seed) + ')')
plt.show()
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, np.arange(0, len(v1.labels)))
draw_trajectory_gams(Y_phate, super_clus_ds_PCA_loc, v1.labels, v0.labels, v0.edgelist_maxout,
v1.x_lazy, v1.alpha_teleport, v1.single_cell_pt_markov, time_labels, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Markov Hitting Times and Paths', ncomp=ncomps)
knn_hnsw = make_knn_embeddedspace(Y_phate)
draw_sc_evolution_trajectory_dijkstra(v1, Y_phate, knn_hnsw, v0.full_graph_shortpath,
idx=np.arange(0, input_data.shape[0]))
plt.show()
adata.obs['via0'] = [str(i) for i in v0.labels]
adata.obs['parc1'] = [str(i) for i in v1.labels]
adata.obs['terminal_state'] = ['True' if i in v1.terminal_clusters else 'False' for i in v1.labels]
adata.X = (adata.X - np.mean(adata.X, axis=0)) / np.std(adata.X,
axis=0) # to improve scale of the matrix plot we will scale
sc.pl.matrixplot(adata, marker_genes_dict, groupby='parc1', vmax=1, vmin=-1, dendrogram=True, figsize=[20, 10])
def main_EB(ncomps=30, knn=20, v0_random_seed=24):
marker_genes_dict = {'Hermang': ['TAL1', 'HOXB4', 'SOX17', 'CD34', 'PECAM1'],
'NP': ['NES', 'MAP2'],
'NS': ['KLF7', 'ISL1', 'DLX1', 'ONECUT1', 'ONECUT2', 'OLIG1', 'NPAS1', 'LHX2', 'NR2F1',
'NPAS1', 'DMRT3', 'LMX1A',
'NKX2-8', 'EN2', 'SOX1', 'PAX6', 'ZBTB16'], 'NC': ['PAX3', 'FOXD3', 'SOX9', 'SOX10'],
'PostEn': ['CDX2', 'ASCL2', 'KLF5', 'NKX2-1'],
'EN': ['ARID3A', 'GATA3', 'SATB1', 'SOX15', 'SOX17', 'FOXA2'], 'Pre-NE': ['POU5F1', 'OTX2'],
'SMP': ['TBX18', 'SIX2', 'TBX15', 'PDGFRA'],
'Cardiac': ['TNNT2', 'HAND1', 'F3', 'CD82', 'LIFR'],
'EpiCard': ['WT1', 'TBX5', 'HOXD9', 'MYC', 'LOX'],
'PS/ME': ['T', 'EOMES', 'MIXL1', 'CER1', 'SATB1'],
'NE': ['GBX2', 'OLIG3', 'HOXD1', 'ZIC2', 'ZIC5', 'GLI3', 'LHX2', 'LHX5', 'SIX3', 'SIX6',
'HOXA2', 'HOXB2'], 'ESC': ['NANOG', 'POU5F1', 'OTX2']}
marker_genes_list = []
for key in marker_genes_dict:
for item in marker_genes_dict[key]:
marker_genes_list.append(item)
v0_too_big = 0.3
v1_too_big = 0.05
root_user = 1
n_var_genes = 'no filtering for HVG' # 15000
print('ncomps, knn, n_var_genes, v0big, p1big, randomseed, time', ncomps, knn, n_var_genes, v0_too_big, v1_too_big,
v0_random_seed, time.ctime())
# data = data.drop(['Unnamed: 0'], axis=1)
TI_pcs = pd.read_csv(
'/home/shobi/Trajectory/Datasets/EB_Phate/PCA_TI_200_final.csv') # filtered, library normed, sqrt transform, scaled to unit variance/zero mean
TI_pcs = TI_pcs.values[:, 1:]
umap_pcs = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/PCA_umap_200_TuesAM.csv')
umap_pcs = umap_pcs.values[:, 1:]
# print('TI PC shape', TI_pcs.shape)
from scipy.io import loadmat
annots = loadmat(
'/home/shobi/Trajectory/Datasets/EB_Phate/EBdata.mat') # has been filtered but not yet normed (by library size) nor other subsequent pre-processing steps
data = annots['data'].toarray() # (16825, 17580) (cells and genes have been filtered)
# print('data min max', np.max(data), np.min(data), data[1, 0:20], data[5, 250:270], data[1000, 15000:15050])
# loc_ = np.where((data < 1) & (data > 0))
temp = data[(data < 1) & (data > 0)]
# print('temp non int', temp)
time_labels = annots['cells'].flatten().tolist()
import scprep
dict_labels = {'Day 00-03': 0, 'Day 06-09': 2, 'Day 12-15': 4, 'Day 18-21': 6, 'Day 24-27': 8}
# print(annots.keys()) # (['__header__', '__version__', '__globals__', 'EBgenes_name', 'cells', 'data'])
gene_names_raw = annots['EBgenes_name'] # (17580, 1) genes
print(data.shape)
adata = sc.AnnData(data)
# time_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/labels_1.csv')
# time_labels = time_labels.drop(['Unnamed: 0'], axis=1)
# time_labels = time_labels['time']
# adata.obs['time'] = [str(i) for i in time_labels]
gene_names = []
for i in gene_names_raw:
gene_names.append(i[0][0])
adata.var_names = gene_names
adata.obs['time'] = [str(i) for i in time_labels]
# filter_result = sc.pp.filter_genes_dispersion(adata.X, flavor='cell_ranger', n_top_genes=5000, log=False) #dont take log
adata_umap = adata.copy()
# adata = adata[:, filter_result.gene_subset] # subset the genes
# sc.pp.normalize_per_cell(adata, min_counts=2) # renormalize after filtering
print('data max min BEFORE NORM', np.max(adata.X), np.min(adata.X), adata.X[1, 0:20])
rowsums = adata.X.sum(axis=1)
# adata.X = adata.X / rowsums[:, np.newaxis]
# adata.X = sc.pp.normalize_total(adata, exclude_highly_expressed=True, max_fraction=0.05, inplace=False)['X'] #normalize after filtering
adata.X = sc.pp.normalize_total(adata, inplace=False)['X'] # normalize after filtering
print('data max min after NORM', np.max(adata.X), np.min(adata.X), adata.X[1, 0:20])
adata.X = np.sqrt(adata.X) # follow the PHATE paper, which uses a sqrt() transform instead of log1p()
adata_umap.X = np.sqrt(adata_umap.X)
print('data max min after SQRT', np.max(adata.X), np.min(adata.X), adata.X[1, 0:20])
# sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
'''
phate_operator = phate.PHATE(n_jobs=-1)
Y_phate = phate_operator.fit_transform(adata.X)
scprep.plot.scatter2d(Y_phate, c=time_labels, figsize=(12, 8), cmap="Spectral",
ticks=False, label_prefix="PHATE")
plt.show()
'''
Y_phate = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/EB_phate_embedding.csv')
Y_phate = Y_phate.values
scale = True
if scale == True:
print('pp scaled')
# sc.pp.scale(adata)
adata.X = (adata.X - np.mean(adata.X, axis=0)) / np.std(adata.X, axis=0)
sc.pp.scale(adata_umap)
print('data max min after SCALED', np.max(adata.X), np.min(adata.X))
else:
print('not pp scaled')
print('sqrt transformed')
# sc.pp.recipe_zheng17(adata, n_top_genes=15000) #expects non-log data
# g = sc.tl.rank_genes_groups(adata, groupby='time', use_raw=True, n_genes=10)#method='t-test_overestim_var'
# sc.pl.rank_genes_groups_heatmap(adata, n_genes=3, standard_scale='var')
'''
pcs = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/umap_200_matlab.csv')
pcs = pcs.drop(['Unnamed: 0'], axis=1)
pcs = pcs.values
print(time.ctime())
ncomps = 50
input_data =pcs[:, 0:ncomps]
'''
print('v0_toobig, p1_toobig, v0randomseed', v0_too_big, v1_too_big, v0_random_seed)
print('do pca')
# sc.tl.pca(adata, svd_solver='arpack', n_comps=200, random_state = 0)
# sc.tl.pca(adata_umap, svd_solver='arpack', n_comps=200)
# df_pca_TI_200 = pd.DataFrame(adata.obsm['X_pca'])
# df_pca_TI_200.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/PCA_TI_200_TuesAM.csv')
# df_pca_umap_200 = pd.DataFrame(adata_umap.obsm['X_pca'])
# df_pca_umap_200.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/PCA_umap_200_TuesAM.csv')
adata.obsm['X_pca'] = TI_pcs
adata_umap.obsm['X_pca'] = umap_pcs
input_data = adata.obsm['X_pca'][:, 0:ncomps]
'''
#plot genes vs clusters for each trajectory
df_plot_gene = pd.DataFrame(adata.X, columns=[i for i in adata.var_names])
df_plot_gene = df_plot_gene[marker_genes_list]
previous_p1_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/df_labels_knn20_pc100_seed20.csv')
title_str = 'Terminal state 27 (Cardiac)'
gene_groups = ['ESC', 'PS/ME','EN','Cardiac']
clusters = [43,41,16,14,12,27]
'''
u_knn = 15
repulsion_strength = 1
n_pcs = 10
print('knn and repel', u_knn, repulsion_strength)
U = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/umap_pc10_knn15.csv')
U = U.values[:, 1:]
U = Y_phate
# U = umap.UMAP(n_neighbors=u_knn, random_state=1, repulsion_strength=repulsion_strength).fit_transform(adata_umap.obsm['X_pca'][:, 0:n_pcs])
#print('start palantir', time.ctime())
# run_palantir_EB(adata, knn=knn, ncomps=ncomps, tsne=U, str_true_label=[str(i) for i in time_labels])
#print('end palantir', time.ctime())
# df_U = pd.DataFrame(U)
# df_U.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/umap_pc10_knn15.csv')
print('do v0')
v0 = VIA(input_data, time_labels, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=v0_too_big, root_user=root_user, dataset='EB', random_seed=v0_random_seed,
do_impute_bool=True, is_coarse=True, preserve_disconnected=True) # *.4 root=1,
v0.run_VIA()
super_labels = v0.labels
v0_labels_df = pd.DataFrame(super_labels, columns=['v0_labels'])
v0_labels_df.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/p0_labels.csv')
adata.obs['via0'] = [str(i) for i in super_labels]
'''
df_temp1 = pd.DataFrame(adata.X, columns = [i for i in adata.var_names])
df_temp1 = df_temp1[marker_genes_list]
df_temp1['via0']=[str(i) for i in super_labels]
df_temp1 = df_temp1.groupby('via0').mean()
'''
# sns.clustermap(df_temp1, vmin=-1, vmax=1,xticklabels=True, yticklabels=True, row_cluster= False, col_cluster=True)
# sc.pl.matrixplot(adata, marker_genes_dict, groupby='via0', vmax=1, vmin =-1, dendrogram=True)
'''
sc.tl.rank_genes_groups(adata, groupby='via0', use_raw=True,
method='t-test_overestim_var', n_genes=5) # compute differential expression
sc.pl.rank_genes_groups_heatmap(adata, groupby='via0',vmin=-3, vmax=3) # plot the result
'''
p = hnswlib.Index(space='l2', dim=input_data.shape[1])
p.init_index(max_elements=input_data.shape[0], ef_construction=100, M=16)
p.add_items(input_data)
p.set_ef(30)
tsi_list = get_loc_terminal_states(v0, input_data)
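# fine-grained VIA pass seeded with the terminal-state cells (tsi_list) and the knn graph computed in the coarse pass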
v1 = VIA(input_data, time_labels, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=v1_too_big, is_coarse=False,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, ig_full_graph=v0.ig_full_graph,
csr_array_locally_pruned=v0.csr_array_locally_pruned, full_distance_array=v0.full_distance_array,
full_neighbor_array=v0.full_neighbor_array,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='EB',
super_terminal_clusters=v0.terminal_clusters, random_seed=v0_random_seed)
v1.run_VIA()
# adata.obs['parc1'] = [str(i) for i in v1.labels]
# sc.pl.matrixplot(adata, marker_genes, groupby='parc1', dendrogram=True)
labels = v1.labels
'''
df_labels = pd.DataFrame({'v0_labels':v0.labels,'p1_labels':v1.labels})
df_labels['sub_TS'] = [1 if i in v1.terminal_clusters else 0 for i in v1.labels]
df_labels['super_TS'] = [1 if i in v0.terminal_clusters else 0 for i in v0.labels]
df_labels.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/df_labels_knn20_pc100_seed20.csv')
df_temp2 = pd.DataFrame(adata.X, columns=[i for i in adata.var_names])
df_temp2 = df_temp2[marker_genes_list]
df_temp2['parc1'] = [str(i) for i in labels]
df_temp2 = df_temp2.groupby('parc1').mean()
df_temp2.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/df_groupbyParc1_knn20_pc100_seed20.csv')
'''
adata.obs['parc1'] = [str(i) for i in labels]
# df_ts = pd.DataFrame(adata.X, columns = [i for i in adata.var_names])
# df_ts = df_ts[marker_genes_list]
# df_ts['parc1'] = [str(i) for i in labels]
adata.obs['terminal_state'] = ['True' if i in v1.terminal_clusters else 'False' for i in labels]
# df_ts = df_ts[df_ts['terminal_state']=='True']
adata_TS = adata[adata.obs['terminal_state'] == 'True']
# sns.clustermap(df_temp1, vmin=-1, vmax=1, xticklabels=True, yticklabels=True, row_cluster=False, col_cluster=True)
sc.pl.matrixplot(adata, marker_genes_dict, groupby='parc1', vmax=1, vmin=-1, dendrogram=True)
# sc.pl.matrixplot(adata_TS, marker_genes_dict, groupby='parc1', vmax=1, vmin=-1, dendrogram=True)
# U = umap.UMAP(n_neighbors=10, random_state=0, repulsion_strength=repulsion_strength).fit_transform(input_data[:, 0:n_pcs])
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.scatter(U[:, 0], U[:, 1], c=time_labels, s=5, cmap='viridis', alpha=0.5)
ax2.scatter(U[:, 0], U[:, 1], c=v1.single_cell_pt_markov, s=5, cmap='viridis', alpha=0.5)
plt.title('repulsion and knn and pcs ' + str(repulsion_strength) + ' ' + str(u_knn) + ' ' + str(
n_pcs) + ' randseed' + str(v0_random_seed))
plt.show()
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, np.arange(0, len(labels)))
draw_trajectory_gams(U, super_clus_ds_PCA_loc, labels, super_labels, v0.edgelist_maxout,
v1.x_lazy, v1.alpha_teleport, v1.single_cell_pt_markov, time_labels, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=ncomps)
plt.show()
knn_hnsw = make_knn_embeddedspace(U)
draw_sc_evolution_trajectory_dijkstra(v1, U, knn_hnsw, v0.full_graph_shortpath,
idx=np.arange(0, input_data.shape[0]))
plt.show()
def main_mESC(knn=30, v0_random_seed=42, cluster_graph_pruning_std=.0, run_palantir_func=False):
import random
rand_str = 950 # random.randint(1, 999)
print('rand string', rand_str)
print('knn', knn)
data_random_seed = 20
root = '0.0'
type_germ = 'Meso'
normalize = True
data = pd.read_csv('/home/shobi/Trajectory/Datasets/mESC/mESC_' + type_germ + '_markers.csv')
print('counts', data.groupby('day').count())
# print(data.head())
print(data.shape)
n_sub = 7000
print('type,', type_germ, 'nelements', n_sub, 'v0 randseed', v0_random_seed)
title_string = 'randstr:' + str(rand_str) + ' Knn' + str(knn) + ' nelements:' + str(n_sub) + ' ' + 'meso'
# data = data[data['day']!=0]
v0_too_big = 0.3
p1_too_big = 0.15 # .15
print('v0 and p1 too big', v0_too_big, p1_too_big)
data_sub = data[data['day'] == 0.0]
np.random.seed(data_random_seed)
idx_sub = np.random.choice(a=np.arange(0, data_sub.shape[0]), size=min(n_sub, data_sub.shape[0]), replace=False,
p=None) # len(true_label)
data_sub = data_sub.values[idx_sub, :]
data_sub = pd.DataFrame(data_sub, columns=data.columns)
for i in [1.0, 2, 2.5, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0]:
sub = data[data['day'] == i]
print(sub.shape[0])
np.random.seed(data_random_seed)
idx_sub = np.random.choice(a=np.arange(0, sub.shape[0]), size=min(n_sub, sub.shape[0]), replace=False,
p=None) # len(true_label)
sub = sub.values[idx_sub, :]
print('size of sub', sub.shape)
sub = pd.DataFrame(sub, columns=data.columns)
data_sub = pd.concat([data_sub, sub], axis=0, ignore_index=True, sort=True)
true_label = data_sub['day']
true_type = data_sub['type']
data = data_sub.drop(['day', 'Unnamed: 0', 'type'], axis=1)
# print('after subbing', data.head)
cols = ['Sca-1', 'CD41', 'Nestin', 'Desmin',
'CD24', 'FoxA2', 'Oct4', 'CD45', 'Ki67', 'Vimentin',
'Nanog', 'pStat3-705', 'Sox2', 'Flk-1', 'Tuj1',
'H3K9ac', 'Lin28', 'PDGFRa', 'EpCAM', 'CD44',
'GATA4', 'Klf4', 'CCR9', 'p53', 'SSEA1', 'IdU', 'Cdx2'] # 'bCatenin'
meso_good = ['CD24', 'FoxA2', 'Oct4', 'CD45', 'Ki67', 'Vimentin', 'Cdx2', 'CD54', 'pStat3-705', 'Sox2', 'Flk-1',
'Tuj1', 'SSEA1', 'H3K9ac', 'Lin28', 'PDGFRa', 'bCatenin', 'EpCAM', 'CD44', 'GATA4', 'Klf4', 'CCR9',
'p53']
marker_genes_ecto = ['Oct4', 'Nestin', 'CD45', 'Vimentin', 'Cdx2', 'Flk-1', 'PDGFRa', 'CD44',
'GATA4', 'CCR9', 'CD54', 'CD24', 'CD41', 'Tuj1']
marker_genes_meso_paper_sub = ['Oct4', 'CD54', 'SSEA1', 'Lin28', 'Cdx2', 'CD45', 'Nanog', 'Sox2', 'Flk-1', 'Tuj1',
'PDGFRa', 'EpCAM', 'CD44', 'CCR9', 'GATA4']
marker_genes_meso_paper = ['Nestin', 'FoxA2', 'Oct4', 'CD45', 'Sox2', 'Flk-1', 'Tuj1', 'PDGFRa', 'EpCAM', 'CD44',
'GATA4', 'CCR9', 'Nanog', 'Cdx2', 'Vimentin'] # 'Nanog''Cdx2','Vimentin'
marker_genes_endo = ['Sca-1', 'Nestin', 'CD45', 'Vimentin', 'Cdx2', 'Flk-1', 'PDGFRa', 'CD44',
'GATA4', 'CCR9', 'CD54', 'CD24', 'CD41', 'Oct4']
marker_genes_meso = ['Sca-1', 'CD41', 'Nestin', 'Desmin', 'CD24', 'FoxA2', 'Oct4', 'CD45', 'Ki67', 'Vimentin',
'Cdx2', 'Nanog', 'pStat3-705', 'Sox2', 'Flk-1', 'Tuj1', 'H3K9ac', 'Lin28', 'PDGFRa', 'EpCAM',
'CD44', 'GATA4', 'Klf4', 'CCR9', 'p53', 'SSEA1', 'bCatenin', 'IdU'] # ,'c-Myc'
marker_dict = {'Ecto': marker_genes_ecto, 'Meso': marker_genes_meso, 'Endo': marker_genes_meso}
marker_genes = marker_dict[type_germ]
data = data[marker_genes]
print('marker genes ', marker_genes)
pre_fac_scale = [4, 1,
1] # 4,1,1 is used in the paper, but no scaling factor is really required (the results are unperturbed)
pre_fac_scale_genes = ['H3K9ac', 'Lin28', 'Oct4']
for pre_scale_i, pre_gene_i in zip(pre_fac_scale, pre_fac_scale_genes):
data[pre_gene_i] = data[pre_gene_i] / pre_scale_i
print('prescaled gene', pre_gene_i, 'by factor', pre_scale_i)
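# arcsinh transform with cofactor 5 (a common choice for mass cytometry intensities)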
scale_arcsinh = 5
raw = data.values
raw = raw.astype(float)
raw_df = pd.DataFrame(raw, columns=data.columns)
raw = raw / scale_arcsinh
raw = np.arcsinh(raw)
# print(data.shape, raw.shape)
adata = sc.AnnData(raw)
adata.var_names = data.columns
# print(adata.shape, len(data.columns))
true_label_int = [i for i in true_label]
adata.obs['day'] = ['0' + str(i) if i < 10 else str(i) for i in true_label_int]
true_label_str = [str(i) for i in
true_label_int] # the way find_root works is to match any part of root-user to majority truth
print(adata.obs['day'])
if normalize == True:
sc.pp.scale(adata, max_value=5)
print(colored('normalized', 'blue'))
else:
print(colored('NOT normalized', 'blue'))
print('adata', adata.shape)
# ncomps = 30
# sc.tl.pca(adata, svd_solver='arpack', n_comps=ncomps)
n_umap = adata.shape[0]
np.random.seed(data_random_seed)
udata = adata.X[:, :][0:n_umap]
# U = umap.UMAP().fit_transform(udata)
# U_df = pd.DataFrame(U, columns=['x', 'y'])
# U_df.to_csv('/home/shobi/Trajectory/Datasets/mESC/umap_89782cells_meso.csv')
idx = np.arange(0, adata.shape[
0]) # np.random.choice(a=np.arange(0, adata.shape[0]), size=adata.shape[0], replace=False, p=None) # len(true_label)
# idx=np.arange(0, len(true_label_int))
U = pd.read_csv(
'/home/shobi/Trajectory/Datasets/mESC/umap_89782cells_meso.csv') # umap_89782cells_7000each_Randseed20_meso.csv')
# U = pd.read_csv('/home/shobi/Trajectory/Datasets/mESC/phate_89782cells_mESC.csv')
U = U.values[0:len(true_label), 1:]
plt.scatter(U[:, 0], U[:, 1], c=true_label, cmap='jet', s=4, alpha=0.7)
plt.show()
'''
for gene_i in ['CD44', 'GATA4', 'PDGFRa', 'EpCAM']:
# subset = adata[[gene_i]].values #scale is not great so hard to visualize on the raw data expression
subset = adata[:, gene_i].X.flatten()
plt.scatter(U[:, 0], U[:, 1], c=subset, cmap='viridis', s=4, alpha=0.7)
plt.title(gene_i)
plt.show()
'''
print(U.shape)
# U_df = pd.read_csv('/home/shobi/Trajectory/Datasets/mESC/phate_89782cells_mESC.csv')
# U = U_df.drop('Unnamed: 0', 1)
U = U[idx, :]
# subsample start
n_subsample = len(true_label_int) # 50000 # palantir won't scale
U = U[0:n_subsample, :]
# phate_operator = phate.PHATE(n_jobs=-1)
# Y_phate = phate_operator.fit_transform(adata.X)
# phate_df = pd.DataFrame(Y_phate)
# phate_df.to_csv('/home/shobi/Trajectory/Datasets/mESC/phate_89782cells_mESC.csv')
true_label_int0 = list(np.asarray(true_label_int))
# Start Slingshot data prep
'''
slingshot_annots = true_label_int0[0:n_umap]
slingshot_annots = [int(i) for i in slingshot_annots]
Slingshot_annots = pd.DataFrame(slingshot_annots,columns = ['label'])
Slingshot_annots.to_csv('/home/shobi/Trajectory/Datasets/mESC/Slingshot_annots_int_10K_sep.csv')
Slingshot_data = pd.DataFrame(adata.X[0:n_umap], columns=marker_genes)
Slingshot_data.to_csv('/home/shobi/Trajectory/Datasets/mESC/Slingshot_input_data_10K_sep.csv')
# print('head sling shot data', Slingshot_data.head)
# print('head sling shot annots', Slingshot_annots.head)
print('slingshot data shape', Slingshot_data.shape)
# sling_adata =sc.AnnData(Slingshot_data)
'''
# end Slingshot data prep
adata = adata[idx]
true_label_int = list(np.asarray(true_label_int)[idx])
true_label_int = true_label_int[0:n_subsample]
true_label_str = list(np.asarray(true_label_str)[idx])
true_label_str = true_label_str[0:n_subsample]
true_type = list(np.asarray(true_type)[idx])
true_type = list(np.asarray(true_type)[idx])[0:n_subsample]
sc.tl.pca(adata, svd_solver='arpack', n_comps=20)
# plt.scatter(sling_adata.obsm['X_pca'][:,0],sling_adata.obsm['X_pca'][:,1], c = Slingshot_annots['label'])
plt.show()
print('time', time.ctime())
loc_start = np.where(np.asarray(true_label_int) == 0)[0][0]
adata.uns['iroot'] = loc_start
print('iroot', loc_start)
# Start PAGA
'''
sc.pp.neighbors(adata, n_neighbors=knn, n_pcs=28) # 4
sc.tl.draw_graph(adata)
# sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') # force-directed layout
start_dfmap = time.time()
sc.tl.diffmap(adata, n_comps=28)
print('time taken to get diffmap given knn', time.time() - start_dfmap)
#sc.pp.neighbors(adata, n_neighbors=knn, use_rep='X_diffmap') # 4
#sc.tl.draw_graph(adata)
sc.tl.leiden(adata, resolution=1.0, random_state=10)
sc.tl.paga(adata, groups='leiden')
adata.obs['group_id'] = true_label_int
# sc.pl.paga(adata_counts, color=['louvain','group_id'])
sc.tl.dpt(adata, n_dcs=28)
print('time paga end', time.ctime())
plt.show()
df_paga_dpt = pd.DataFrame()
df_paga_dpt['paga_dpt'] = adata.obs['dpt_pseudotime'].values
df_paga_dpt['days'] = true_label_int
df_paga_dpt.to_csv('/home/shobi/Trajectory/Datasets/mESC/paga_dpt_knn' + str(knn) + '.csv')
sc.pl.paga(adata, color=['leiden', 'group_id', 'dpt_pseudotime'],
title=['leiden', 'group_id', 'pseudotime'])
plt.show()
# sc.pl.matrixplot(adata, marker_genes_meso, groupby='day', dendrogram=True)
'''
# end PAGA
'''
#start palantir run
t_pal_start = time.time()
run_palantir_mESC(adata[0:n_subsample:], knn=knn, tsne=U, str_true_label = true_label_str)
print('palantir run time', round(time.time() - t_pal_start))
df_palantir = pd.read_csv('/home/shobi/Trajectory/Datasets/mESC/palantir_pt.csv')
df_palantir['days'] = true_label_int
df_palantir.to_csv('/home/shobi/Trajectory/Datasets/mESC/palantir_pt.csv')
'''
# df_ = pd.DataFrame(adata.X)
# df_.columns = [i for i in adata.var_names]
# df_.to_csv('/home/shobi/Trajectory/Datasets/mESC/transformed_normalized_input.csv')
df_ = pd.DataFrame(true_label_int, columns=['days'])
# df_.to_csv('/home/shobi/Trajectory/Datasets/mESC/annots_days.csv')
print('finished saving for monocle3')
v0 = VIA(adata.X, true_label_int, jac_std_global=0.3, dist_std_local=1, knn=knn,
cluster_graph_pruning_std=cluster_graph_pruning_std,
too_big_factor=v0_too_big, resolution_parameter=2,
root_user=root, dataset='mESC', random_seed=v0_random_seed,
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=3,
do_impute_bool=True, is_coarse=True, preserve_disconnected=False, pseudotime_threshold_TS=40, x_lazy=0.99,
alpha_teleport=0.99) # *.4 root=1,
v0.run_VIA()
df_pt = v0.single_cell_pt_markov
f, (ax1, ax2,) = plt.subplots(1, 2, sharey=True)
s_genes = ''
for s in marker_genes:
s_genes = s_genes + ' ' + s
plt.title(str(len(true_label)) + 'cells ' + str(title_string) + '\n marker genes:' + s_genes, loc='left')
ax1.scatter(U[:, 0], U[:, 1], c=true_label_int, cmap='jet', s=4, alpha=0.7)
ax2.scatter(U[:, 0], U[:, 1], c=df_pt, cmap='jet', s=4, alpha=0.7)
print('SAVED TRUE')
df_pt = pd.DataFrame()
df_pt['via_knn'] = v0.single_cell_pt_markov
df_pt['days'] = true_label_int
df_pt.to_csv('/home/shobi/Trajectory/Datasets/mESC/noMCMC_nolazynotele_via_pt_knn_Feb2021' + str(
knn) + 'resolution2jacp15.csv')
adata.obs['via0'] = [str(i) for i in v0.labels]
# show geneplot
# sc.pl.matrixplot(adata, marker_genes, groupby='via0', dendrogram=True)
super_labels = v0.labels
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v0, np.arange(0, len(true_label)))
c_pt = v0.single_cell_pt_markov[0:n_umap]
print('draw trajectory for v0')
draw_trajectory_gams(U, super_clus_ds_PCA_loc, super_labels, super_labels, v0.edgelist_maxout,
v0.x_lazy, v0.alpha_teleport, c_pt, true_label_int, knn=v0.knn,
final_super_terminal=v0.terminal_clusters,
sub_terminal_clusters=v0.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=28)
'''
#show geneplot
for gene_i in ['CD44', 'GATA4', 'PDGFRa', 'EpCAM']:
# subset = data[[gene_i]].values
subset = adata[:, gene_i].X.flatten()
print('gene expression for', gene_i)
v0.get_gene_expression(subset, gene_i)
plt.show()
'''
tsi_list = get_loc_terminal_states(v0, adata.X)
v1 = VIA(adata.X, true_label_int, jac_std_global=0.15, dist_std_local=1, knn=knn,
cluster_graph_pruning_std=cluster_graph_pruning_std,
too_big_factor=p1_too_big, super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root, is_coarse=False,
x_lazy=0.99, alpha_teleport=0.99, preserve_disconnected=True, dataset='mESC',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=3,
super_terminal_clusters=v0.terminal_clusters, random_seed=v0_random_seed,
full_neighbor_array=v0.full_neighbor_array, full_distance_array=v0.full_distance_array,
ig_full_graph=v0.ig_full_graph, csr_array_locally_pruned=v0.csr_array_locally_pruned,
pseudotime_threshold_TS=40)
v1.run_VIA()
df_pt['via_v1'] = v1.single_cell_pt_markov
df_pt.to_csv('/home/shobi/Trajectory/Datasets/mESC/noMCMC_nolazynotele_via_pt_knn_Feb2021' + str(
knn) + 'resolution2jacp15.csv')
adata.obs['parc1'] = [str(i) for i in v1.labels]
sc.pl.matrixplot(adata, marker_genes, groupby='parc1', dendrogram=True)
labels = v1.labels
for gene_i in ['CD44', 'GATA4', 'PDGFRa', 'EpCAM']:
# subset = data[[gene_i]].values
subset = adata[:, gene_i].X.flatten()
print('gene expression for', gene_i)
v1.get_gene_expression(subset, gene_i)
# X = adata.obsm['X_pca'][:,0:2]
# print(X.shape)
c_pt = v1.single_cell_pt_markov[0:n_umap]
c_type = true_type[0:n_umap]
dict_type = {'EB': 0, 'Endo': 5, "Meso": 10, 'Ecto': 15}
c_type = [dict_type[i] for i in c_type]
u_truelabel = true_label_int[0:n_umap]
# U = umap.UMAP().fit_transform(adata.obsm['X_pca'][idx, 0:ncomps])
# U = Y_phate[idx,:]
print('umap done', rand_str, time.ctime())
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
s_genes = ''
for s in marker_genes:
s_genes = s_genes + ' ' + s
plt.title(str(len(true_label)) + 'cells ' + str(title_string) + '\n marker genes:' + s_genes, loc='left')
ax1.scatter(U[:, 0], U[:, 1], c=true_label_int, cmap='jet', s=4, alpha=0.7)
ax2.scatter(U[:, 0], U[:, 1], c=c_pt, cmap='jet', s=4, alpha=0.7)
ax3.scatter(U[:, 0], U[:, 1], c=c_type, cmap='jet', s=4, alpha=0.7)
plt.show()
knn_hnsw = make_knn_embeddedspace(U)
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, np.arange(0, len(labels)))
true_label_formatted = [int(10 * i) for i in u_truelabel]
draw_trajectory_gams(U, super_clus_ds_PCA_loc, labels, super_labels, v0.edgelist_maxout,
v1.x_lazy, v1.alpha_teleport, c_pt, true_label_int, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=28)
# draw_sc_evolution_trajectory_dijkstra(v1, U, knn_hnsw, v0.full_graph_shortpath, np.arange(0, n_umap))
plt.show()
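# The preprocessing earlier in this function divides the raw CyTOF intensities by
# scale_arcsinh and applies arcsinh. A minimal standalone sketch of that transform
# (cofactor 5, matching scale_arcsinh above); an illustration, not the author's code.
def arcsinh_transform(values, cofactor=5):
    import numpy as np
    return np.arcsinh(np.asarray(values, dtype=float) / cofactor)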
def main_scATAC_zscores(knn=20, ncomps=30, cluster_graph_pruning_std=.15):
# datasets can be downloaded from the link below
# https://nbviewer.jupyter.org/github/pinellolab/STREAM/blob/master/tutorial/archives/v0.4.1_and_earlier_versions/4.STREAM_scATAC-seq_k-mers.ipynb?flush_cache=true
# these are the kmers based feature matrix
# https://www.dropbox.com/sh/zv6z7f3kzrafwmq/AACAlU8akbO_a-JOeJkiWT1za?dl=0
# https://github.com/pinellolab/STREAM_atac
##KMER START
df = pd.read_csv("/home/shobi/Trajectory/Datasets/scATAC_Hemato/zscore_scaled_kmer.tsv",
sep='\t') # TF Zcores from STREAM NOT the original buenrostro corrected PCs
df = df.transpose()
print('df kmer size', df.shape)
new_header = df.iloc[0] # grab the first row for the header
df = df[1:] # take the data less the header row
df.columns = new_header # set the header row as the df header
df = df.apply(pd.to_numeric) # CONVERT ALL COLUMNS
true_label = | pd.read_csv("/home/shobi/Trajectory/Datasets/scATAC_Hemato/cell_label.csv", sep='\t') | pandas.read_csv |
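# A minimal sketch of the transpose-and-promote-header idiom used above for the
# kmer z-score matrix; the path here is a shortened assumption, only the pattern
# itself is the point.
def load_zscore_matrix(path="zscore_scaled_kmer.tsv"):
    import pandas as pd
    df = pd.read_csv(path, sep="\t").transpose()
    header = df.iloc[0]              # first row holds the sample names
    df = df[1:]                      # drop that row from the body
    df.columns = header              # promote it to the column index
    return df.apply(pd.to_numeric)   # remaining values should all be numeric z-scores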
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
num = 200000
df = pd.DataFrame(columns=['广告行业','投放时段','电视台','用户年龄段','用户性别','教育水平','所在行业','消费水平'])
s = pd.Series(["酒类","家用电器","食品","饮料","邮电通讯"])
example_weights = [22, 13, 18, 19, 6]
df['广告行业'] = s.sample(n=num, weights=example_weights, replace=True).reset_index(drop=True)
s = pd.Series(["0:00-2:00","2:00-4:00","4:00-6:00","6:00-8:00","8:00-10:00","10:00-12:00","12:00-14:00","14:00-16:00","16:00-18:00","18:00-20:00","20:00-22:00","22:00-0:00"])
example_weights = [3, 3, 5, 2, 6,4,1,2,4,3,12,14]
df['投放时段'] =s.sample(n=num, weights=example_weights, replace=True).reset_index(drop=True)
s = pd.Series(["湖南卫视","浙江卫视","CCTV","东方卫视"])
example_weights = [1,2,8,3]
df['电视台'] =s.sample(n=num, weights=example_weights, replace=True).reset_index(drop=True)
s = pd.Series(["0-10","10-20","20-30","30+"])
example_weights = [5,10,12,40]
df['用户年龄段'] =s.sample(n=num, weights=example_weights, replace=True).reset_index(drop=True)
s = pd.Series(["男","女"])
example_weights = [94,106]
df['用户性别'] =s.sample(n=num, weights=example_weights, replace=True).reset_index(drop=True)
s = pd.Series(["高中及以下","大专","本科及以上"])
example_weights = [76,112,285]
df['教育水平'] =s.sample(n=num, weights=example_weights, replace=True).reset_index(drop=True)
s = | pd.Series(["教育","金融保险","社会公共管理","IT电子通信","医药卫生","住宿旅游"]) | pandas.Series |
import numpy as np
import pandas as pd
import joblib
# Read the CSV file
rider_provider='CARS.csv'
car_dataframe = | pd.read_csv(rider_provider) | pandas.read_csv |
# generate and save a characteristics file from the text column of a CSV file and a categorical variable
from imp import reload
from nltk.corpus import stopwords
from collections import Counter
import pandas as pd
import numpy as np
import matplotlib as mpl
import nltk,re,pprint
import sys,glob,os
import operator, string, argparse, math
def drawProgressBar(percent, barLen = 50): # just a progress bar so that you don't lose patience
sys.stdout.write("\r")
progress = ""
for i in range(barLen):
if i<int(barLen * percent):
progress += "="
else:
progress += " "
sys.stdout.write("[ %s ] %.2f%%" % (progress, percent * 100))
sys.stdout.flush()
class dataProcessor:
def __init__(self, fname, keep_factors = ['Job Description', 'Company Name', 'Industry'], group_column = 'Industry'):
self.dataInitial = pd.read_csv(fname, encoding="latin")
self.dataInitialSmall = self.dataInitial[['Job Description', 'Company Name', 'Industry']]
self.swords = set(stopwords.words('english'))
#print(len(self.swords),"stopwords present!")
self.dataInitialGrouped = self.dataInitialSmall.groupby([group_column]).count()
| pd.set_option('display.max_rows', 50) | pandas.set_option |
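# Small illustration of the groupby(...).count() call above on a toy frame; the
# column names simply mirror the subset kept in dataProcessor and the values are
# made up.
import pandas as pd

toy = pd.DataFrame({
    "Job Description": ["dev role", "analyst role", "sales role"],
    "Company Name": ["Acme", "Beta", "Gamma"],
    "Industry": ["IT", "IT", "Retail"],
})
per_industry = toy.groupby(["Industry"]).count()
# per_industry now holds, for each Industry, the count of non-null rows in the other columns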
import os
import re
import numpy as np
import pandas as pd
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.book import *
#os.getcwd()
#os.chdir('C:/Users/<NAME>/Documents/repositories/fake-news')
f = open('survey/survey_words.csv', 'r')
dat = | pd.read_csv(f) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/10/30 16:06
Desc: Sina industry sector spot quotes (新浪行业-板块行情)
http://finance.sina.com.cn/stock/sl/
"""
import json
import math
import pandas as pd
import requests
from akshare.utils import demjson
from tqdm import tqdm
def stock_sector_spot(indicator: str = "新浪行业") -> pd.DataFrame:
"""
Sina industry sector spot quotes
http://finance.sina.com.cn/stock/sl/
:param indicator: choice of {"新浪行业", "启明星行业", "概念", "地域", "行业"} (Sina industry, Qimingxing industry, concept, region, industry)
:type indicator: str
:return: data for the specified indicator
:rtype: pandas.DataFrame
"""
if indicator == "新浪行业":
url = "http://vip.stock.finance.sina.com.cn/q/view/newSinaHy.php"
r = requests.get(url)
if indicator == "启明星行业":
url = "http://biz.finance.sina.com.cn/hq/qmxIndustryHq.php"
r = requests.get(url)
r.encoding = "gb2312"
if indicator == "概念":
url = "http://money.finance.sina.com.cn/q/view/newFLJK.php"
params = {
"param": "class"
}
r = requests.get(url, params=params)
if indicator == "地域":
url = "http://money.finance.sina.com.cn/q/view/newFLJK.php"
params = {
"param": "area"
}
r = requests.get(url, params=params)
if indicator == "行业":
url = "http://money.finance.sina.com.cn/q/view/newFLJK.php"
params = {
"param": "industry"
}
r = requests.get(url, params=params)
text_data = r.text
json_data = json.loads(text_data[text_data.find("{"):])
temp_df = pd.DataFrame([value.split(",") for key, value in json_data.items()])
temp_df.columns = [
"label",
"板块",
"公司家数",
"平均价格",
"涨跌额",
"涨跌幅",
"总成交量",
"总成交额",
"股票代码",
"个股-涨跌幅",
"个股-当前价",
"个股-涨跌额",
"股票名称",
]
temp_df['公司家数'] = pd.to_numeric(temp_df['公司家数'])
temp_df['平均价格'] = pd.to_numeric(temp_df['平均价格'])
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'])
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'])
temp_df['总成交量'] = pd.to_numeric(temp_df['总成交量'])
temp_df['总成交额'] = pd.to_numeric(temp_df['总成交额'])
temp_df['个股-涨跌幅'] = pd.to_numeric(temp_df['个股-涨跌幅'])
temp_df['个股-当前价'] = pd.to_numeric(temp_df['个股-当前价'])
temp_df['个股-涨跌额'] = pd.to_numeric(temp_df['个股-涨跌额'])
return temp_df
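# Usage sketch for stock_sector_spot (assumes network access to the Sina
# endpoints above); this demo helper is not part of the original module.
def _demo_stock_sector_spot() -> None:
    stock_sector_spot_df = stock_sector_spot(indicator="新浪行业")
    print(stock_sector_spot_df.head())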
def stock_sector_detail(sector: str = "gn_gfgn") -> pd.DataFrame:
"""
Sina industry sector spot quotes - constituent details
http://finance.sina.com.cn/stock/sl/#area_1
:param sector: the label value returned by stock_sector_spot, choice of {"新浪行业", "概念", "地域", "行业"}; "启明星行业" has no detail data
:type sector: str
:return: constituent details for the specified sector
:rtype: pandas.DataFrame
"""
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCount"
params = {
"node": sector
}
r = requests.get(url, params=params)
total_num = int(r.json())
total_page_num = math.ceil(int(total_num) / 80)
big_df = pd.DataFrame()
url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData"
for page in tqdm(range(1, total_page_num+1), leave=True):
params = {
"page": str(page),
"num": "80",
"sort": "symbol",
"asc": "1",
"node": sector,
"symbol": "",
"_s_r_a": "page",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text)
temp_df = pd.DataFrame(data_json)
big_df = big_df.append(temp_df, ignore_index=True)
big_df['trade'] = pd.to_numeric(big_df['trade'])
big_df['pricechange'] = pd.to_numeric(big_df['pricechange'])
big_df['changepercent'] = pd.to_numeric(big_df['changepercent'])
big_df['buy'] = pd.to_numeric(big_df['buy'])
big_df['sell'] = pd.to_numeric(big_df['sell'])
big_df['settlement'] = pd.to_numeric(big_df['settlement'])
big_df['open'] = | pd.to_numeric(big_df['open']) | pandas.to_numeric |
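# Note on the pagination loop above: DataFrame.append is deprecated since pandas
# 1.4 and removed in 2.0. A hedged sketch of the usual replacement -- collect the
# per-page frames in a list and concatenate once; fetch_page is a stand-in for
# the paged HTTP request, not a real akshare helper.
import pandas as pd

def collect_pages(fetch_page, n_pages):
    frames = [fetch_page(page) for page in range(1, n_pages + 1)]
    return pd.concat(frames, ignore_index=True)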
# Build a cleaned up historical emissions dataframe based on EGEDA
# Import relevant packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from openpyxl import Workbook
import xlsxwriter
import pandas.io.formats.excel
import glob
import re
# Path for OSeMOSYS output
path_output = './data/3_OSeMOSYS_output'
# Path for OSeMOSYS to EGEDA mapping
path_mapping = './data/2_Mapping_and_other'
# Where to save finalised dataframe
path_final = './data/4_Joined'
# OSeMOSYS results files
OSeMOSYS_filenames = glob.glob(path_output + "/*.xlsx")
# Load historical emissions
EGEDA_emissions = pd.read_csv('./data/1_EGEDA/EGEDA_FC_CO2_Emissions_years_2018.csv')
# Remove all aggregate variables as they're zero
agg_fuel = ['1_coal', '1_x_coal_thermal', '2_coal_products', '6_crude_oil_and_ngl', '6_x_ngls',
'7_petroleum_products', '7_x_jet_fuel', '7_x_other_petroleum_products', '8_gas', '16_others', '19_total']
EGEDA_emissions = EGEDA_emissions[~EGEDA_emissions['fuel_code'].isin(agg_fuel)].reset_index(drop = True)
########################## fuel_code aggregations ##########################
# lowest level
thermal_coal = ['1_2_other_bituminous_coal', '1_3_subbituminous_coal', '1_4_anthracite', '3_peat', '4_peat_products']
ngl = ['6_2_natural_gas_liquids', '6_3_refinery_feedstocks', '6_4_additives_oxygenates', '6_5_other_hydrocarbons']
other_petrol = ['7_12_white_spirit_sbp', '7_13_lubricants', '7_14_bitumen', '7_15_paraffin_waxes', '7_16_petroleum_coke', '7_17_other_products']
jetfuel = ['7_4_gasoline_type_jet_fuel', '7_5_kerosene_type_jet_fuel']
# First level
coal_fuels = ['1_1_coking_coal', '1_5_lignite', '1_x_coal_thermal']
coal_prod_fuels = ['2_1_coke_oven_coke', '2_2_coke_oven_gas', '2_3_blast_furnace_gas', '2_4_other_recovered_gases', '2_5_patent_fuel', '2_6_coal_tar', '2_7_bkb_pb']
oil_fuels = ['6_1_crude_oil', '6_x_ngls']
petrol_fuels = ['7_1_motor_gasoline', '7_2_aviation_gasoline', '7_3_naphtha', '7_x_jet_fuel', '7_6_kerosene', '7_7_gas_diesel_oil',
'7_8_fuel_oil', '7_9_lpg', '7_10_refinery_gas_not_liquefied', '7_11_ethane', '7_x_other_petroleum_products']
gas_fuels = ['8_1_natural_gas', '8_2_lng', '8_3_gas_works_gas']
other_fuels = ['16_1_biogas', '16_2_industrial_waste', '16_3_municipal_solid_waste_renewable', '16_4_municipal_solid_waste_nonrenewable', '16_5_biogasoline', '16_6_biodiesel',
'16_7_bio_jet_kerosene', '16_8_other_liquid_biofuels', '16_9_other_sources', '16_x_hydrogen']
# Total
total_fuels = ['1_coal', '2_coal_products', '5_oil_shale_and_oil_sands', '6_crude_oil_and_ngl', '7_petroleum_products', '8_gas', '9_nuclear', '10_hydro', '11_geothermal',
'12_solar', '13_tide_wave_ocean', '14_wind', '15_solid_biomass', '16_others', '17_electricity', '18_heat']
# item_code_new aggregations
tfc_agg = ['14_industry_sector', '15_transport_sector', '16_other_sector', '17_nonenergy_use']
tfec_agg = ['14_industry_sector', '15_transport_sector', '16_other_sector']
power_agg = ['9_1_main_activity_producer', '9_2_autoproducers']
# Change from negative to positive
neg_to_pos = ['9_x_power',
'9_1_main_activity_producer', '9_1_1_electricity_plants', '9_1_2_chp_plants', '9_1_3_heat_plants', '9_2_autoproducers',
'9_2_1_electricity_plants', '9_2_2_chp_plants', '9_2_3_heat_plants', '9_3_gas_processing_plants', '9_3_1_gas_works_plants',
'9_3_2_liquefaction_plants', '9_3_3_regasification_plants', '9_3_4_natural_gas_blending_plants', '9_3_5_gastoliquids_plants',
'9_4_oil_refineries', '9_5_coal_transformation', '9_5_1_coke_ovens', '9_5_2_blast_furnaces', '9_5_3_patent_fuel_plants',
'9_5_4_bkb_pb_plants', '9_5_5_liquefaction_coal_to_oil', '9_6_petrochemical_industry', '9_7_biofuels_processing',
'9_8_charcoal_processing', '9_9_nonspecified_transformation', '10_losses_and_own_use']
# Aggregations
EGEDA_aggregate = pd.DataFrame()
# Then loop through based on different regions/economies and stitch back together
for region in EGEDA_emissions['economy'].unique():
interim_df1 = EGEDA_emissions[EGEDA_emissions['economy'] == region]
thermal_agg = interim_df1[interim_df1['fuel_code'].isin(thermal_coal)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '1_x_coal_thermal').reset_index()
ngl_agg = interim_df1[interim_df1['fuel_code'].isin(ngl)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '6_x_ngls').reset_index()
oth_pet_agg = interim_df1[interim_df1['fuel_code'].isin(other_petrol)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '7_x_other_petroleum_products').reset_index()
jetfuel_agg = interim_df1[interim_df1['fuel_code'].isin(jetfuel)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '7_x_jet_fuel').reset_index()
interim_df2 = interim_df1.append([thermal_agg, ngl_agg, oth_pet_agg, jetfuel_agg]).reset_index(drop = True)
coal = interim_df2[interim_df2['fuel_code'].isin(coal_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '1_coal').reset_index()
coal_prod = interim_df2[interim_df2['fuel_code'].isin(coal_prod_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '2_coal_products').reset_index()
oil = interim_df2[interim_df2['fuel_code'].isin(oil_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '6_crude_oil_and_ngl').reset_index()
petrol = interim_df2[interim_df2['fuel_code'].isin(petrol_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '7_petroleum_products').reset_index()
gas = interim_df2[interim_df2['fuel_code'].isin(gas_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '8_gas').reset_index()
others = interim_df2[interim_df2['fuel_code'].isin(other_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '16_others').reset_index()
interim_df3 = interim_df2.append([coal, coal_prod, oil, petrol, gas, others]).reset_index(drop = True)
# Now add in the totals
total = interim_df3[interim_df3['fuel_code'].isin(total_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '19_total').reset_index()
interim_df4 = interim_df3.append([total]).reset_index(drop = True)
# Totals by sector aggregation
power_total = interim_df4[interim_df4['item_code_new'].isin(power_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '9_x_power').reset_index()
# tfc = interim_df5[interim_df5['item_code_new'].isin(tfc_agg)].groupby(['fuel_code'])\
# .sum().assign(item_code_new = '12_total_final_consumption').reset_index()
# tfec = interim_df5[interim_df5['item_code_new'].isin(tfec_agg)].groupby(['fuel_code'])\
# .sum().assign(item_code_new = '13_total_final_energy_consumption').reset_index()
interim_df5 = interim_df4.append([power_total]).reset_index(drop = True)
interim_df5['economy'] = region
EGEDA_aggregate = EGEDA_aggregate.append(interim_df5).reset_index(drop = True)
# Now change main activity producer and own use to positive
change_to_negative = EGEDA_aggregate[EGEDA_aggregate['item_code_new'].\
isin(neg_to_pos)].copy().reset_index(drop = True)
everything_else = EGEDA_aggregate[~EGEDA_aggregate['item_code_new'].\
isin(neg_to_pos)].copy().reset_index(drop = True)
s = change_to_negative.select_dtypes(include=[np.number]) * -1
change_to_negative[s.columns] = s
EGEDA_aggregate = everything_else.append(change_to_negative).reset_index(drop = True)
# Aggregate for demand sectors, power and own use and losses
EGEDA_aggregate2 = | pd.DataFrame() | pandas.DataFrame |
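# Minimal illustration of the sign-flip idiom used just above: select the numeric
# columns with select_dtypes, negate them, and assign them back so the string
# columns are left untouched. The frame here is a made-up two-row example.
import numpy as np
import pandas as pd

flip_demo = pd.DataFrame({"item_code_new": ["10_losses_and_own_use", "9_x_power"],
                          "2017": [-5.0, -2.0], "2018": [-6.0, -1.5]})
numeric = flip_demo.select_dtypes(include=[np.number]) * -1
flip_demo[numeric.columns] = numeric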
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.randn(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
| tm.assert_almost_equal(df.values, values) | pandas._testing.assert_almost_equal |
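# Tiny standalone example of the boolean-indexing behaviour the tests above
# exercise: an element-wise mask keeps the frame's shape and fills non-matching
# cells with NaN, while a boolean Series on one column filters whole rows.
import pandas as pd

bi_df = pd.DataFrame({"A": [1, -2, 3], "B": [-1, 2, -3]})
masked = bi_df[bi_df > 0]            # same shape, negative cells become NaN
positive_a = bi_df[bi_df["A"] > 0]   # only the rows where A is positive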
import json
import networkx as nx
import numpy as np
import os
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
from config import logger, config
def read_profile_data():
profile_na = np.zeros(67)
profile_na[0] = -1
profile_na = pd.DataFrame(profile_na.reshape(1, -1))
profile_df = pd.read_csv(config.profile_file)
profile_na.columns = profile_df.columns
profile_df = profile_df.append(profile_na)
return profile_df
def merge_raw_data():
tr_queries = pd.read_csv(config.train_query_file, parse_dates=['req_time'])
te_queries = pd.read_csv(config.test_query_file, parse_dates=['req_time'])
tr_plans = pd.read_csv(config.train_plan_file, parse_dates=['plan_time'])
te_plans = pd.read_csv(config.test_plan_file, parse_dates=['plan_time'])
tr_click = pd.read_csv(config.train_click_file)
trn = tr_queries.merge(tr_click, on='sid', how='left')
trn = trn.merge(tr_plans, on='sid', how='left')
trn = trn.drop(['click_time'], axis=1)
trn['click_mode'] = trn['click_mode'].fillna(0)
tst = te_queries.merge(te_plans, on='sid', how='left')
tst['click_mode'] = -1
df = pd.concat([trn, tst], axis=0, sort=False)
df = df.drop(['plan_time'], axis=1)
df = df.reset_index(drop=True)
df['weekday'] = df['req_time'].dt.weekday
df['day'] = df['req_time'].dt.day
df['hour'] = df['req_time'].dt.hour
df = df.drop(['req_time'], axis=1)
logger.info('total data size: {}'.format(df.shape))
logger.info('data columns: {}'.format(', '.join(df.columns)))
return df
def extract_plans(df):
plans = []
for sid, plan in tqdm(zip(df['sid'].values, df['plans'].values)):
try:
p = json.loads(plan)
for x in p:
x['sid'] = sid
plans.extend(p)
except:
pass
return pd.DataFrame(plans)
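# Hedged sketch of what extract_plans yields on a single toy row: every JSON plan
# becomes one flat record tagged with its sid. The toy sid and plan values are
# invented for illustration.
def _extract_plans_example():
    toy = pd.DataFrame({
        "sid": [1],
        "plans": ['[{"distance": 100, "price": "200", "eta": 300, "transport_mode": 1}]'],
    })
    return extract_plans(toy)  # -> one row: distance=100, price="200", eta=300, transport_mode=1, sid=1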
def generate_od_features(df):
feat = df[['o','d']].drop_duplicates()
feat = feat.merge(df.groupby('o')[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on='o')
feat.rename(columns={'day': 'o_nunique_day',
'hour': 'o_nunique_hour',
'pid': 'o_nunique_pid',
'click_mode': 'o_nunique_click'}, inplace=True)
feat = feat.merge(df.groupby('d')[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on='d')
feat.rename(columns={'day': 'd_nunique_day',
'hour': 'd_nunique_hour',
'pid': 'd_nunique_pid',
'click_mode': 'd_nunique_click'}, inplace=True)
feat = feat.merge(df.groupby(['o', 'd'])[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on=['o', 'd'])
feat.rename(columns={'day': 'od_nunique_day',
'hour': 'od_nunique_hour',
'pid': 'od_nunique_pid',
'click_mode': 'od_nunique_click'}, inplace=True)
return feat
def generate_pid_features(df):
feat = df.groupby('pid')[['hour', 'day']].nunique().reset_index()
feat.rename(columns={'hour': 'pid_nunique_hour', 'day': 'pid_nunique_day'}, inplace=True)
feat['nunique_hour_d_nunique_day'] = feat['pid_nunique_hour'] / feat['pid_nunique_day']
feat = feat.merge(df.groupby('pid')[['o', 'd']].nunique().reset_index(), how='left', on='pid')
feat.rename(columns={'o': 'pid_nunique_o', 'd': 'pid_nunique_d'}, inplace=True)
feat['nunique_o_d_nunique_d'] = feat['pid_nunique_o'] / feat['pid_nunique_d']
return feat
def generate_od_cluster_features(df):
G = nx.Graph()
G.add_nodes_from(df['o'].unique().tolist())
G.add_nodes_from(df['d'].unique().tolist())
edges = df[['o','d']].apply(lambda x: (x[0],x[1]), axis=1).tolist()
G.add_edges_from(edges)
cluster = nx.clustering(G)
cluster_df = pd.DataFrame([{'od': key, 'cluster': cluster[key]} for key in cluster.keys()])
return cluster_df
def gen_od_feas(data):
data['o1'] = data['o'].apply(lambda x: float(x.split(',')[0]))
data['o2'] = data['o'].apply(lambda x: float(x.split(',')[1]))
data['d1'] = data['d'].apply(lambda x: float(x.split(',')[0]))
data['d2'] = data['d'].apply(lambda x: float(x.split(',')[1]))
data = data.drop(['o', 'd'], axis=1)
return data
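# The four apply() calls above split the "lng,lat" strings one float at a time; a
# vectorised sketch of the same idea using str.split (column names as above):
def _split_od_coords(data):
    o = data["o"].str.split(",", expand=True).astype(float)
    d = data["d"].str.split(",", expand=True).astype(float)
    data["o1"], data["o2"] = o[0], o[1]
    data["d1"], data["d2"] = d[0], d[1]
    return data.drop(["o", "d"], axis=1)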
def gen_plan_feas(data):
n = data.shape[0]
mode_list_feas = np.zeros((n, 12))
max_dist, min_dist, mean_dist, std_dist = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_price, min_price, mean_price, std_price = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_eta, min_eta, mean_eta, std_eta = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
min_dist_mode, max_dist_mode, min_price_mode, max_price_mode, min_eta_mode, max_eta_mode, first_mode = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
mode_texts = []
for i, plan in tqdm(enumerate(data['plans'].values)):
try:
cur_plan_list = json.loads(plan)
except:
cur_plan_list = []
if len(cur_plan_list) == 0:
mode_list_feas[i, 0] = 1
first_mode[i] = 0
max_dist[i] = -1
min_dist[i] = -1
mean_dist[i] = -1
std_dist[i] = -1
max_price[i] = -1
min_price[i] = -1
mean_price[i] = -1
std_price[i] = -1
max_eta[i] = -1
min_eta[i] = -1
mean_eta[i] = -1
std_eta[i] = -1
min_dist_mode[i] = -1
max_dist_mode[i] = -1
min_price_mode[i] = -1
max_price_mode[i] = -1
min_eta_mode[i] = -1
max_eta_mode[i] = -1
mode_texts.append('word_null')
else:
distance_list = []
price_list = []
eta_list = []
mode_list = []
for tmp_dit in cur_plan_list:
distance_list.append(int(tmp_dit['distance']))
if tmp_dit['price'] == '':
price_list.append(0)
else:
price_list.append(int(tmp_dit['price']))
eta_list.append(int(tmp_dit['eta']))
mode_list.append(int(tmp_dit['transport_mode']))
mode_texts.append(
' '.join(['word_{}'.format(mode) for mode in mode_list]))
distance_list = np.array(distance_list)
price_list = np.array(price_list)
eta_list = np.array(eta_list)
mode_list = np.array(mode_list, dtype='int')
mode_list_feas[i, mode_list] = 1
distance_sort_idx = np.argsort(distance_list)
price_sort_idx = np.argsort(price_list)
eta_sort_idx = np.argsort(eta_list)
max_dist[i] = distance_list[distance_sort_idx[-1]]
min_dist[i] = distance_list[distance_sort_idx[0]]
mean_dist[i] = np.mean(distance_list)
std_dist[i] = np.std(distance_list)
max_price[i] = price_list[price_sort_idx[-1]]
min_price[i] = price_list[price_sort_idx[0]]
mean_price[i] = np.mean(price_list)
std_price[i] = np.std(price_list)
max_eta[i] = eta_list[eta_sort_idx[-1]]
min_eta[i] = eta_list[eta_sort_idx[0]]
mean_eta[i] = np.mean(eta_list)
std_eta[i] = np.std(eta_list)
first_mode[i] = mode_list[0]
max_dist_mode[i] = mode_list[distance_sort_idx[-1]]
min_dist_mode[i] = mode_list[distance_sort_idx[0]]
max_price_mode[i] = mode_list[price_sort_idx[-1]]
min_price_mode[i] = mode_list[price_sort_idx[0]]
max_eta_mode[i] = mode_list[eta_sort_idx[-1]]
min_eta_mode[i] = mode_list[eta_sort_idx[0]]
feature_data = pd.DataFrame(mode_list_feas)
feature_data.columns = ['mode_feas_{}'.format(i) for i in range(12)]
feature_data['max_dist'] = max_dist
feature_data['min_dist'] = min_dist
feature_data['mean_dist'] = mean_dist
feature_data['std_dist'] = std_dist
feature_data['max_price'] = max_price
feature_data['min_price'] = min_price
feature_data['mean_price'] = mean_price
feature_data['std_price'] = std_price
feature_data['max_eta'] = max_eta
feature_data['min_eta'] = min_eta
feature_data['mean_eta'] = mean_eta
feature_data['std_eta'] = std_eta
feature_data['max_dist_mode'] = max_dist_mode
feature_data['min_dist_mode'] = min_dist_mode
feature_data['max_price_mode'] = max_price_mode
feature_data['min_price_mode'] = min_price_mode
feature_data['max_eta_mode'] = max_eta_mode
feature_data['min_eta_mode'] = min_eta_mode
feature_data['first_mode'] = first_mode
logger.info('mode tfidf...')
tfidf_enc = TfidfVectorizer(ngram_range=(1, 2))
tfidf_vec = tfidf_enc.fit_transform(mode_texts)
svd_enc = TruncatedSVD(n_components=10, n_iter=20, random_state=2019)
mode_svd = svd_enc.fit_transform(tfidf_vec)
mode_svd = pd.DataFrame(mode_svd)
mode_svd.columns = ['svd_mode_{}'.format(i) for i in range(10)]
data = pd.concat([data, feature_data, mode_svd], axis=1)
data = data.drop(['plans'], axis=1)
return data
def gen_profile_feas(data):
profile_data = read_profile_data()
x = profile_data.drop(['pid'], axis=1).values
svd = TruncatedSVD(n_components=20, n_iter=20, random_state=2019)
svd_x = svd.fit_transform(x)
svd_feas = pd.DataFrame(svd_x)
svd_feas.columns = ['svd_fea_{}'.format(i) for i in range(20)]
svd_feas['pid'] = profile_data['pid'].values
data['pid'] = data['pid'].fillna(-1)
data = data.merge(svd_feas, on='pid', how='left')
return data
def group_weekday_and_hour(row):
if row['weekday'] == 0 or row['weekday'] == 6:
w = 0
else:
w = row['weekday']
if row['hour'] > 7 and row['hour'] < 18: # 7:00 - 18:00
h = row['hour']
elif row['hour'] >= 18 and row['hour'] < 21: # 18:00 - 21:00
h = 1
elif row['hour'] >= 21 or row['hour'] < 6: # 21:00 - 6:00
h = 0
else: # 6:00 - 7:00
h = 2
return str(w) + '_' + str(h)
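# Usage sketch for the bucketing helper above: applied row-wise it produces a
# combined weekday/hour key (the merged frame is assumed to carry 'weekday' and
# 'hour' columns, as built in merge_raw_data).
def _add_weekday_hour_bucket(data):
    data["weekday_hour"] = data.apply(group_weekday_and_hour, axis=1)
    return data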
def gen_ratio_feas(data):
data['dist-d-eta'] = data['mean_dist'] / data['mean_eta']
data['price-d-dist'] = data['mean_price'] / data['mean_dist']
data['price-d-eta'] = data['mean_price'] / data['mean_eta']
data['o1-d-d1'] = data['o1'] / data['d1']
data['o2-d-d2'] = data['o2'] / data['d2']
return data
def gen_fly_dist_feas(data):
data['fly-dist'] = ((data['d1'] - data['o1'])**2 + (data['d2'] - data['o2'])**2)**0.5
data['fly-dist-d-dist'] = data['fly-dist'] / data['mean_dist']
data['fly-dist-d-eta'] = data['fly-dist'] / data['mean_eta']
data['price-d-fly-dist'] = data['mean_price'] / data['fly-dist']
return data
def gen_aggregate_profile_feas(data):
aggr = data.groupby('pid')['sid'].agg(['count'])
aggr.columns = ['%s_%s' % ('sid', col) for col in aggr.columns.values]
aggr = aggr.reset_index()
aggr.loc[aggr['pid'] == -1.0,'sid_count'] = 0 # reset in case pid == -1
data = data.merge(aggr, how='left', on=['pid'])
return data
def gen_pid_feat(data):
feat = pd.read_csv(config.pid_feature_file)
data = data.merge(feat, how='left', on='pid')
return data
def gen_od_feat(data):
feat = pd.read_csv(config.od_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
logger.info('sid shape={}'.format(sid.shape))
feat = sid.merge(feat, how='left', on=['o','d']).drop(['o','d'], axis=1)
logger.info('feature shape={}'.format(feat.shape))
logger.info('feature columns={}'.format(feat.columns))
data = data.merge(feat, how='left', on='sid')
click_cols = [c for c in feat.columns if c.endswith('click')]
data.drop(click_cols, axis=1, inplace=True)
return data
def gen_od_cluster_feat(data):
feat = pd.read_csv(config.od_cluster_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
f = feat.copy()
feat = sid.merge(feat, how='left', left_on='o', right_on='od').drop(['od','o'], axis=1)
feat.rename(columns={'cluster': 'o_cluster'}, inplace=True)
feat = feat.merge(f, how='left', left_on='d', right_on='od').drop(['od','d'], axis=1)
feat.rename(columns={'cluster': 'd_cluster'}, inplace=True)
data = data.merge(feat, how='left', on='sid')
return data
def gen_od_eq_feat(data):
data['o1-eq-d1'] = (data['o1'] == data['d1']).astype(int)
data['o2-eq-d2'] = (data['o2'] == data['d2']).astype(int)
data['o-eq-d'] = data['o1-eq-d1']*data['o2-eq-d2']
data['o1-m-o2'] = np.abs(data['o1'] - data['o2'])
data['d1-m-d2'] = np.abs(data['d1'] - data['d2'])
data['od_area'] = data['o1-m-o2']*data['d1-m-d2']
data['od_ratio'] = data['o1-m-o2']/data['d1-m-d2']
return data
def gen_od_mode_cnt_feat(data):
feat = pd.read_csv(config.od_mode_cnt_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
feat = sid.merge(feat, how='left', on=['o','d']).drop(['o','d'], axis=1)
data = data.merge(feat, how='left', on='sid')
return data
def gen_weekday_hour_cnt_feat(data):
feat = pd.read_csv(config.weekday_hour_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','req_time'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','req_time'])
sid = pd.concat((tr_sid, te_sid))
sid['req_time'] = pd.to_datetime(sid['req_time'])
sid['hour'] = sid['req_time'].map(lambda x: x.hour)
sid['weekday'] = sid['req_time'].map(lambda x: x.weekday())
feat = sid.merge(feat, how='left', on=['hour','weekday']).drop(['hour','weekday','req_time'], axis=1)
data = data.merge(feat, how='left', on='sid')
return data
def gen_od_plan_agg_feat(data):
#feat = pd.read_csv(config.od_plan_agg_feature_file)
#tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d','req_time'])
#te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d', 'req_time'])
#sid = pd.concat((tr_sid, te_sid))
#sid['req_time'] = pd.to_datetime(sid['req_time'])
#sid['hour'] = sid['req_time'].map(lambda x: x.hour)
#feat = sid.merge(feat, how='left', on=['o','d','hour']).drop(['o','d','hour','req_time'], axis=1)
feat = pd.read_csv(config.od_plan_agg_feature_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_mode_feat(data):
feat = pd.read_csv(config.mode_feature_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_mode_stats_feat(data):
feat = pd.read_csv(config.od_stats_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_daily_plan_feat(data):
feat = pd.read_csv(config.daily_plan_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_weather_feat(data):
feat = pd.read_csv(config.weather_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_od_pid_count_feat(data):
feat = pd.read_csv(config.od_pid_count_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_plan_ratio_feat(data):
feat = pd.read_csv(config.plan_ratio_file)
data = data.merge(feat, how='left', on='sid')
return data
def generate_f1(df):
trn_feat_name, tst_feat_name = config.get_feature_name('f1')
if os.path.exists(trn_feat_name) and os.path.exists(tst_feat_name):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(trn_feat_name)
tst = pd.read_csv(tst_feat_name)
else:
df = gen_od_feas(df)
df = gen_plan_feas(df)
df = gen_profile_feas(df)
df = gen_ratio_feas(df)
df = gen_fly_dist_feas(df)
df = gen_aggregate_profile_feas(df) # 0.6759966661470926
df = gen_pid_feat(df) # 0.6762996872664375
df = gen_od_feat(df) # without click count: 0.6780576865566392; with click count: 0.6795810670221226
df = gen_od_cluster_feat(df) # 0.6796523605372234
df = gen_od_eq_feat(df)
trn = df[df['click_mode'] != -1]
tst = df[df['click_mode'] == -1]
return trn, tst
def generate_f2(df):
trn_feat_name, tst_feat_name = config.get_feature_name('f2')
if os.path.exists(trn_feat_name) and os.path.exists(tst_feat_name):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(trn_feat_name)
tst = pd.read_csv(tst_feat_name)
else:
trn, tst = generate_f1(df)
df = pd.concat((trn, tst))
df = gen_od_mode_cnt_feat(df) # [+] fold #0: 0.6835031183515229
df = gen_weekday_hour_cnt_feat(df)
df = gen_od_plan_agg_feat(df)
df = gen_mode_feat(df)
#df = gen_mode_stats_feat(df)
## df = gen_weather_feat(df)
#df = gen_daily_plan_feat(df)
#df = gen_od_pid_count_feat(df)
## df = gen_plan_ratio_feat(df)
trn = df[df['click_mode'] != -1]
tst = df[df['click_mode'] == -1]
return trn, tst
def generate_f3(df):
trn_feat_name, tst_feat_name = config.get_feature_name('f3')
if os.path.exists(trn_feat_name) and os.path.exists(tst_feat_name):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(trn_feat_name)
tst = pd.read_csv(tst_feat_name)
else:
trn, tst = generate_f2(df)
df = pd.concat((trn, tst))
#df = gen_mode_stats_feat(df)
## df = gen_weather_feat(df)
#df = gen_daily_plan_feat(df)
#df = gen_od_pid_count_feat(df)
## df = gen_plan_ratio_feat(df)
trn = df[df['click_mode'] != -1]
tst = df[df['click_mode'] == -1]
return trn, tst
def get_train_test_features():
config.set_feature_name('f1')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = pd.read_csv(config.test_feature_file)
else:
df = merge_raw_data()
logger.info('generating feature f1.')
trn, tst = generate_f1(df)
logger.info('saving the training and test f1 features.')
trn.to_csv(config.train_feature_file, index=False)
tst.to_csv(config.test_feature_file, index=False)
y = trn['click_mode'].values
sub = tst[['sid']].copy()
trn.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
tst.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
return trn, y, tst, sub
def get_train_test_features2():
config.set_feature_name('f2')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = pd.read_csv(config.test_feature_file)
else:
df = merge_raw_data()
logger.info('generating feature f2.')
trn, tst = generate_f2(df)
logger.info('saving the training and test f2 features.')
trn.to_csv(config.train_feature_file, index=False)
tst.to_csv(config.test_feature_file, index=False)
y = trn['click_mode'].values
sub = tst[['sid']].copy()
trn.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
tst.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
return trn, y, tst, sub
def get_train_test_features2a():
config.set_feature_name('f2')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = pd.read_csv(config.test_feature_file)
else:
df = merge_raw_data()
logger.info('generating feature f2.')
trn, tst = generate_f2(df)
logger.info('saving the training and test f2 features.')
trn.to_csv(config.train_feature_file, index=False)
tst.to_csv(config.test_feature_file, index=False)
y = trn['click_mode'].values
sub = tst[['sid']].copy()
feat = pd.read_csv('/home/ubuntu/projects/kddcup2019track1/build/feature/od_coord_feature.csv')
trn = trn.merge(feat, how='left', on='sid')
tst = tst.merge(feat, how='left', on='sid')
feat = pd.read_csv('/home/ubuntu/projects/kddcup2019track1/input/data_set_phase1/var_dist_time.csv')
trn = trn.merge(feat, how='left', on='sid')
tst = tst.merge(feat, how='left', on='sid')
feat = pd.read_csv('/home/ubuntu/projects/kddcup2019track1/input/data_set_phase1/var_dist_min.csv')
trn = trn.merge(feat, how='left', on='sid')
tst = tst.merge(feat, how='left', on='sid')
trn.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
tst.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
return trn, y, tst, sub
def get_train_test_features3():
config.set_feature_name('f3')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = | pd.read_csv(config.test_feature_file) | pandas.read_csv |
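# The gen_*_feat helpers above all follow one pattern: read a precomputed feature
# file and left-join it onto the main frame by 'sid'. A hedged generic sketch of
# that pattern (the path argument stands in for whatever config points at):
import pandas as pd

def merge_feature_file(data, path, key="sid"):
    feat = pd.read_csv(path)
    return data.merge(feat, how="left", on=key)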
from matplotlib.pyplot import *
from mlxtend.plotting import plot_confusion_matrix
from tensorflow.keras import backend
from sklearn.metrics import confusion_matrix
import scipy as sp
import numpy as np
import pandas as pd
import skimage.transform
import PIL
import scipy.ndimage as spi
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam,Adagrad, Adadelta
from tensorflow.keras.applications.inception_v3 import InceptionV3
import imageio
import model_evaluation_utils as meu
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras import optimizers
from tensorflow.keras.models import load_model
import argparse
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from tensorflow.keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D, BatchNormalization
from tensorflow.keras.models import model_from_json
# This function prepares a random batch from the dataset.
def load_batch(dataset_df, batch_size = 25):
batch_df = dataset_df.loc[np.random.permutation(np.arange(0,
len(dataset_df)))[:batch_size],:]
return batch_df
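# load_batch above permutes the index and slices the first batch_size rows;
# DataFrame.sample gives the same kind of random batch in one call (a sketch,
# not the author's code).
def load_batch_sample(dataset_df, batch_size=25, seed=None):
    return dataset_df.sample(n=batch_size, random_state=seed)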
# This function plots sample images at a specified size on a defined grid
def plot_batch(img_type, images_df, grid_width, grid_height, im_scale_x, im_scale_y):
f, ax = plt.subplots(grid_width, grid_height)
f.set_size_inches(12, 12)
img_idx = 0
for i in range(0, grid_width):
for j in range(0, grid_height):
ax[i][j].axis('off')
ax[i][j].set_title(images_df.iloc[img_idx]['clase'][:10])
ax[i][j].imshow(skimage.transform.resize(imageio.imread(DATASET_PATH + images_df.iloc[img_idx]['id'] + img_type),
(im_scale_x,im_scale_y)))
img_idx += 1
plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0.25)
plt.show()
def datos_flo(tar_six,tar_siy,test_si,rand_sta,test_si2,rand_sta2):
# load the dataset
train_data = np.array([img_to_array(load_img(img, target_size=(tar_six, tar_siy)))
for img in data_labels['image_path'].values.tolist()]).astype('float32')
# create the training and test datasets
x_train, x_test, y_train, y_test = train_test_split(train_data, target_labels,
test_size=test_si,
stratify=np.array(target_labels),
random_state=rand_sta)
# create the training and validation datasets
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,
test_size=test_si2,
stratify=np.array(y_train),
random_state=rand_sta2)
return train_data, x_train, x_test, y_train, y_test, x_val, y_val
def data_gen(BATCH_SIZE, rot_ran, width_s_r, height_s_r, hor_flip, seed):
# Image data generation; use the batch size passed in instead of overriding it here
# Create train generator.
train_datagen = ImageDataGenerator(rescale=1./255,
rotation_range=rot_ran,
width_shift_range=width_s_r,
height_shift_range=height_s_r,
horizontal_flip = hor_flip)
train_generator = train_datagen.flow(x_train, y_train_ohe, shuffle=False,
batch_size=BATCH_SIZE, seed=seed)
# Create validation generator
val_datagen = ImageDataGenerator(rescale = 1./255)
val_generator = val_datagen.flow(x_val, y_val_ohe, shuffle=False,
batch_size=BATCH_SIZE, seed=1)
return train_datagen, train_generator, val_datagen, val_generator
def tranf_learn(pesos,shapex,shapey,shapez,activat,activat2,loss,learning_rate,moment,BATCH_SIZE,epochs,save_file_path,save_json):
# Get the InceptionV3 model so we can do transfer learning
base_inception = InceptionV3(weights=pesos, include_top=False,
input_shape=(shapex, shapey, shapez))
out = base_inception.output
out = GlobalAveragePooling2D()(out)
out = Dense(1024, activation='relu')(out)
out = Dense(512, activation='relu')(out)
out = Flatten()(out)
out = Dense(512, activation="relu")(out)
#out = BatchNormalization()(out)
#out = Dropout(0.5)(out)
#out = Dense(512, activation="relu")(out)
#out = BatchNormalization()(out)
#out = Dropout(0.5)(out)
#out = Dense(512, activation="relu")(out)
#out = BatchNormalization()(out)
#out = Dropout(0.5)(out)
#out = Dense(512, activation="relu")(out)
# Add top layers on top of the InceptionV3 base for transfer learning
total_classes = y_train_ohe.shape[1]
# This is a binary classification problem, so we use the sigmoid
# activation function in the output layer.
predictions = Dense(total_classes, activation=activat2)(out)
model = Model(inputs=base_inception.input, outputs=predictions)
opt1 = optimizers.SGD(lr=learning_rate, momentum=moment, nesterov=True)
opt2 = Adadelta(lr=learning_rate, rho=0.95)
opt3 = Adagrad(lr=0.0001)
#opt2 = Adagrad(lr=learning_rate, momentum=moment)
# Compile
model.compile(loss=loss, optimizer=opt1, metrics=["accuracy"])
# Print the Inception V3 architecture
model.summary()
# Train the model
batch_size = BATCH_SIZE
train_steps_per_epoch = x_train.shape[0] // batch_size
val_steps_per_epoch = x_val.shape[0] // batch_size
history = model.fit_generator(train_generator,
steps_per_epoch=train_steps_per_epoch,
validation_data=val_generator,
validation_steps=val_steps_per_epoch,
epochs=epochs, verbose=1)
# serialize model to JSON
model_json = model.to_json()
with open(save_json, "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(save_file_path)
print("Saved model to disk")
#model.save(save_file_path)
print(history.history.keys())
return history
def plot_eval(total_epchs,plot_name,space):
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
t = f.suptitle('Inception V3 Performance', fontsize=12)
f.subplots_adjust(top=0.85, wspace=0.3)
epoch_list = list(range(1,total_epchs))
ax1.plot(epoch_list, history.history['accuracy'], label='Train Accuracy')
ax1.plot(epoch_list, history.history['val_accuracy'], label='Validation Accuracy')
ax1.set_xticks(np.arange(0, total_epchs, space))
ax1.set_ylabel('Accuracy Value')
ax1.set_xlabel('Epoch')
ax1.set_title('Accuracy')
l1 = ax1.legend(loc="best")
ax2.plot(epoch_list, history.history['loss'], label='Train Loss')
ax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss')
ax2.set_xticks(np.arange(0, total_epchs, space))
ax2.set_ylabel('Loss Value')
ax2.set_xlabel('Epoch')
ax2.set_title('Loss')
l2 = ax2.legend(loc="best")
plt.savefig(plot_name)
plt.show()
if __name__ == "__main__":
# Arguments
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--epochs", type=int, required=True,
help="Número de epochs para entrenamiento")
ap.add_argument("-p", "--path", type=str, required=True,
help="Path directorio imagenes ej. '/home/jyosa/all_images'")
ap.add_argument("-l", "--labels", type=str, required=True,
help="Path archivo labels.csv ej. '/home/jyosa/labels.csv'")
ap.add_argument("-ex", "--ext", type=str, required=True,
help="Tipo de imágen. Ejemplo '.jpeg'")
args = vars(ap.parse_args())
np.random.seed(42)
# If the labels are not already in a separate file, use get_labels.py
DATASET_PATH = args["path"]
LABEL_PATH = args["labels"]
# Load the dataset and visualize sample data
dataset_df = pd.read_csv(LABEL_PATH)
batch_df = load_batch(dataset_df, batch_size=36)
plot_batch(args["ext"], batch_df, grid_width=6, grid_height=6,
im_scale_x=64, im_scale_y=64)
# Inspect the dataset labels to get an idea of all the possible classes.
data_labels = pd.read_csv(LABEL_PATH)
target_labels = data_labels['clase']
print("Etiquetas encontradas: ", len(set(target_labels)))
data_labels.head()
# Next, add the exact image path for each image present on disk.
# This makes it easy to locate and load the images during model training.
train_folder = DATASET_PATH
data_labels['image_path'] = data_labels.apply(lambda row: (train_folder + row["id"] + args["ext"] ),
axis=1)
data_labels.head()
# Prepare training, test and validation datasets.
# Parameters
target_size_x = 299
target_size_y = 299
test_size = 0.3
random_state = 42
test_size2 = 0.15
random_state2 = 42
train_data, x_train, x_test, y_train, y_test, x_val, y_val = datos_flo(target_size_x,target_size_y,test_size,random_state,test_size2,random_state2)
print('Initial dataset size:', train_data.shape)
print('Initial train and test dataset sizes:', x_train.shape, x_test.shape)
print('Train and validation dataset sizes:', x_train.shape, x_val.shape)
print('Train, test and validation dataset sizes:\n', x_train.shape, x_test.shape, x_val.shape)
# Convert the text class labels into one-hot encoded labels
y_train_ohe = pd.get_dummies(y_train.reset_index(drop=True)).values
y_val_ohe = pd.get_dummies(y_val.reset_index(drop=True)).values
y_test_ohe = pd.get_dummies(y_test.reset_index(drop=True)).values
print(y_train_ohe.shape, y_test_ohe.shape, y_val_ohe.shape)
# Parameters
batch_size = 32
rotation_range = 30
width_shift_range = 0.2
height_shift_range = 0.2
horizontal_flip = True
seed = 25
train_datagen, train_generator, val_datagen, val_generator = data_gen(batch_size, rotation_range, width_shift_range, height_shift_range, horizontal_flip, seed)
#Transfer Learning with Google’s Inception V3 Model
# Parameters
weights = 'imagenet'
input_shapex = 299
input_shapey = 299
input_shapez = 3
activation = 'relu'
activation_pred = 'sigmoid'
loss = "binary_crossentropy"
learning_rate = 0.0001
momentum = 0.8
batch_size = 32
epochs = args["epochs"]
model_path_save = 'models/transfer_inceptionV3.h5'
model_path_save_json = 'models/transfer_inceptionV3.json'
history = tranf_learn(weights,input_shapex,input_shapey,input_shapez,activation,activation_pred,loss,learning_rate,momentum,batch_size,epochs,model_path_save,model_path_save_json)
# Inception V3 evaluation
# Parameters
num_epochs = epochs + 1
Plot_name = 'Permormance_1.png'
space = 50
plot_eval(num_epochs,Plot_name,space)
# Model evaluation
base_model = InceptionV3(weights='imagenet', include_top=False)
#model = load_model(model_path_save)
# load json and create model
json_file = open(model_path_save_json, 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights(model_path_save)
print("Loaded model from disk")
# scaling test features
x_test /= 255.
# getting model predictions
test_predictions = model.predict(x_test)
labels_ohe_names = pd.get_dummies(target_labels, sparse=True)
predictions = pd.DataFrame(test_predictions, columns=labels_ohe_names.columns)
predictions = list(predictions.idxmax(axis=1))
test_labels = list(y_test)
#evaluate model performance
meu.get_metrics(true_labels=test_labels,
predicted_labels=predictions)
meu.display_classification_report(true_labels=test_labels,
predicted_labels=predictions,
classes=list(labels_ohe_names.columns))
# print(meu.display_confusion_matrix_pretty(true_labels=test_labels,
# predicted_labels=predictions,
# classes=list(labels_ohe_names.columns)))
font = {
'family': 'Times New Roman',
'size': 12
}
matplotlib.rc('font', **font)
mat = confusion_matrix(test_labels, predictions)
plot_confusion_matrix(conf_mat=mat, figsize=(4, 4), class_names = list(labels_ohe_names.columns), show_normed=False)
grid_width = 5
grid_height = 5
f, ax = plt.subplots(grid_width, grid_height)
f.set_size_inches(15, 15)
batch_size = 25
dataset = x_test
labels_ohe_names = pd.get_dummies(target_labels, sparse=True)
import logging
import numpy as np
import copy
import pandas as pd
from juneau.utils.utils import sigmoid, jaccard_similarity
from juneau.search.search_prov_code import ProvenanceSearch
class Sorted_State:
def __init__(self, query, tables):
self.name = query.name # the query name
self.tables = tables # a list of table names
def save_a_state(self, state, previous_state, case_id):
if previous_state == None:
self.state = state
return
if case_id == 0:
domains = ["col_sim_ub", "new_row_ub", "prov_sim"]
elif case_id == 1:
domains = ["row_sim_ub", "new_col_ub", "prov_sim"]
elif case_id == 2:
domains = ["col_sim_ub", "row_sim_ub", "nan_diff_ub", "prov_sim"]
for domain in domains:
state[domain] = previous_state[domain].append(state[domain])
self.state = state # a dictionary of feature:dataframe
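# Note (added for clarity): Sorted_State caches the per-table score frames
# ("col_sim_ub", "row_sim_ub", "new_row_ub", "new_col_ub", "nan_diff_ub", "prov_sim")
# between queries, so Sorted_Components below can reuse scores for tables it has
# already processed instead of recomputing them.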
class Sorted_Components:
def __init__(self, mappings, all_tables, all_graphs, previous_state = None):
self.tables = all_tables
self.comp_tables = []
self.cache_tables = []
self.Graphs = all_graphs
self.mappings = mappings
self.pre_state = previous_state
if self.pre_state != None:
for tn in self.tables.keys():
if tn in self.pre_state.tables:
self.cache_tables.append(tn)
else:
self.comp_tables.append(tn)
else:
self.comp_tables = list(self.tables.keys())
def provenance_score(self, query, alpha):
prov_class = ProvenanceSearch(self.Graphs)
# Compute Provenance Similarity
logging.info("Compute Provenance Similarity!")
table_prov_rank = prov_class.search_score_rank(query.node, self.comp_tables)
table_prov_score = {}
for i, j in table_prov_rank:
table_prov_score["rtable" + i] = j
for i in self.cache_tables:
table_prov_score[i] = self.pre_state.state["prov_sim"]["score"][i]
rank_candidate = []
for i in self.tables.keys():
if i == query.name:
continue
if i in self.cache_tables:
continue
if i not in table_prov_score:
prov_score = 0
else:
prov_score = 1 - sigmoid(table_prov_score[i])
tname = i[6:]
if tname not in self.mappings:
inital_mapping = {}
else:
inital_mapping = self.mappings[tname]
prov_score = alpha * prov_score
rank_candidate.append((i, prov_score, inital_mapping))
return rank_candidate
def col_similarity_ub(self, query, beta):
rank_candiate = []
for i in self.tables.keys():
if i == query.name:
continue
if i in self.cache_tables:
rank_candiate.append((i, self.pre_state.state["col_sim_ub"]["score"][i]))
continue
tname = i[6:]
tableA = query.value
tableB = self.tables[i]
if tname not in self.mappings:
col_sim_ub = 0
else:
col_sim_ub = float(beta) * \
float(min(tableA.shape[1], tableB.shape[1]))\
/float(tableA.shape[1] + tableB.shape[1] - len(self.mappings[tname]))
rank_candiate.append((i, col_sim_ub))
return rank_candiate
def row_similarity_ub(self, query, beta):
rank_candidate = []
for i in self.tables.keys():
if i == query.name:
continue
if i in self.cache_tables:
rank_candidate.append((i, self.pre_state.state["row_sim_ub"]["score"][i]))
continue
tname = i[6:]
tableA = query.value
tableB = self.tables[i]
if tname not in self.mappings:
row_sim_ub = 0
else:
row_sim_ub = 0
initial_mapping = self.mappings[tname]
for key in initial_mapping.keys():
Avalue = tableA[key].dropna().keys()
Bvalue = tableB[initial_mapping[key]].dropna().values
try:
row_sim = jaccard_similarity(Avalue, Bvalue)
except:
row_sim = 0
if row_sim > row_sim_ub:
row_sim_ub = row_sim
rank_candidate.append((i, beta * row_sim_ub))
return rank_candidate
def new_col_rate_ub(self, query, beta):
rank_candidate = []
for i in self.tables.keys():
if i == query.name:
continue
tname = i[6:]
tableA = query.value
if tname not in self.mappings:
inital_mapping = {}
new_data_rate = 1
else:
inital_mapping = self.mappings[tname]
new_data_rate = float(tableA.shape[1] - len(inital_mapping))/float(tableA.shape[1])
new_data_rate_ub = float(beta) * new_data_rate
rank_candidate.append((i, new_data_rate_ub))
return rank_candidate
def new_row_rate_ub(self, query, beta):
rank_candidate = []
for i in self.tables.keys():
if i == query.name:
continue
if i in self.cache_tables:
rank_candidate.append((i, self.pre_state.state["new_row_ub"]["score"][i]))
continue
tname = i[6:]
tableA = query.value
tableB = self.tables[i]
if tname not in self.mappings:
new_data_rate = 0
else:
new_data_rate = 0
inital_mapping = self.mappings[tname]
for key in inital_mapping.keys():
Alen = tableA[key].dropna().values
Blen = tableB[inital_mapping[key]].dropna().values
try:
new_data_rate_temp = float(1) - float(len(np.intersect1d(Alen, Blen))) / float(len(Alen))
except:
new_data_rate_temp = 0
if new_data_rate_temp > new_data_rate:
new_data_rate = new_data_rate_temp
rank_candidate.append((i, beta * new_data_rate))
return rank_candidate
def nan_delta_ub(self, query, beta):
rank_candidate = []
for i in self.tables.keys():
if i == query.name:
continue
if i in self.cache_tables:
rank_candidate.append((i, self.pre_state.state["nan_diff_ub"]["score"][i]))
continue
tname = i[6:]
tableA = query.value
if tname not in self.mappings:
nan_ub = 0
else:
key_indexA = list(self.mappings[tname].keys())
ub_zero_diff = tableA[key_indexA].isnull().sum().sum()
value_num = tableA.shape[0] * tableA.shape[1]
nan_ub = float(ub_zero_diff)/float(value_num)
rank_candidate.append((i, beta * nan_ub))
return rank_candidate
def merge_additional_training(self, query, alpha, beta):
ub1 = sorted(self.col_similarity_ub(query, beta), key = lambda d:d[1], reverse=True)
ub2 = sorted(self.new_row_rate_ub(query, 1 - alpha - beta), key = lambda d:d[1], reverse=True)
#print(ub1[:5])
#print(ub2[:5])
ub = ub1[0][1] + ub2[0][1]
rank_candidate = self.provenance_score(query, alpha)
old_rank_candidate = copy.deepcopy(rank_candidate)
rank_candidate = []
for i in range(len(old_rank_candidate)):
rank_candidate.append((old_rank_candidate[i][0], old_rank_candidate[i][1] + ub, old_rank_candidate[i][2]))
u1_df = pd.DataFrame([pair[1] for pair in ub1], index = [pair[0] for pair in ub1], columns = ["score"])
u2_df = pd.DataFrame([pair[1] for pair in ub2], index = [pair[0] for pair in ub2], columns = ["score"])
u3_df = pd.DataFrame([pair[1] for pair in old_rank_candidate], index = [pair[0] for pair in old_rank_candidate], columns = ["score"])
"""
Prepare training and testing datasets as CSV dictionaries 2.0
Created on 04/26/2019; modified on 11/06/2019
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
import re
# get all full paths of images
def image_ids_in(root_dir, ignore=['.DS_Store','dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
# pair tiles of 20x, 10x, 5x of the same area
def paired_tile_ids_in_old(slide, label, root_dir):
dira = os.path.isdir(root_dir + 'level0')
dirb = os.path.isdir(root_dir + 'level1')
dirc = os.path.isdir(root_dir + 'level2')
if dira and dirb and dirc:
if "TCGA" in root_dir:
fac = 1000
else:
fac = 500
ids = []
for level in range(3):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '.png' in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('_', id.split('y-', 1)[1])[0]) / fac)
try:
dup = re.split('.p', re.split('_', id.split('y-', 1)[1])[1])[0]
except IndexError:
dup = np.nan
ids.append([slide, label, level, dirr + '/' + id, x, y, dup])
else:
print('Skipping ID:', id)
ids = pd.DataFrame(ids, columns=['slide', 'label', 'level', 'path', 'x', 'y', 'dup'])
idsa = ids.loc[ids['level'] == 0]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L0path"})
idsb = ids.loc[ids['level'] == 1]
idsb = idsb.drop(columns=['slide', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L1path"})
idsc = ids.loc[ids['level'] == 2]
idsc = idsc.drop(columns=['slide', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L2path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
idsa = pd.merge(idsa, idsc, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y', 'dup'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
else:
idsa = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
return idsa
def tile_ids_in(inp):
ids = []
try:
for id in os.listdir(inp['path']):
if '_{}.png'.format(str(inp['sldnum'])) in id:
ids.append([inp['slide'], inp['level'], inp['path']+'/'+id, inp['BMI'], inp['age'], inp['label']])
except FileNotFoundError:
print('Ignore:', inp['path'])
return ids
# pair tiles of 10x, 5x, 2.5x of the same area
def paired_tile_ids_in(slide, label, root_dir, age=None, BMI=None):
dira = os.path.isdir(root_dir + 'level1')
dirb = os.path.isdir(root_dir + 'level2')
dirc = os.path.isdir(root_dir + 'level3')
if dira and dirb and dirc:
if "TCGA" in root_dir:
fac = 2000
else:
fac = 1000
ids = []
for level in range(1, 4):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '.png' in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('_', id.split('y-', 1)[1])[0]) / fac)
try:
dup = re.split('.p', re.split('_', id.split('y-', 1)[1])[1])[0]
except IndexError:
dup = np.nan
ids.append([slide, label, level, dirr + '/' + id, x, y, dup])
else:
print('Skipping ID:', id)
ids = pd.DataFrame(ids, columns=['slide', 'label', 'level', 'path', 'x', 'y', 'dup'])
idsa = ids.loc[ids['level'] == 1]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L0path"})
idsb = ids.loc[ids['level'] == 2]
idsb = idsb.drop(columns=['slide', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L1path"})
idsc = ids.loc[ids['level'] == 3]
idsc = idsc.drop(columns=['slide', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L2path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
idsa = pd.merge(idsa, idsc, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y', 'dup'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
idsa['age'] = age
idsa['BMI'] = BMI
else:
idsa = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
return idsa
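# Hypothetical illustration (added, not in the original) of the tile-name convention the
# two pairing functions above assume: pixel coordinates embedded as 'x-<px>-y-<px>' with
# an optional '_<dup>' suffix before '.png'. The file name below is invented for the demo.
def _parse_tile_name_example():
    name = "tile_x-2000-y-4000_1.png"  # made-up tile name
    fac = 1000
    x = int(float(name.split('x-', 1)[1].split('-', 1)[0]) / fac)
    y = int(float(re.split('_', name.split('y-', 1)[1])[0]) / fac)
    dup = re.split('.p', re.split('_', name.split('y-', 1)[1])[1])[0]
    return x, y, dup  # -> (2, 4, '1')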
# Balance CPTAC and TCGA tiles in each class
def balance(pdls, cls):
balanced = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
for i in range(cls):
ref = pdls.loc[pdls['label'] == i]
CPTAC = ref[~ref['slide'].str.contains("TCGA")]
TCGA = ref[ref['slide'].str.contains("TCGA")]
if CPTAC.shape[0] != 0 and TCGA.shape[0] != 0:
ratio = (CPTAC.shape[0])/(TCGA.shape[0])
if ratio < 0.2:
TCGA = TCGA.sample(int(5*CPTAC.shape[0]), replace=False)
ref = pd.concat([TCGA, CPTAC], sort=False)
elif ratio > 5:
CPTAC = CPTAC.sample(int(5*TCGA.shape[0]), replace=False)
ref = pd.concat([TCGA, CPTAC], sort=False)
balanced = pd.concat([balanced, ref], sort=False)
import sys
import argparse
import itertools
from collections import defaultdict
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('paths', nargs='+')
parser.add_argument('--epoch', action='store_true')
parser.add_argument('--cheat', action='store_true')
parser.add_argument('--merge', action='store_true')
parser.add_argument('--momentum', type=float, default=0.0)
args = parser.parse_args()
if not args.cheat:
criterion = lambda x: x[0] # val acc
else:
criterion = lambda x: x[1] # test acc
def get_acc(path):
accs = []
with open(path) as fd:
moving_val_acc = 0
for line in fd:
prefix = 'Epoch: '
if line.startswith(prefix):
tokens = line.split(' ')
val_acc = float(tokens[-4][:-1])
test_acc = float(tokens[-1])
moving_val_acc = args.momentum * moving_val_acc + (1 - args.momentum) * val_acc
accs.append((moving_val_acc, test_acc))
if len(accs) == 0:
return None
return accs
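# Hypothetical example (added for clarity) of the log-line shape get_acc() expects; the
# exact wording is an assumption, only the token positions matter: validation accuracy
# sits at tokens[-4] with a trailing comma, test accuracy at tokens[-1].
def _parse_example_log_line():
    line = "Epoch: 042, Val Acc: 0.8123, Test Acc: 0.7910"  # made-up line
    tokens = line.split(' ')
    return float(tokens[-4][:-1]), float(tokens[-1])  # -> (0.8123, 0.791)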
data = defaultdict(list)
for path in sorted(args.paths, key=lambda x: x.split('-')[::-1]):
tokens = path[:-4].split('-')
*_, fold, dim, bs, dropout, seed = tokens
model = tokens[0]
dataset = '-'.join(tokens[1:-5])
acc = get_acc(path)
if acc is None:
continue
parts = model.split('/')
if len(parts) == 1:
run = 'default'
else:
run = parts[-2]
model = parts[-1]
if model == 'mean':
model = 'sum'
for i, (val_acc, test_acc) in enumerate(acc):
data['seed'].append(seed)
data['run'].append(run)
data['model'].append(model)
data['dataset'].append(dataset)
data['fold'].append(fold)
data['dim'].append(int(dim))
data['bs'].append(int(bs))
data['dropout'].append(float(dropout))
data['epoch'].append(int(i))
data['val_acc'].append(val_acc * 100)
data['test_acc'].append(test_acc * 100)
data = pd.DataFrame.from_dict(data)
if args.epoch:
dgcnn_epochs = {
'MUTAG': 300,
'PTC_MR': 200,
'NCI1': 200,
'PROTEINS': 100,
}
for dataset, epoch in dgcnn_epochs.items():
applies_to_dataset = data['dataset'] == dataset
matches_epoch = data['epoch'] == epoch
data = data[~applies_to_dataset | matches_epoch]
# average over folds
df = data.groupby(['seed', 'run', 'model', 'dataset', 'dim', 'bs', 'dropout', 'epoch']).mean()
if args.merge:
df = df.mean(axis=1)
# group hyperparams away
df2 = df.groupby(['seed', 'run', 'model', 'dataset'])
if not args.cheat:
df2 = df2['val_acc']
elif not args.merge:
df2 = df2['test_acc']
idx = df2.idxmax()
best = df.loc[idx]
print(best.to_string())
best = best.reset_index(level=['epoch', 'bs', 'dim', 'dropout'])
df3 = best.groupby(['run', 'model', 'dataset'])
mean = df3.mean()
std = df3[['test_acc', 'val_acc', 'epoch']].std().rename(index=str, columns={'test_acc': 'test_std', 'val_acc': 'val_std', 'epoch': 'epoch_std'})
best_across_seeds = pd.concat([mean, std], axis=1)
from pyswip import Prolog
from gym_sokoban.envs.sokoban_env_fast import SokobanEnvFast
import time
import pandas as pd
def flatten(container):
for i in container:
if isinstance(i, (list,tuple)):
for j in flatten(i):
yield j
else:
yield i
def map_moves(move):
if move == "up":
return 3
elif move == "down":
return 2
elif move == "left":
return 0
elif move == "right":
return 1
def find_solution(size=8, num_boxes=2, time_limit=10, seed=0):
dim_room = (size, size)
env = SokobanEnvFast(dim_room=dim_room,
num_boxes=num_boxes,
seed=seed,
penalty_for_step=0)
# The encoding of the board is described in README
board = env.reset()
wall = board[:,:,0] # this is a one-hot encoding of walls
# For readibility first we deal with tops and then with rights
tops = []
for i in range(dim_room[0]):
for j in range(dim_room[1]-1):
if wall[i,j] == 0 and wall[i,j+1] == 0:
tops.append("top(x{}y{},x{}y{})".format(i,j,i,j+1))
rights = []
for i in range(dim_room[0]-1):
for j in range(dim_room[1]):
if wall[i,j] == 0 and wall[i+1,j] == 0:
rights.append("right(x{}y{},x{}y{})".format(i,j,i+1,j))
boxes_initial_locations = board[:,:,4]
boxes_initial = []
for i in range(dim_room[0]):
for j in range(dim_room[1]):
if boxes_initial_locations[i,j] == 1:
boxes_initial.append("box(x{}y{})".format(i,j))
boxes_target_locations = board[:,:,2] + board[:,:,3] + board[:,:,6]
boxes_target = []
for i in range(dim_room[0]):
for j in range(dim_room[1]):
if boxes_target_locations[i,j] == 1:
boxes_target.append("solution(x{}y{})".format(i,j))
sokoban_initial_location = board[:,:,5] + board[:,:,6]
for i in range(dim_room[0]):
for j in range(dim_room[1]):
if sokoban_initial_location[i,j] == 1:
sokoban_string = "sokoban(x{}y{})".format(i,j)
break
tops_string = "[" + ','.join(tops) + ']'
rights_string = "[" + ','.join(rights) + ']'
boxes_initial_string = "[" + ','.join(boxes_initial) + ']'
boxes_target_string = "[" + ','.join(boxes_target) + ']'
prolog = Prolog()
prolog.consult("sokoban.pl")
query = "call_with_time_limit({},solve([{},{},{},{},{}],Solution))".format(time_limit,
tops_string,
rights_string,
boxes_initial_string,
boxes_target_string,
sokoban_string)
print('board: \n', board)
# print('zeros: \n', board[:,:,0])
# print('ones: \n', board[:,:,1])
# print('twos: \n', board[:,:,2])
# print('threes: \n', board[:,:,3])
# print('fours: \n', board[:,:,4])
# print('fives: \n', board[:, :, 5])
# print('sixes: \n', board[:, :, 6])
print(query)
try:
result = list(prolog.query(query))
rewards = []
print('result: ', result)
for i, r in enumerate(result):
solution = r['Solution']
actions = []
for index in range(len(solution)):
move = str(solution[index]).split()[-1]
move = move[:-1]
action = map_moves(move)
actions.append(action)
observation, reward, done, info = env.step(action)
rewards.append(reward)
print("Last return {}".format(rewards[-1]))
if rewards[-1] >= 10:
return 1, actions
return 0, []
except Exception:
print("No solution found within the time limit")
return 0, []
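# Convenience sketch (an addition, not in the original script): solve a single board and
# report the outcome, mirroring what the __main__ loop below does for each seed.
def _solve_one_board(seed=0):
    solved, actions = find_solution(size=8, num_boxes=2, time_limit=10, seed=seed)
    print("solved:", bool(solved), "moves:", len(actions))
    return solved, actions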
if __name__ == "__main__":
number_of_trials = 100
time_start = time.time()
df = pd.DataFrame(columns=['seed', 'actions'])
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import gc
import stacknet_funcs as funcs
from math import ceil
import numpy as np
from os import chdir
folder = "F:/Nerdy Stuff/Kaggle/Talking data/data/"
sparse_array_path = 'F:/Nerdy Stuff/Kaggle/Talking data/sparse matricies/'
predictors = []
run = "test"
dtypes = {
'ip' : 'uint32',
'app' : 'uint16',
'device' : 'uint8',
'os' : 'uint16',
'channel' : 'uint16',
'is_attributed' : 'uint8',
'click_id' : 'uint32',
}
if run == "train":
file = folder + "train.csv"
cols = ['ip', 'app', 'device', 'os', 'channel', 'click_time', 'is_attributed']
print('loading %s data...' % (run))
base_df = pd.read_csv(file, parse_dates=['click_time'], low_memory=True,dtype=dtypes, usecols=cols)
if run == "test":
print('loading %s data...' % (run))
file = folder + "test.csv"
cols = ['ip', 'app', 'device', 'os', 'channel', 'click_time']
base_df = pd.read_csv(file, parse_dates=['click_time'], dtype=dtypes, usecols=cols)
rows = base_df.shape[0]
iters = 100
iter_rows = ceil(rows/iters)
X_ttl = np.empty((0, 31))
y_ttl = np.empty((0, ))
start_point = 0
for i in list(range(start_point, iters)):
print("Cut # %i" % (i))
if i == 0:
start = i * iter_rows
end = (i + 1) * iter_rows
print("start row = %s and end row = %s" % (start, end))
df = base_df.iloc[start:end, :].copy()
else:
start = i * iter_rows + 1
end = (i + 1) * iter_rows
print("start row = %s and end row = %s" % (start, end))
df = base_df.iloc[start:end, :].copy()
df['hour'] = pd.to_datetime(df.click_time).dt.hour.astype('int8')
df['day'] = pd.to_datetime(df.click_time).dt.day.astype('int8')
df['minute'] = pd.to_datetime(df.click_time).dt.minute.astype('int8')
predictors.append('minute')
df['second'] = pd.to_datetime(df.click_time)
import pandas as pd
import numpy as np
from statsmodels.formula.api import ols
import plotly_express
import plotly.graph_objs as go
from plotly.subplots import make_subplots
# Read in data
batter_data = pd.read_csv("~/Desktop/MLB_FA/Data/fg_bat_data.csv")
del batter_data['Age']
print(len(batter_data))
print(batter_data.head())
pitcher_data = pd.read_csv("~/Desktop/MLB_FA/Data/fg_pitch_data.csv")
import pandas as pd
import numpy as np
import torch
from scipy.io import arff
from abc import ABC, abstractmethod
from torch.utils.data import DataLoader, TensorDataset
class BaseADDataset(ABC):
"""Anomaly detection dataset base class."""
def __init__(self, root: str):
super().__init__()
self.root = root # root path to data
self.n_classes = 2 # 0: normal, 1: outlier
self.normal_classes = None # tuple with original class labels that define the normal class
self.outlier_classes = None # tuple with original class labels that define the outlier class
self.train_set = None # must be of type torch.utils.data.Dataset
self.test_set = None # must be of type torch.utils.data.Dataset
@abstractmethod
def loaders(self, batch_size: int, shuffle_train=True, shuffle_test=False, num_workers: int = 0) -> (
DataLoader, DataLoader):
"""Implement data loaders of type torch.utils.data.DataLoader for train_set and test_set."""
pass
def __repr__(self):
return self.__class__.__name__
class TorchvisionDataset(BaseADDataset):
"""TorchvisionDataset class for datasets already implemented in torchvision.datasets."""
def __init__(self, root: str):
super().__init__(root)
def loaders(self, batch_size: int, shuffle_train=True, shuffle_test=False, num_workers: int = 0) -> (
DataLoader, DataLoader):
train_loader = DataLoader(dataset=self.train_set, batch_size=batch_size, shuffle=shuffle_train,
num_workers=num_workers)
test_loader = DataLoader(dataset=self.test_set, batch_size=batch_size, shuffle=shuffle_test,
num_workers=num_workers)
return train_loader, test_loader
class SAD_Dataset(TorchvisionDataset):
def __init__(self, root: str, normal_class):
super().__init__(root)
self.n_classes = 2
self.normal_class = normal_class
# train set
#load data file path
url1_train = 'data/sad/SpokenArabicDigitsDimension1_TRAIN.arff'
url2_train = 'data/sad/SpokenArabicDigitsDimension2_TRAIN.arff'
url3_train = 'data/sad/SpokenArabicDigitsDimension3_TRAIN.arff'
url4_train = 'data/sad/SpokenArabicDigitsDimension4_TRAIN.arff'
url5_train = 'data/sad/SpokenArabicDigitsDimension5_TRAIN.arff'
url6_train = 'data/sad/SpokenArabicDigitsDimension6_TRAIN.arff'
url7_train = 'data/sad/SpokenArabicDigitsDimension7_TRAIN.arff'
url8_train = 'data/sad/SpokenArabicDigitsDimension8_TRAIN.arff'
url9_train = 'data/sad/SpokenArabicDigitsDimension9_TRAIN.arff'
url10_train = 'data/sad/SpokenArabicDigitsDimension10_TRAIN.arff'
url11_train = 'data/sad/SpokenArabicDigitsDimension11_TRAIN.arff'
url12_train = 'data/sad/SpokenArabicDigitsDimension12_TRAIN.arff'
url13_train = 'data/sad/SpokenArabicDigitsDimension13_TRAIN.arff'
# get x and y as dataframe
x_dim1_train, target_train = get_data(url1_train)
x_dim2_train, __ = get_data(url2_train)
x_dim3_train, __ = get_data(url3_train)
x_dim4_train, __ = get_data(url4_train)
x_dim5_train, __ = get_data(url5_train)
x_dim6_train, __ = get_data(url6_train)
x_dim7_train, __ = get_data(url7_train)
x_dim8_train, __ = get_data(url8_train)
x_dim9_train, __ = get_data(url9_train)
x_dim10_train, __ = get_data(url10_train)
x_dim11_train, __ = get_data(url11_train)
x_dim12_train, __ = get_data(url12_train)
x_dim13_train, __ = get_data(url13_train)
x_dim1_train = get_features(x_dim1_train)
x_dim2_train = get_features(x_dim2_train)
x_dim3_train = get_features(x_dim3_train)
x_dim4_train = get_features(x_dim4_train)
x_dim5_train = get_features(x_dim5_train)
x_dim6_train = get_features(x_dim6_train)
x_dim7_train = get_features(x_dim7_train)
x_dim8_train = get_features(x_dim8_train)
x_dim9_train = get_features(x_dim9_train)
x_dim10_train = get_features(x_dim10_train)
x_dim11_train = get_features(x_dim11_train)
x_dim12_train = get_features(x_dim12_train)
x_dim13_train = get_features(x_dim13_train)
# combine 13 dimensions of x
x_train = np.dstack([x_dim1_train, x_dim2_train, x_dim3_train, x_dim4_train, x_dim5_train, x_dim6_train, x_dim7_train, x_dim8_train, x_dim9_train, x_dim10_train, x_dim11_train, x_dim12_train, x_dim13_train])
# process output y and produce index
y_train, index_train = get_target(target_train, normal_class)
# train only on normal data, extracting normal data
x_final_train, y_final_train, index_final_train = get_training_set(x_train, y_train, index_train)
# print("size: ", x_final_train.shape)
train_set = TensorDataset(torch.Tensor(x_final_train), torch.Tensor(y_final_train), torch.Tensor(index_final_train))
self.train_set = train_set
# set up testing set
url1_test = 'data/sad/SpokenArabicDigitsDimension1_TEST.arff'
url2_test = 'data/sad/SpokenArabicDigitsDimension2_TEST.arff'
url3_test = 'data/sad/SpokenArabicDigitsDimension3_TEST.arff'
url4_test = 'data/sad/SpokenArabicDigitsDimension4_TEST.arff'
url5_test = 'data/sad/SpokenArabicDigitsDimension5_TEST.arff'
url6_test = 'data/sad/SpokenArabicDigitsDimension6_TEST.arff'
url7_test = 'data/sad/SpokenArabicDigitsDimension7_TEST.arff'
url8_test = 'data/sad/SpokenArabicDigitsDimension8_TEST.arff'
url9_test = 'data/sad/SpokenArabicDigitsDimension9_TEST.arff'
url10_test = 'data/sad/SpokenArabicDigitsDimension10_TEST.arff'
url11_test = 'data/sad/SpokenArabicDigitsDimension11_TEST.arff'
url12_test = 'data/sad/SpokenArabicDigitsDimension12_TEST.arff'
url13_test = 'data/sad/SpokenArabicDigitsDimension13_TEST.arff'
x_dim1_test, target_test = get_data(url1_test)
x_dim2_test, __ = get_data(url2_test)
x_dim3_test, __ = get_data(url3_test)
x_dim4_test, __ = get_data(url4_test)
x_dim5_test, __ = get_data(url5_test)
x_dim6_test, __ = get_data(url6_test)
x_dim7_test, __ = get_data(url7_test)
x_dim8_test, __ = get_data(url8_test)
x_dim9_test, __ = get_data(url9_test)
x_dim10_test, __ = get_data(url10_test)
x_dim11_test, __ = get_data(url11_test)
x_dim12_test, __ = get_data(url12_test)
x_dim13_test, __ = get_data(url13_test)
x_dim1_test = get_features(x_dim1_test)
x_dim2_test = get_features(x_dim2_test)
x_dim3_test = get_features(x_dim3_test)
x_dim4_test = get_features(x_dim4_test)
x_dim5_test = get_features(x_dim5_test)
x_dim6_test = get_features(x_dim6_test)
x_dim7_test = get_features(x_dim7_test)
x_dim8_test = get_features(x_dim8_test)
x_dim9_test = get_features(x_dim9_test)
x_dim10_test = get_features(x_dim10_test)
x_dim11_test = get_features(x_dim11_test)
x_dim12_test = get_features(x_dim12_test)
x_dim13_test = get_features(x_dim13_test)
x_final_test = np.dstack([x_dim1_test, x_dim2_test, x_dim3_test, x_dim4_test, x_dim5_test, x_dim6_test, x_dim7_test, x_dim8_test, x_dim9_test, x_dim10_test, x_dim11_test, x_dim12_test, x_dim13_test])
y_final_test, index_test = get_target(target_test, normal_class)
test_set = TensorDataset(torch.Tensor(x_final_test), torch.Tensor(y_final_test), torch.Tensor(index_test))
self.test_set = test_set
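# Hedged usage sketch (added, not part of the original file): build the dataset for one
# normal class and fetch its torch DataLoaders. The batch size is an arbitrary example.
def _example_loaders(normal_class=0, batch_size=128):
    dataset = SAD_Dataset(root='data/sad', normal_class=normal_class)
    return dataset.loaders(batch_size=batch_size)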
def get_data(url):
"""
input: path to arff data file
This function loads the arff file, then converts into dataframe.
The dataframe is then split into x and y.
output: x is dataframe object without the last column. y is series.
"""
loaded = arff.loadarff(url)
df = pd.DataFrame(loaded[0])
# dropping the last column of dataframe
# it is still a dataframe object
x = df.iloc[:, :-1].to_numpy()
# getting last column as series, not dataframe object
# as dataframe object is using iloc[:, -1:]
y = df.iloc[:, -1]
return x, y
def get_features(x):
"""
input: unprocessed features data
This function replaces missing values with zeroes.
output: processed features data
"""
for i in range(0, len(x)):
for j in range(0, 93):
if pd.isna(x[i][j]):
    x[i][j] = 0
return x
import argparse
import pandas
import csv
import numpy as np
import random
random.seed(a=666)
import logging
import os
import plotly
from plotly.graph_objs import *
from plotly.offline import plot
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def parseArgs():
"""
CLI inputs
"""
log.info('Parsing command-line arguments...')
parser = argparse.ArgumentParser(description='Explore enzyme-screening variability historically')
subparsers = parser.add_subparsers(help='Choose mode of operation')
plot_parser = subparsers.add_parser('only-plots')
full_parser = subparsers.add_parser('daisy-chain')
full_parser.add_argument('stats',
help='Path to summary_stats.csv produced by sequin')
full_parser.add_argument('output_old',
help='Path to previous output')
full_parser.add_argument('output_new',
help='Path to new output')
full_parser.add_argument('enzymes',
help='Name of enzyme(s) to retrieve data for, space-separated',
nargs='*')
plot_parser.add_argument('output_old',
help='Path to previous output')
plot_parser.add_argument('output_path',
help='Path for plots')
args = parser.parse_args()
return args
#return args.stats, args.output_old, args.output_new, args.enzymes
def get_fieldnames(arr):
"""
Get fieldnames from recarray
"""
common_column_names = ['esPlate', 'chip', 'ESID', 'plateId', 'plateName', 'limsExptName', 'runCode',
'analogType', 'analogConcentration', 'movieTime',
'platform', 'chipLotId', 'chipType', 'esComment', 'laserPower']
fn1 = []
for val in arr.dtype.names:
n = val.split('test')
if len(n) == 1:
fn1.append(n)
else:
if len(n[0]) > 0:
n[0] = n[0][0:-1]
fn1.append(''.join(n))
fn2 = []
for val in arr.dtype.names:
n = val.split('control')
if len(n) == 1:
fn2.append(n)
else:
if len(n[0]) > 0:
n[0] = n[0][0:-1]
fn2.append(''.join(n))
ans = np.intersect1d(fn1, fn2)
r = []
for col in ans:
if type(col) is list:
r.append(col[0])
else:
r.append(col)
f = []
for col in r:
s = col.split('.')
if ('delta' in s) | ('ratio' in s):
continue
else:
f.append(col)
return f
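# Note (added for clarity): get_fieldnames() keeps only the column names shared by the
# 'test*' and 'control*' variants once the prefix is stripped, and drops derived
# 'delta'/'ratio' columns, so retrieve_new_data() can map test and control rows onto a
# single common schema.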
def retrieve_new_data(stats, enzymes):
"""
Retrieve summary stats about chosen enzymes
"""
log.info('Retrieving new summary stats...')
summary_stats = pandas.read_csv(stats, dtype=str)
data = []
places = ['test', 'control']
for enzyme in enzymes:
for place in places: # look at test and controls
df = summary_stats[summary_stats[place + 'Enzyme'] == enzyme]
df = df.drop_duplicates(subset=[place+'Enzyme', place+'Template'])
if df.values.any():
df = df.to_records()
columns = df.dtype.names
fieldnames = get_fieldnames(df)
for row in df:
data_row = {}
for column in columns:
if column in fieldnames:
data_row[column] = row[column]
else:
new_column = column
m = column.split(place)
if len(m) > 1:
if len(m[0]) > 0:
m[0] = m[0][0:-1]
new_column = ''.join(m)
else:
new_column = ''.join(m)
data_row[new_column] = row[column]
data.append(data_row)
data = pandas.DataFrame.from_dict(data)
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
try:
import numpy as np
except ImportError:
pass
from utool import util_inject
(print, rrr, profile) = util_inject.inject2(__name__)
def nx_topsort_nodes(graph, nodes):
import utool as ut
node_rank = ut.nx_topsort_rank(graph, nodes)
node_idx = ut.rebase_labels(node_rank)
sorted_nodes = ut.take(nodes, node_idx)
return sorted_nodes
def nx_topsort_rank(graph, nodes=None):
"""
graph = inputs.exi_graph.reverse()
nodes = flat_node_order_
"""
import networkx as nx
import utool as ut
topsort = list(nx.topological_sort(graph))
node_to_top_rank = ut.make_index_lookup(topsort)
toprank = ut.dict_take(node_to_top_rank, nodes)
return toprank
def nx_common_descendants(graph, node1, node2):
import networkx as nx
descendants1 = nx.descendants(graph, node1)
descendants2 = nx.descendants(graph, node2)
common_descendants = set.intersection(descendants1, descendants2)
return common_descendants
def nx_common_ancestors(graph, node1, node2):
import networkx as nx
ancestors1 = nx.ancestors(graph, node1)
ancestors2 = nx.ancestors(graph, node2)
common_ancestors = set.intersection(ancestors1, ancestors2)
return common_ancestors
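# Hedged usage sketch for the two helpers above (illustrative addition): on the graph
# a->b->c with a->d, the common ancestors of 'c' and 'd' are {'a'}.
def _common_ancestor_example():
    import networkx as nx
    g = nx.DiGraph([('a', 'b'), ('b', 'c'), ('a', 'd')])
    return nx_common_ancestors(g, 'c', 'd')  # -> {'a'}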
def nx_make_adj_matrix(G):
import utool as ut
nodes = list(G.nodes())
node2_idx = ut.make_index_lookup(nodes)
edges = list(G.edges())
edge2_idx = ut.partial(ut.dict_take, node2_idx)
uv_list = ut.lmap(edge2_idx, edges)
A = np.zeros((len(nodes), len(nodes)))
A[tuple(np.array(uv_list).T)] = 1
return A
def nx_transitive_reduction(G, mode=1):
"""
References:
https://en.wikipedia.org/wiki/Transitive_reduction#Computing_the_reduction_using_the_closure
http://dept-info.labri.fr/~thibault/tmp/0201008.pdf
http://stackoverflow.com/questions/17078696/im-trying-to-perform-the-transitive-reduction-of-directed-graph-in-python
CommandLine:
python -m utool.util_graph nx_transitive_reduction --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> import networkx as nx
>>> G = nx.DiGraph([('a', 'b'), ('a', 'c'), ('a', 'e'),
>>> ('a', 'd'), ('b', 'd'), ('c', 'e'),
>>> ('d', 'e'), ('c', 'e'), ('c', 'd')])
>>> G = testdata_graph()[1]
>>> G_tr = nx_transitive_reduction(G, mode=1)
>>> G_tr2 = nx_transitive_reduction(G, mode=1)
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> G_ = nx.dag.transitive_closure(G)
>>> pt.show_nx(G , pnum=(1, 5, 1), fnum=1)
>>> pt.show_nx(G_tr , pnum=(1, 5, 2), fnum=1)
>>> pt.show_nx(G_tr2 , pnum=(1, 5, 3), fnum=1)
>>> pt.show_nx(G_ , pnum=(1, 5, 4), fnum=1)
>>> pt.show_nx(nx.dag.transitive_closure(G_tr), pnum=(1, 5, 5), fnum=1)
>>> ut.show_if_requested()
"""
import utool as ut
import networkx as nx
has_cycles = not nx.is_directed_acyclic_graph(G)
if has_cycles:
# FIXME: this does not work for cycle graphs.
# Need to do algorithm on SCCs
G_orig = G
G = nx.condensation(G_orig)
nodes = list(G.nodes())
node2_idx = ut.make_index_lookup(nodes)
# For each node u, perform DFS consider its set of (non-self) children C.
# For each descendant v, of a node in C, remove any edge from u to v.
if mode == 1:
G_tr = G.copy()
for parent in G_tr.nodes():
# Remove self loops
if G_tr.has_edge(parent, parent):
G_tr.remove_edge(parent, parent)
# For each child of the parent
for child in list(G_tr.successors(parent)):
# Preorder nodes includes its argument (no added complexity)
for gchild in list(G_tr.successors(child)):
# Remove all edges from parent to non-child descendants
for descendant in nx.dfs_preorder_nodes(G_tr, gchild):
if G_tr.has_edge(parent, descendant):
G_tr.remove_edge(parent, descendant)
if has_cycles:
# Uncondense graph
uncondensed_G_tr = G.__class__()
mapping = G.graph['mapping']
uncondensed_G_tr.add_nodes_from(mapping.keys())
inv_mapping = ut.invert_dict(mapping, unique_vals=False)
for u, v in G_tr.edges():
u_ = inv_mapping[u][0]
v_ = inv_mapping[v][0]
uncondensed_G_tr.add_edge(u_, v_)
for key, path in inv_mapping.items():
if len(path) > 1:
directed_cycle = list(ut.itertwo(path, wrap=True))
uncondensed_G_tr.add_edges_from(directed_cycle)
G_tr = uncondensed_G_tr
else:
def make_adj_matrix(G):
edges = list(G.edges())
edge2_idx = ut.partial(ut.dict_take, node2_idx)
uv_list = ut.lmap(edge2_idx, edges)
A = np.zeros((len(nodes), len(nodes)))
A[tuple(np.array(uv_list).T)] = 1
return A
G_ = nx.dag.transitive_closure(G)
A = make_adj_matrix(G)
B = make_adj_matrix(G_)
#AB = A * B
#AB = A.T.dot(B)
AB = A.dot(B)
#AB = A.dot(B.T)
A_and_notAB = np.logical_and(A, np.logical_not(AB))
tr_uvs = np.where(A_and_notAB)
#nodes = G.nodes()
edges = list(zip(*ut.unflat_take(nodes, tr_uvs)))
G_tr = G.__class__()
G_tr.add_nodes_from(nodes)
G_tr.add_edges_from(edges)
if has_cycles:
# Uncondense graph
uncondensed_G_tr = G.__class__()
mapping = G.graph['mapping']
uncondensed_G_tr.add_nodes_from(mapping.keys())
inv_mapping = ut.invert_dict(mapping, unique_vals=False)
for u, v in G_tr.edges():
u_ = inv_mapping[u][0]
v_ = inv_mapping[v][0]
uncondensed_G_tr.add_edge(u_, v_)
for key, path in inv_mapping.items():
if len(path) > 1:
directed_cycle = list(ut.itertwo(path, wrap=True))
uncondensed_G_tr.add_edges_from(directed_cycle)
G_tr = uncondensed_G_tr
return G_tr
def nx_source_nodes(graph):
import networkx as nx
topsort_iter = nx.dag.topological_sort(graph)
source_iter = (node for node in topsort_iter
if graph.in_degree(node) == 0)
return source_iter
def nx_sink_nodes(graph):
import networkx as nx
topsort_iter = nx.dag.topological_sort(graph)
sink_iter = (node for node in topsort_iter
if graph.out_degree(node) == 0)
return sink_iter
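# Illustrative sketch (added) for the source/sink helpers above: in a simple chain the
# first node is the only source and the last node is the only sink.
def _source_sink_example():
    import networkx as nx
    g = nx.DiGraph([('a', 'b'), ('b', 'c')])
    return list(nx_source_nodes(g)), list(nx_sink_nodes(g))  # -> (['a'], ['c'])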
def nx_to_adj_dict(graph):
import utool as ut
adj_dict = ut.ddict(list)
for u, edges in graph.adjacency():
adj_dict[u].extend(list(edges.keys()))
adj_dict = dict(adj_dict)
return adj_dict
def nx_from_adj_dict(adj_dict, cls=None):
if cls is None:
import networkx as nx
cls = nx.DiGraph
nodes = list(adj_dict.keys())
edges = [(u, v) for u, adj in adj_dict.items() for v in adj]
graph = cls()
graph.add_nodes_from(nodes)
graph.add_edges_from(edges)
return graph
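# Round-trip sketch (added) for the two converters above: nx_from_adj_dict(nx_to_adj_dict(G))
# rebuilds a graph with the same nodes and edges.
def _adj_dict_roundtrip_example():
    import networkx as nx
    g = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
    adj = nx_to_adj_dict(g)  # {0: [1, 2], 1: [2], 2: []}
    g2 = nx_from_adj_dict(adj, nx.DiGraph)
    return sorted(g.edges()) == sorted(g2.edges())  # -> True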
def nx_dag_node_rank(graph, nodes=None):
"""
Returns rank of nodes that define the "level" each node is on in a
topological sort. This is the same as the Graphviz dot rank.
Ignore:
simple_graph = ut.simplify_graph(exi_graph)
adj_dict = ut.nx_to_adj_dict(simple_graph)
import plottool as pt
pt.qt4ensure()
pt.show_nx(graph)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> adj_dict = {0: [5], 1: [5], 2: [1], 3: [4], 4: [0], 5: [], 6: [4], 7: [9], 8: [6], 9: [1]}
>>> import networkx as nx
>>> nodes = [2, 1, 5]
>>> f_graph = ut.nx_from_adj_dict(adj_dict, nx.DiGraph)
>>> graph = f_graph.reverse()
>>> #ranks = ut.nx_dag_node_rank(graph, nodes)
>>> ranks = ut.nx_dag_node_rank(graph, nodes)
>>> result = ('ranks = %r' % (ranks,))
>>> print(result)
ranks = [3, 2, 1]
"""
import utool as ut
source = list(ut.nx_source_nodes(graph))[0]
longest_paths = dict([(target, dag_longest_path(graph, source, target))
for target in graph.nodes()])
node_to_rank = ut.map_dict_vals(len, longest_paths)
if nodes is None:
return node_to_rank
else:
ranks = ut.dict_take(node_to_rank, nodes)
return ranks
def nx_all_nodes_between(graph, source, target, data=False):
"""
Find all nodes with on paths between source and target.
"""
import utool as ut
import networkx as nx
if source is None:
# assume there is a single source
sources = list(ut.nx_source_nodes(graph))
assert len(sources) == 1, (
'specify source if there is not only one')
source = sources[0]
if target is None:
# assume there is a single source
sinks = list(ut.nx_sink_nodes(graph))
assert len(sinks) == 1, (
'specify sink if there is not only one')
target = sinks[0]
all_simple_paths = list(nx.all_simple_paths(graph, source, target))
nodes = list(ut.union_ordered(ut.flatten(all_simple_paths)))
return nodes
def nx_all_simple_edge_paths(G, source, target, cutoff=None, keys=False,
data=False):
"""
Returns each path from source to target as a list of edges.
This function is meant to be used with MultiGraphs or MultiDiGraphs.
When ``keys`` is True each edge in the path is returned with its unique key
identifier. In this case it is possible to distinguish between different
paths along different edges between the same two nodes.
Derived from simple_paths.py in networkx
"""
if cutoff is None:
cutoff = len(G) - 1
if cutoff < 1:
return
import six
visited_nodes = [source]
visited_edges = []
edge_stack = [iter(G.edges(source, keys=keys, data=data))]
while edge_stack:
children_edges = edge_stack[-1]
child_edge = six.next(children_edges, None)
if child_edge is None:
edge_stack.pop()
visited_nodes.pop()
if len(visited_edges) > 0:
visited_edges.pop()
elif len(visited_nodes) < cutoff:
child_node = child_edge[1]
if child_node == target:
yield visited_edges + [child_edge]
elif child_node not in visited_nodes:
visited_nodes.append(child_node)
visited_edges.append(child_edge)
edge_stack.append(iter(G.edges(child_node, keys=keys, data=data)))
else:
for edge in [child_edge] + list(children_edges):
if edge[1] == target:
yield visited_edges + [edge]
edge_stack.pop()
visited_nodes.pop()
if len(visited_edges) > 0:
visited_edges.pop()
def nx_delete_node_attr(graph, key, nodes=None):
removed = 0
keys = [key] if not isinstance(key, list) else key
for key in keys:
if nodes is None:
nodes = list(graph.nodes())
for node in nodes:
try:
del graph.node[node][key]
removed += 1
except KeyError:
pass
return removed
def nx_delete_edge_attr(graph, key, edges=None):
removed = 0
keys = [key] if not isinstance(key, list) else key
for key in keys:
if graph.is_multigraph():
if edges is None:
edges = list(graph.edges(keys=graph.is_multigraph()))
for edge in edges:
u, v, k = edge
try:
del graph[u][v][k][key]
removed += 1
except KeyError:
pass
else:
if edges is None:
edges = list(graph.edges())
for edge in graph.edges():
u, v = edge
try:
del graph[u][v][key]
removed += 1
except KeyError:
pass
return removed
def nx_delete_None_edge_attr(graph, edges=None):
removed = 0
if graph.is_multigraph():
if edges is None:
edges = list(graph.edges(keys=graph.is_multigraph()))
for edge in edges:
u, v, k = edge
data = graph[u][v][k]
for key in data.keys():
try:
if data[key] is None:
del data[key]
removed += 1
except KeyError:
pass
else:
if edges is None:
edges = list(graph.edges())
for edge in graph.edges():
u, v = edge
data = graph[u][v]
for key in data.keys():
try:
if data[key] is None:
del data[key]
removed += 1
except KeyError:
pass
return removed
def nx_delete_None_node_attr(graph, nodes=None):
removed = 0
if nodes is None:
nodes = list(graph.nodes())
for node in graph.nodes():
data = graph.node[node]
for key in data.keys():
try:
if data[key] is None:
del data[key]
removed += 1
except KeyError:
pass
return removed
def nx_set_default_node_attributes(graph, key, val):
import networkx as nx
unset_nodes = [n for n, d in graph.nodes(data=True) if key not in d]
if isinstance(val, dict):
values = {n: val[n] for n in unset_nodes if n in val}
else:
values = {n: val for n in unset_nodes}
nx.set_node_attributes(graph, key, values)
def nx_set_default_edge_attributes(graph, key, val):
import networkx as nx
unset_edges = [(u, v) for u, v, d in graph.edges(data=True) if key not in d]
if isinstance(val, dict):
values = {e: val[e] for e in unset_edges if e in val}
else:
values = {e: val for e in unset_edges}
nx.set_edge_attributes(graph, key, values)
def nx_get_default_node_attributes(graph, key, default=None):
import networkx as nx
import utool as ut
node_list = list(graph.nodes())
partial_attr_dict = nx.get_node_attributes(graph, key)
attr_list = ut.dict_take(partial_attr_dict, node_list, default)
attr_dict = dict(zip(node_list, attr_list))
return attr_dict
def nx_from_matrix(weight_matrix, nodes=None, remove_self=True):
import networkx as nx
import utool as ut
import numpy as np
if nodes is None:
nodes = list(range(len(weight_matrix)))
weight_list = weight_matrix.ravel()
flat_idxs_ = np.arange(weight_matrix.size)
multi_idxs_ = np.unravel_index(flat_idxs_, weight_matrix.shape)
# Remove 0 weight edges
flags = np.logical_not(np.isclose(weight_list, 0))
weight_list = ut.compress(weight_list, flags)
multi_idxs = ut.compress(list(zip(*multi_idxs_)), flags)
edge_list = ut.lmap(tuple, ut.unflat_take(nodes, multi_idxs))
if remove_self:
flags = [e1 != e2 for e1, e2 in edge_list]
edge_list = ut.compress(edge_list, flags)
weight_list = ut.compress(weight_list, flags)
graph = nx.Graph()
graph.add_nodes_from(nodes)
graph.add_edges_from(edge_list)
label_list = ['%.2f' % w for w in weight_list]
nx.set_edge_attributes(graph, 'weight', dict(zip(edge_list,
weight_list)))
nx.set_edge_attributes(graph, 'label', dict(zip(edge_list,
label_list)))
return graph
def nx_ensure_agraph_color(graph):
""" changes colors to hex strings on graph attrs """
from plottool import color_funcs
import plottool as pt
#import six
def _fix_agraph_color(data):
try:
orig_color = data.get('color', None)
alpha = data.get('alpha', None)
color = orig_color
if color is None and alpha is not None:
color = [0, 0, 0]
if color is not None:
color = pt.ensure_nonhex_color(color)
#if isinstance(color, np.ndarray):
# color = color.tolist()
color = list(color_funcs.ensure_base255(color))
if alpha is not None:
if len(color) == 3:
color += [int(alpha * 255)]
else:
color[3] = int(alpha * 255)
color = tuple(color)
if len(color) == 3:
data['color'] = '#%02x%02x%02x' % color
else:
data['color'] = '#%02x%02x%02x%02x' % color
except Exception as ex:
import utool as ut
ut.printex(ex, keys=['color', 'orig_color', 'data'])
raise
for node, node_data in graph.nodes(data=True):
data = node_data
_fix_agraph_color(data)
for u, v, edge_data in graph.edges(data=True):
data = edge_data
_fix_agraph_color(data)
def nx_makenode(graph, name, **attrkw):
if 'size' in attrkw:
attrkw['width'], attrkw['height'] = attrkw.pop('size')
graph.add_node(name, **attrkw)
return name
def nx_edges(graph, keys=False, data=False):
if graph.is_multigraph():
edges = graph.edges(keys=keys, data=data)
else:
edges = graph.edges(data=data)
#if keys:
# edges = [e[0:2] + (0,) + e[:2] for e in edges]
return edges
def dag_longest_path(graph, source, target):
"""
Finds the longest path in a dag between two nodes
"""
import networkx as nx
if source == target:
return [source]
allpaths = nx.all_simple_paths(graph, source, target)
longest_path = []
for l in allpaths:
if len(l) > len(longest_path):
longest_path = l
return longest_path
def testdata_graph():
r"""
Returns:
tuple: (graph, G)
CommandLine:
python -m utool.util_graph --exec-testdata_graph --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> (graph, G) = testdata_graph()
>>> import plottool as pt
>>> ut.ensure_pylab_qt4()
>>> pt.show_nx(G, layout='pygraphviz')
>>> ut.show_if_requested()
"""
import networkx as nx
import utool as ut
# Define adjacency list
graph = {
'a': ['b'],
'b': ['c', 'f', 'e'],
'c': ['g', 'd'],
'd': ['c', 'h'],
'e': ['a', 'f'],
'f': ['g'],
'g': ['f'],
'h': ['g', 'd'],
'i': ['j'],
'j': [],
}
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a', 'e'],
'e': ['c'],
}
#graph = {'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['a']}
#graph = {'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['e'], 'e': ['a']}
graph = {'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['e'], 'e': ['a'], 'f': ['c']}
#graph = {'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['e'], 'e': ['b']}
graph = {'a': ['b', 'c', 'd'], 'e': ['d'], 'f': ['d', 'e'], 'b': [], 'c': [], 'd': []} # double pair in non-scc
graph = {'a': ['b', 'c', 'd'], 'e': ['d'], 'f': ['d', 'e'], 'b': [], 'c': [], 'd': ['e']} # double pair in non-scc
#graph = {'a': ['b', 'c', 'd'], 'e': ['d', 'f'], 'f': ['d', 'e'], 'b': [], 'c': [], 'd': ['e']} # double pair in non-scc
#graph = {'a': ['b', 'c', 'd'], 'e': ['d', 'c'], 'f': ['d', 'e'], 'b': ['e'], 'c': ['e'], 'd': ['e']} # double pair in non-scc
graph = {'a': ['b', 'c', 'd'], 'e': ['d', 'c'], 'f': ['d', 'e'], 'b': ['e'], 'c': ['e', 'b'], 'd': ['e']} # double pair in non-scc
# Extract G = (V, E)
nodes = list(graph.keys())
edges = ut.flatten([[(v1, v2) for v2 in v2s] for v1, v2s in graph.items()])
G = nx.DiGraph()
G.add_nodes_from(nodes)
G.add_edges_from(edges)
if False:
G.remove_node('e')
del graph['e']
for val in graph.values():
try:
val.remove('e')
except ValueError:
pass
return graph, G
def dict_depth(dict_, accum=0):
if not isinstance(dict_, dict):
return accum
return max([dict_depth(val, accum + 1)
for key, val in dict_.items()])
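# Quick illustration (added): dict_depth counts levels of nested dicts.
def _dict_depth_example():
    return dict_depth({'a': {'b': {'c': None}}})  # -> 3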
def edges_to_adjacency_list(edges):
import utool as ut
children_, parents_ = list(zip(*edges))
parent_to_children = ut.group_items(parents_, children_)
#to_leafs = {tablename: path_to_leafs(tablename, parent_to_children)}
return parent_to_children
def get_ancestor_levels(graph, tablename):
import networkx as nx
import utool as ut
root = nx.topological_sort(graph)[0]
reverse_edges = [(e2, e1) for e1, e2 in graph.edges()]
child_to_parents = ut.edges_to_adjacency_list(reverse_edges)
to_root = ut.paths_to_root(tablename, root, child_to_parents)
from_root = ut.reverse_path(to_root, root, child_to_parents)
ancestor_levels_ = ut.get_levels(from_root)
ancestor_levels = ut.longest_levels(ancestor_levels_)
return ancestor_levels
def get_descendant_levels(graph, tablename):
#import networkx as nx
import utool as ut
parent_to_children = ut.edges_to_adjacency_list(graph.edges())
to_leafs = ut.path_to_leafs(tablename, parent_to_children)
descendant_levels_ = ut.get_levels(to_leafs)
descendant_levels = ut.longest_levels(descendant_levels_)
return descendant_levels
def paths_to_root(tablename, root, child_to_parents):
"""
CommandLine:
python -m utool.util_graph --exec-paths_to_root:0
python -m utool.util_graph --exec-paths_to_root:1
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> child_to_parents = {
>>> 'chip': ['dummy_annot'],
>>> 'chipmask': ['dummy_annot'],
>>> 'descriptor': ['keypoint'],
>>> 'fgweight': ['keypoint', 'probchip'],
>>> 'keypoint': ['chip'],
>>> 'notch': ['dummy_annot'],
>>> 'probchip': ['dummy_annot'],
>>> 'spam': ['fgweight', 'chip', 'keypoint']
>>> }
>>> root = 'dummy_annot'
>>> tablename = 'fgweight'
>>> to_root = paths_to_root(tablename, root, child_to_parents)
>>> result = ut.repr3(to_root)
>>> print(result)
{
'keypoint': {
'chip': {
'dummy_annot': None,
},
},
'probchip': {
'dummy_annot': None,
},
}
Example:
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> root = u'annotations'
>>> tablename = u'Notch_Tips'
>>> child_to_parents = {
>>> 'Block_Curvature': [
>>> 'Trailing_Edge',
>>> ],
>>> 'Has_Notch': [
>>> 'annotations',
>>> ],
>>> 'Notch_Tips': [
>>> 'annotations',
>>> ],
>>> 'Trailing_Edge': [
>>> 'Notch_Tips',
>>> ],
>>> }
>>> to_root = paths_to_root(tablename, root, child_to_parents)
>>> result = ut.repr3(to_root)
>>> print(result)
"""
if tablename == root:
return None
parents = child_to_parents[tablename]
return {parent: paths_to_root(parent, root, child_to_parents)
for parent in parents}
def path_to_leafs(tablename, parent_to_children):
children = parent_to_children[tablename]
if len(children) == 0:
return None
return {child: path_to_leafs(child, parent_to_children)
for child in children}
def get_allkeys(dict_):
import utool as ut
if not isinstance(dict_, dict):
return []
subkeys = [[key] + get_allkeys(val)
for key, val in dict_.items()]
return ut.unique_ordered(ut.flatten(subkeys))
def traverse_path(start, end, seen_, allkeys, mat):
import utool as ut
if seen_ is None:
seen_ = set([])
index = allkeys.index(start)
sub_indexes = np.where(mat[index])[0]
if len(sub_indexes) > 0:
subkeys = ut.take(allkeys, sub_indexes)
# subkeys_ = ut.take(allkeys, sub_indexes)
# subkeys = [subkey for subkey in subkeys_
# if subkey not in seen_]
# for sk in subkeys:
# seen_.add(sk)
if len(subkeys) > 0:
return {subkey: traverse_path(subkey, end, seen_, allkeys, mat)
for subkey in subkeys}
return None
def reverse_path(dict_, root, child_to_parents):
"""
CommandLine:
python -m utool.util_graph --exec-reverse_path --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> child_to_parents = {
>>> 'chip': ['dummy_annot'],
>>> 'chipmask': ['dummy_annot'],
>>> 'descriptor': ['keypoint'],
>>> 'fgweight': ['keypoint', 'probchip'],
>>> 'keypoint': ['chip'],
>>> 'notch': ['dummy_annot'],
>>> 'probchip': ['dummy_annot'],
>>> 'spam': ['fgweight', 'chip', 'keypoint']
>>> }
>>> to_root = {
>>> 'fgweight': {
>>> 'keypoint': {
>>> 'chip': {
>>> 'dummy_annot': None,
>>> },
>>> },
>>> 'probchip': {
>>> 'dummy_annot': None,
>>> },
>>> },
>>> }
>>> reversed_ = reverse_path(to_root, 'dummy_annot', child_to_parents)
>>> result = ut.repr3(reversed_)
>>> print(result)
{
'dummy_annot': {
'chip': {
'keypoint': {
'fgweight': None,
},
},
'probchip': {
'fgweight': None,
},
},
}
"""
# Hacky but illustrative
# TODO; implement non-hacky version
allkeys = get_allkeys(dict_)
mat = np.zeros((len(allkeys), len(allkeys)))
for key in allkeys:
if key != root:
for parent in child_to_parents[key]:
rx = allkeys.index(parent)
cx = allkeys.index(key)
mat[rx][cx] = 1
end = None
seen_ = set([])
reversed_ = {root: traverse_path(root, end, seen_, allkeys, mat)}
return reversed_
def get_levels(dict_, n=0, levels=None):
r"""
DEPRECATED
Args:
dict_ (dict_): a dictionary
n (int): (default = 0)
levels (None): (default = None)
CommandLine:
python -m utool.util_graph --test-get_levels --show
python3 -m utool.util_graph --test-get_levels --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> from_root = {
>>> 'dummy_annot': {
>>> 'chip': {
>>> 'keypoint': {
>>> 'fgweight': None,
>>> },
>>> },
>>> 'probchip': {
>>> 'fgweight': None,
>>> },
>>> },
>>> }
>>> dict_ = from_root
>>> n = 0
>>> levels = None
>>> levels_ = get_levels(dict_, n, levels)
>>> result = ut.repr2(levels_, nl=1)
>>> print(result)
[
['dummy_annot'],
['chip', 'probchip'],
['keypoint', 'fgweight'],
['fgweight'],
]
"""
if levels is None:
levels_ = [[] for _ in range(dict_depth(dict_))]
else:
levels_ = levels
if dict_ is None:
return []
for key in dict_.keys():
levels_[n].append(key)
for val in dict_.values():
get_levels(val, n + 1, levels_)
return levels_
def longest_levels(levels_):
r"""
Args:
levels_ (list):
CommandLine:
python -m utool.util_graph --exec-longest_levels --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> levels_ = [
>>> ['dummy_annot'],
>>> ['chip', 'probchip'],
>>> ['keypoint', 'fgweight'],
>>> ['fgweight'],
>>> ]
>>> new_levels = longest_levels(levels_)
>>> result = ('new_levels = %s' % (ut.repr2(new_levels, nl=1),))
>>> print(result)
new_levels = [
['dummy_annot'],
['chip', 'probchip'],
['keypoint'],
['fgweight'],
]
"""
return shortest_levels(levels_[::-1])[::-1]
# seen_ = set([])
# new_levels = []
# for level in levels_[::-1]:
# new_level = [item for item in level if item not in seen_]
# seen_ = seen_.union(set(new_level))
# new_levels.append(new_level)
# new_levels = new_levels[::-1]
# return new_levels
def shortest_levels(levels_):
r"""
Args:
levels_ (list):
CommandLine:
python -m utool.util_graph --exec-shortest_levels --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> levels_ = [
>>> ['dummy_annot'],
>>> ['chip', 'probchip'],
>>> ['keypoint', 'fgweight'],
>>> ['fgweight'],
>>> ]
>>> new_levels = shortest_levels(levels_)
>>> result = ('new_levels = %s' % (ut.repr2(new_levels, nl=1),))
>>> print(result)
new_levels = [
['dummy_annot'],
['chip', 'probchip'],
['keypoint', 'fgweight'],
]
"""
seen_ = set([])
new_levels = []
for level in levels_:
new_level = [item for item in level if item not in seen_]
seen_ = seen_.union(set(new_level))
if len(new_level) > 0:
new_levels.append(new_level)
new_levels = new_levels
return new_levels
def simplify_graph(graph):
"""
strips out everything but connectivity
Args:
graph (nx.Graph):
Returns:
nx.Graph: new_graph
CommandLine:
python3 -m utool.util_graph simplify_graph --show
python2 -m utool.util_graph simplify_graph --show
python2 -c "import networkx as nx; print(nx.__version__)"
python3 -c "import networkx as nx; print(nx.__version__)"
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> import networkx as nx
>>> graph = nx.DiGraph([('a', 'b'), ('a', 'c'), ('a', 'e'),
>>> ('a', 'd'), ('b', 'd'), ('c', 'e'),
>>> ('d', 'e'), ('c', 'e'), ('c', 'd')])
>>> new_graph = simplify_graph(graph)
>>> result = ut.repr2(list(new_graph.edges()))
>>> #adj_list = sorted(list(nx.generate_adjlist(new_graph)))
>>> #result = ut.repr2(adj_list)
>>> print(result)
[(0, 1), (0, 2), (0, 3), (0, 4), (1, 3), (2, 3), (2, 4), (3, 4)]
"""
import utool as ut
nodes = sorted(list(graph.nodes()))
node_lookup = ut.make_index_lookup(nodes)
if graph.is_multigraph():
edges = list(graph.edges(keys=True))
else:
edges = list(graph.edges())
new_nodes = ut.take(node_lookup, nodes)
if graph.is_multigraph():
new_edges = [(node_lookup[e[0]], node_lookup[e[1]], e[2], {}) for e in edges]
else:
new_edges = [(node_lookup[e[0]], node_lookup[e[1]]) for e in edges]
cls = graph.__class__
new_graph = cls()
new_graph.add_nodes_from(new_nodes)
new_graph.add_edges_from(new_edges)
return new_graph
def level_order(graph):
import utool as ut
node_to_level = ut.nx_dag_node_rank(graph)
#source = ut.nx_source_nodes(graph)[0]
#longest_paths = dict([(target, dag_longest_path(graph, source, target))
# for target in graph.nodes()])
#node_to_level = ut.map_dict_vals(len, longest_paths)
grouped = ut.group_items(node_to_level.keys(), node_to_level.values())
levels = ut.take(grouped, range(1, len(grouped) + 1))
return levels
def merge_level_order(level_orders, topsort):
"""
Merge orders of individual subtrees into a total ordering for
computation.
>>> level_orders = {
>>> 'multi_chip_multitest': [['dummy_annot'], ['chip'], ['multitest'],
>>> ['multitest_score'], ],
>>> 'multi_fgweight_multitest': [ ['dummy_annot'], ['chip', 'probchip'],
>>> ['keypoint'], ['fgweight'], ['multitest'], ['multitest_score'], ],
>>> 'multi_keypoint_nnindexer': [ ['dummy_annot'], ['chip'], ['keypoint'],
>>> ['nnindexer'], ['multitest'], ['multitest_score'], ],
>>> 'normal': [ ['dummy_annot'], ['chip', 'probchip'], ['keypoint'],
>>> ['fgweight'], ['spam'], ['multitest'], ['multitest_score'], ],
>>> 'nwise_notch_multitest_1': [ ['dummy_annot'], ['notch'], ['multitest'],
>>> ['multitest_score'], ],
>>> 'nwise_notch_multitest_2': [ ['dummy_annot'], ['notch'], ['multitest'],
>>> ['multitest_score'], ],
>>> 'nwise_notch_notchpair_1': [ ['dummy_annot'], ['notch'], ['notchpair'],
>>> ['multitest'], ['multitest_score'], ],
>>> 'nwise_notch_notchpair_2': [ ['dummy_annot'], ['notch'], ['notchpair'],
>>> ['multitest'], ['multitest_score'], ],
>>> }
>>> topsort = [u'dummy_annot', u'notch', u'probchip', u'chip', u'keypoint',
>>> u'fgweight', u'nnindexer', u'spam', u'notchpair', u'multitest',
>>> u'multitest_score']
>>> print(ut.repr3(ut.merge_level_order(level_orders, topsort)))
EG2:
level_orders = {u'normal': [[u'dummy_annot'], [u'chip', u'probchip'], [u'keypoint'], [u'fgweight'], [u'spam']]}
topsort = [u'dummy_annot', u'probchip', u'chip', u'keypoint', u'fgweight', u'spam']
"""
import utool as ut
if False:
compute_order = []
level_orders = ut.map_dict_vals(ut.total_flatten, level_orders)
level_sets = ut.map_dict_vals(set, level_orders)
for tablekey in topsort:
compute_order.append((tablekey, [groupkey for groupkey, set_ in level_sets.items() if tablekey in set_]))
return compute_order
else:
# Do on common subgraph
import itertools
# Pointer to current level.: Start at the end and
# then work your way up.
main_ptr = len(topsort) - 1
stack = []
#from six.moves import zip_longest
keys = list(level_orders.keys())
type_to_ptr = {key: -1 for key in keys}
print('level_orders = %s' % (ut.repr3(level_orders),))
for count in itertools.count(0):
print('----')
print('count = %r' % (count,))
ptred_levels = []
for key in keys:
levels = level_orders[key]
ptr = type_to_ptr[key]
try:
level = tuple(levels[ptr])
except IndexError:
level = None
ptred_levels.append(level)
print('ptred_levels = %r' % (ptred_levels,))
print('main_ptr = %r' % (main_ptr,))
# groupkeys, groupxs = ut.group_indices(ptred_levels)
# Group keys are tablenames
# They point to the (type) of the input
# num_levelkeys = len(ut.total_flatten(ptred_levels))
groupkeys, groupxs = ut.group_indices(ptred_levels)
main_idx = None
while main_idx is None and main_ptr >= 0:
target = topsort[main_ptr]
print('main_ptr = %r' % (main_ptr,))
print('target = %r' % (target,))
# main_idx = ut.listfind(groupkeys, (target,))
# if main_idx is None:
possible_idxs = [idx for idx, keytup in enumerate(groupkeys) if keytup is not None and target in keytup]
if len(possible_idxs) == 1:
main_idx = possible_idxs[0]
else:
main_idx = None
if main_idx is None:
main_ptr -= 1
if main_idx is None:
print('break I')
break
found_groups = ut.apply_grouping(keys, groupxs)[main_idx]
print('found_groups = %r' % (found_groups,))
stack.append((target, found_groups))
for k in found_groups:
type_to_ptr[k] -= 1
if len(found_groups) == len(keys):
main_ptr -= 1
if main_ptr < 0:
print('break E')
break
print('stack = %s' % (ut.repr3(stack),))
print('have = %r' % (sorted(ut.take_column(stack, 0)),))
print('need = %s' % (sorted(ut.total_flatten(level_orders.values())),))
compute_order = stack[::-1]
return compute_order
def convert_multigraph_to_graph(G):
"""
For each duplicate edge make a dummy node.
TODO: preserve data, keys, and directedness
"""
import utool as ut
edge_list = list(G.edges())
node_list = list(G.nodes())
dupitem_to_idx = ut.find_duplicate_items(edge_list)
node_to_freq = ut.ddict(lambda: 0)
remove_idxs = ut.flatten(dupitem_to_idx.values())
ut.delete_items_by_index(edge_list, remove_idxs)
for dup_edge in dupitem_to_idx.keys():
freq = len(dupitem_to_idx[dup_edge])
u, v = dup_edge[0:2]
pair_node = dup_edge
pair_nodes = [pair_node + tuple([count]) for count in range(freq)]
for pair_node in pair_nodes:
node_list.append(pair_node)
for node in dup_edge:
node_to_freq[node] += freq
edge_list.append((u, pair_node))
edge_list.append((pair_node, v))
import networkx as nx
G2 = nx.DiGraph()
G2.add_edges_from(edge_list)
G2.add_nodes_from(node_list)
return G2
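# Usage sketch (hypothetical toy graph; assumes networkx is installed):
#   >>> import networkx as nx
#   >>> G = nx.MultiDiGraph([('a', 'b'), ('a', 'b'), ('b', 'c')])
#   >>> G2 = convert_multigraph_to_graph(G)
#   >>> # the duplicated ('a', 'b') edge is routed through a dummy pair node
#   >>> ('a', 'b', 0) in G2.nodes()
#   True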
def subgraph_from_edges(G, edge_list, ref_back=True):
"""
Creates a networkx graph that is a subgraph of G
defined by the list of edges in edge_list.
Requires G to be a networkx MultiGraph or MultiDiGraph
edge_list is a list of edges in either (u,v) or (u,v,d) form
where u and v are nodes comprising an edge,
and d would be a dictionary of edge attributes
ref_back determines whether the created subgraph refers back
to the original graph and therefore changes to the subgraph's
attributes also affect the original graph, or if it is to create a
new copy of the original graph.
References:
http://stackoverflow.com/questions/16150557/nx-subgraph-from-edges
"""
# TODO: support multi-di-graph
sub_nodes = list({y for x in edge_list for y in x[0:2]})
#edge_list_no_data = [edge[0:2] for edge in edge_list]
multi_edge_list = [edge[0:3] for edge in edge_list]
if ref_back:
G_sub = G.subgraph(sub_nodes)
for edge in G_sub.edges(keys=True):
if edge not in multi_edge_list:
G_sub.remove_edge(*edge)
else:
G_sub = G.subgraph(sub_nodes).copy()
for edge in G_sub.edges(keys=True):
if edge not in multi_edge_list:
G_sub.remove_edge(*edge)
return G_sub
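# Usage sketch (hypothetical multigraph; written for an older networkx where
# .edges() returns a list, matching the rest of this module):
#   >>> import networkx as nx
#   >>> G = nx.MultiDiGraph([('a', 'b'), ('a', 'b'), ('b', 'c')])
#   >>> sub = subgraph_from_edges(G, [('a', 'b', 0)], ref_back=False)
#   >>> list(sub.edges(keys=True))
#   [('a', 'b', 0)]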
def all_multi_paths(graph, source, target, data=False):
"""
Returns specific paths along multi-edges from the source to this table.
Multipaths are identified by edge keys.
Returns all paths from source to target. This function treats multi-edges
as distinct and returns the key value in each edge tuple that defines a
path.
Example:
>>> from dtool.depcache_control import * # NOQA
>>> from utool.util_graph import * # NOQA
>>> from dtool.example_depcache import testdata_depc
>>> depc = testdata_depc()
>>> graph = depc.graph
>>> source = depc.root
>>> target = 'notchpair'
>>> path_list1 = ut.all_multi_paths(graph, depc.root, 'notchpair')
>>> path_list2 = ut.all_multi_paths(graph, depc.root, 'spam')
>>> result1 = ('path_list1 = %s' % ut.repr3(path_list1, nl=1))
>>> result2 = ('path_list2 = %s' % ut.repr3(path_list2, nl=2))
>>> result = '\n'.join([result1, result2])
>>> print(result)
path_list1 = [
[('dummy_annot', 'notch', 0), ('notch', 'notchpair', 0)],
[('dummy_annot', 'notch', 0), ('notch', 'notchpair', 1)],
]
path_list2 = [
[
('dummy_annot', 'chip', 0),
('chip', 'keypoint', 0),
('keypoint', 'fgweight', 0),
('fgweight', 'spam', 0),
],
[
('dummy_annot', 'chip', 0),
('chip', 'keypoint', 0),
('keypoint', 'spam', 0),
],
[
('dummy_annot', 'chip', 0),
('chip', 'spam', 0),
],
[
('dummy_annot', 'probchip', 0),
('probchip', 'fgweight', 0),
('fgweight', 'spam', 0),
],
]
"""
path_multiedges = list(nx_all_simple_edge_paths(graph, source, target,
keys=True, data=data))
return path_multiedges
#import copy
#import utool as ut
#import networkx as nx
#all_simple_paths = list(nx.all_simple_paths(graph, source, target))
#paths_from_source2 = ut.unique(ut.lmap(tuple, all_simple_paths))
#path_edges2 = [tuple(ut.itertwo(path)) for path in paths_from_source2]
## expand paths with multi edge indexes
## hacky implementation
#expanded_paths = []
#for path in path_edges2:
# all_paths = [[]]
# for u, v in path:
# mutli_edge_data = graph.edge[u][v]
# items = list(mutli_edge_data.items())
# K = len(items)
# if len(items) == 1:
# path_iter = [all_paths]
# pass
# elif len(items) > 1:
# path_iter = [[copy.copy(p) for p in all_paths]
# for k_ in range(K)]
# for (k, edge_data), paths in zip(items, path_iter):
# for p in paths:
# p.append((u, v, {k: edge_data}))
# all_paths = ut.flatten(path_iter)
# expanded_paths.extend(all_paths)
#if data:
# path_multiedges = [[(u, v, k, d) for u, v, kd in path for k, d in kd.items()]
# for path in expanded_paths]
#else:
# path_multiedges = [[(u, v, k) for u, v, kd in path for k in kd.keys()]
# for path in expanded_paths]
## path_multiedges = [[(u, v, list(kd.keys())[0]) for u, v, kd in path]
## for path in expanded_paths]
## path_multiedges = expanded_paths
#return path_multiedges
def reverse_path_edges(edge_list):
return [(edge[1], edge[0],) + tuple(edge[2:]) for edge in edge_list][::-1]
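# e.g. (sketch): reverse_path_edges([('a', 'b', 0), ('b', 'c', 1)])
# returns [('c', 'b', 1), ('b', 'a', 0)]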
def bfs_multi_edges(G, source, reverse=False, keys=True, data=False):
"""Produce edges in a breadth-first-search starting at source.
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by <NAME>, July 2004.
"""
from collections import deque
from functools import partial
if reverse:
G = G.reverse()
edges_iter = partial(G.edges_iter, keys=keys, data=data)
visited_nodes = set([source])
# visited_edges = set([])
queue = deque([(source, edges_iter(source))])
while queue:
parent, edges = queue[0]
try:
edge = next(edges)
edge_nodata = edge[0:3]
# if edge_nodata not in visited_edges:
yield edge
# visited_edges.add(edge_nodata)
child = edge_nodata[1]
if child not in visited_nodes:
visited_nodes.add(child)
queue.append((child, edges_iter(child)))
except StopIteration:
queue.popleft()
def bfs_conditional(G, source, reverse=False, keys=True, data=False,
yield_nodes=True, yield_condition=None,
continue_condition=None):
"""
Produce edges in a breadth-first-search starting at source, but only return
nodes that satisfy a condition, and only iterate past a node if it
satisfies a different condition.
conditions are callables that take (G, child, edge) and return true or false
"""
from collections import deque
from functools import partial
if reverse and hasattr(G, 'reverse'):
G = G.reverse()
#edges_iter = partial(G.edges_iter, keys=keys, data=data)
import networkx as nx
if isinstance(G, nx.Graph):
edges_iter = partial(G.edges, data=data)
else:
edges_iter = partial(G.edges, keys=keys, data=data)
#list(G.edges_iter('multitest', keys=True, data=True))
visited_nodes = set([source])
# visited_edges = set([])
new_edges = edges_iter(source)
if isinstance(new_edges, list):
new_edges = iter(new_edges)
queue = deque([(source, new_edges)])
while queue:
parent, edges = queue[0]
try:
edge = next(edges)
edge_nodata = edge[0:3]
# if edge_nodata not in visited_edges:
# visited_edges.add(edge_nodata)
child = edge_nodata[1]
if yield_condition is None or yield_condition(G, child, edge):
if yield_nodes:
yield child
else:
yield edge
# Add children to queue if the condition is satisfied
if continue_condition is None or continue_condition(G, child, edge):
if child not in visited_nodes:
visited_nodes.add(child)
new_edges = edges_iter(child)
if isinstance(new_edges, list):
new_edges = iter(new_edges)
queue.append((child, new_edges))
except StopIteration:
queue.popleft()
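# Usage sketch (hypothetical graph; assumes a networkx version whose .edges()
# returns a list, as assumed elsewhere in this module). Children of 'b' are
# never expanded because the continue_condition rejects it:
#   >>> import networkx as nx
#   >>> G = nx.DiGraph([('a', 'b'), ('b', 'c'), ('a', 'd')])
#   >>> stop_at_b = lambda G_, child, edge: child != 'b'
#   >>> sorted(bfs_conditional(G, 'a', continue_condition=stop_at_b))
#   ['b', 'd']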
def bzip(*args):
"""
broadcasting zip. Only broadcasts on the first dimension
args = [np.array([1, 2, 3, 4]), [[1, 2, 3]]]
"""
import utool as ut
needs_cast = [isinstance(arg, list) for arg in args]
arg_containers = [np.empty(len(arg), dtype=object) if flag else arg
for arg, flag in zip(args, needs_cast)]
empty_containers = ut.compress(arg_containers, needs_cast)
tocast_args = ut.compress(args, needs_cast)
for container, arg in zip(empty_containers, tocast_args):
container[:] = arg
#[a.shape for a in arg_containers]
bc = np.broadcast(*arg_containers)
return bc
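# Usage sketch: each element of the first argument is paired with the single
# (broadcast) list from the second argument:
#   >>> for a, b in bzip(np.array([1, 2, 3]), [['x', 'y']]):
#   ...     print(a, b)
#   1 ['x', 'y']
#   2 ['x', 'y']
#   3 ['x', 'y']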
def color_nodes(graph, labelattr='label', brightness=.878, sat_adjust=None):
""" Colors edges and nodes by nid """
import plottool as pt
import utool as ut
import networkx as nx
node_to_lbl = nx.get_node_attributes(graph, labelattr)
unique_lbls = ut.unique(node_to_lbl.values())
ncolors = len(unique_lbls)
if (ncolors) == 1:
unique_colors = [pt.NEUTRAL_BLUE]
else:
unique_colors = pt.distinct_colors(ncolors, brightness=brightness)
if sat_adjust:
unique_colors = [
pt.color_funcs.adjust_hsv_of_rgb(c, sat_adjust=sat_adjust)
for c in unique_colors
]
# Find edges and aids strictly between two nids
lbl_to_color = dict(zip(unique_lbls, unique_colors))
node_to_color = {node: lbl_to_color[lbl] for node, lbl in node_to_lbl.items()}
nx.set_node_attributes(graph, 'color', node_to_color)
ut.nx_ensure_agraph_color(graph)
def graph_info(graph, ignore=None, stats=False, verbose=False):
import utool as ut
node_attrs = list(graph.node.values())
edge_attrs = list(ut.take_column(graph.edges(data=True), 2))
if stats:
import utool
with utool.embed_on_exception_context:
import pandas as pd
node_df = pd.DataFrame(node_attrs)
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
    self.index.set_labels(labels[0])
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import Index, Period, PeriodIndex, Series, date_range, offsets, period_range
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex:
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="D")]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(
pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)
)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period("4/2/2012", freq="B")
with tm.assert_produces_warning(FutureWarning):
index = PeriodIndex(start=p, periods=10)
expected = period_range(start="4/2/2012", periods=10, freq="B")
tm.assert_index_equal(index, expected)
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq="Q-DEC")
expected = period_range("1990Q3", "2009Q2", freq="Q-DEC")
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq="2Q-DEC")
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="M")
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="2M")
msg = "Can either instantiate from fields or endpoints, but not both"
with pytest.raises(ValueError, match=msg):
PeriodIndex(
year=years, month=months, freq="M", start=Period("2007-01", freq="M")
)
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq="M")
exp = period_range("2007-01", periods=3, freq="M")
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
with pytest.raises(ValueError, match="Invalid frequency: X"):
period_range("2007-1-1", periods=500, freq="X")
def test_constructor_nano(self):
idx = period_range(
start=Period(ordinal=1, freq="N"), end=Period(ordinal=4, freq="N"), freq="N"
)
exp = PeriodIndex(
[
Period(ordinal=1, freq="N"),
Period(ordinal=2, freq="N"),
Period(ordinal=3, freq="N"),
Period(ordinal=4, freq="N"),
],
freq="N",
)
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=range(2000, 2004), quarter=list(range(4)), freq="Q-DEC")
def test_constructor_corner(self):
msg = "Not enough parameters to construct Period range"
with pytest.raises(ValueError, match=msg):
PeriodIndex(periods=10, freq="A")
start = Period("2007", freq="A-JUN")
end = Period("2010", freq="A-DEC")
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end)
msg = (
"Of the three parameters: start, end, and periods, exactly two"
" must be specified"
)
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
with pytest.raises(ValueError, match=msg):
PeriodIndex(end=end)
result = period_range("2007-01", periods=10.5, freq="M")
exp = period_range("2007-01", periods=10, freq="M")
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range("2007-01", periods=20, freq="M")
# values is an array of Period, thus can retrieve freq
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
msg = "freq not specified and cannot be inferred"
with pytest.raises(ValueError, match=msg):
PeriodIndex(idx._ndarray_values)
with pytest.raises(ValueError, match=msg):
PeriodIndex(list(idx._ndarray_values))
msg = "'Period' object is not iterable"
with pytest.raises(TypeError, match=msg):
PeriodIndex(data=Period("2007", freq="A"))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq="M")
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq == "M"
result = PeriodIndex(idx, freq="2M")
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq="D")
exp = idx.asfreq("D", "e")
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype("M8[us]"))
msg = r"Wrong dtype: datetime64\[us\]"
with pytest.raises(ValueError, match=msg):
PeriodIndex(vals, freq="D")
@pytest.mark.parametrize("box", [None, "series", "index"])
def test_constructor_datetime64arr_ok(self, box):
# https://github.com/pandas-dev/pandas/issues/23438
data = pd.date_range("2017", periods=4, freq="M")
if box is None:
data = data._values
elif box == "series":
data = pd.Series(data)
result = PeriodIndex(data, freq="D")
expected = PeriodIndex(
["2017-01-31", "2017-02-28", "2017-03-31", "2017-04-30"], freq="D"
)
tm.assert_index_equal(result, expected)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = PeriodIndex(["2013-01", "2013-03"], dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-03"], freq="M")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[M]"
idx = PeriodIndex(["2013-01-05", "2013-03-05"], dtype="period[3D]")
exp = PeriodIndex(["2013-01-05", "2013-03-05"], freq="3D")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[3D]"
# if we already have a freq and its not the same, then asfreq
# (not changed)
idx = PeriodIndex(["2013-01-01", "2013-01-02"], freq="D")
res = PeriodIndex(idx, dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-01"], freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
res = PeriodIndex(idx, freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
msg = "specified freq and dtype are different"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(["2011-01"], freq="M", dtype="period[D]")
def test_constructor_empty(self):
idx = pd.PeriodIndex([], freq="M")
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == "M"
with pytest.raises(ValueError, match="freq not specified"):
pd.PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")]
)
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array([Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")])
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
[pd.NaT, pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
)
exp = PeriodIndex(["NaT", "NaT", "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array(
[
pd.NaT,
pd.NaT,
Period("2011-01", freq="M"),
Period("2011-01", freq="M"),
]
)
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex([pd.NaT, pd.NaT])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array([pd.NaT, pd.NaT]))
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(["NaT", "NaT"])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array(["NaT", "NaT"]))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, | Period("2011-01", freq="D") | pandas.Period |
from datetime import date, datetime, timedelta
import numpy as np
from pandas.tseries.tools import to_datetime
# import after tools, dateutil check
from dateutil.relativedelta import relativedelta
import pandas.tslib as tslib
__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',
'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd',
'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',
'Week', 'WeekOfMonth',
'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano']
#----------------------------------------------------------------------
# DateOffset
class CacheableOffset(object):
_cacheable = True
class DateOffset(object):
"""
Standard kind of date increment used for a date range.
Works exactly like relativedelta in terms of the keyword args you
pass in, use of the keyword n is discouraged-- you would be better
off specifying n in the keywords you use, but regardless it is
there for you. n is needed for DateOffset subclasses.
DateOffsets work as follows. Each offset specifies a set of dates
that conform to the DateOffset. For example, Bday defines this
set to be the set of dates that are weekdays (M-F). To test if a
date is in the set of a DateOffset dateOffset we can use the
onOffset method: dateOffset.onOffset(date).
If a date is not on a valid date, the rollback and rollforward
methods can be used to roll the date to the nearest valid date
before/after the date.
DateOffsets can be created to move dates forward a given number of
valid dates. For example, Bday(2) can be added to a date to move
it two business days forward. If the date does not start on a
valid date, first it is moved to a valid date. Thus the pseudo code
is:
def __add__(date):
date = rollback(date) # does nothing if date is valid
return date + <n number of periods>
When a date offset is created for a negative number of periods,
the date is first rolled forward. The pseudo code is:
def __add__(date):
date = rollforward(date) # does nothing if date is valid
return date + <n number of periods>
Zero presents a problem. Should it roll forward or back? We
arbitrarily have it rollforward:
date + BDay(0) == BDay.rollforward(date)
Since 0 is a bit weird, we suggest avoiding its use.
"""
_cacheable = False
_normalize_cache = True
def __init__(self, n=1, **kwds):
self.n = int(n)
self.kwds = kwds
if len(kwds) > 0:
self._offset = relativedelta(**kwds)
else:
self._offset = timedelta(1)
def apply(self, other):
if len(self.kwds) > 0:
if self.n > 0:
for i in xrange(self.n):
other = other + self._offset
else:
for i in xrange(-self.n):
other = other - self._offset
return other
else:
return other + timedelta(self.n)
def isAnchored(self):
return (self.n == 1)
def copy(self):
return self.__class__(self.n, **self.kwds)
def _should_cache(self):
return self.isAnchored() and self._cacheable
def _params(self):
attrs = [(k, v) for k, v in vars(self).iteritems()
if k not in ['kwds', '_offset', 'name', 'normalize',
'busdaycalendar']]
attrs.extend(self.kwds.items())
attrs = sorted(set(attrs))
params = tuple([str(self.__class__)] + attrs)
return params
def __repr__(self):
if hasattr(self, 'name') and len(self.name):
return self.name
className = getattr(self, '_outputName', type(self).__name__)
exclude = set(['n', 'inc'])
attrs = []
for attr in self.__dict__:
if ((attr == 'kwds' and len(self.kwds) == 0)
or attr.startswith('_')):
continue
if attr not in exclude:
attrs.append('='.join((attr, repr(getattr(self, attr)))))
if abs(self.n) != 1:
plural = 's'
else:
plural = ''
out = '<%s ' % self.n + className + plural
if attrs:
out += ': ' + ', '.join(attrs)
out += '>'
return out
def __eq__(self, other):
if other is None:
return False
if isinstance(other, basestring):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if not isinstance(other, DateOffset):
return False
return self._params() == other._params()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._params())
def __call__(self, other):
return self.apply(other)
def __add__(self, other):
return self.apply(other)
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, datetime):
raise TypeError('Cannot subtract datetime from offset!')
elif type(other) == type(self):
return self.__class__(self.n - other.n, **self.kwds)
else: # pragma: no cover
raise TypeError('Cannot subtract %s from %s'
% (type(other), type(self)))
def __rsub__(self, other):
return self.__class__(-self.n, **self.kwds) + other
def __mul__(self, someInt):
return self.__class__(n=someInt * self.n, **self.kwds)
def __rmul__(self, someInt):
return self.__mul__(someInt)
def __neg__(self):
return self.__class__(-self.n, **self.kwds)
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
if type(dt) == date:
dt = datetime(dt.year, dt.month, dt.day)
if not self.onOffset(dt):
dt = dt - self.__class__(1, **self.kwds)
return dt
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
if type(dt) == date:
dt = datetime(dt.year, dt.month, dt.day)
if not self.onOffset(dt):
dt = dt + self.__class__(1, **self.kwds)
return dt
def onOffset(self, dt):
# XXX, see #1395
if type(self) == DateOffset or isinstance(self, Tick):
return True
# Default (slow) method for determining if some date is a member of the
# date range generated by this offset. Subclasses may have this
# re-implemented in a nicer way.
a = dt
b = ((dt + self) - self)
return a == b
@property
def rule_code(self):
raise NotImplementedError
@property
def freqstr(self):
try:
code = self.rule_code
except NotImplementedError:
return repr(self)
if self.n != 1:
fstr = '%d%s' % (self.n, code)
else:
fstr = code
return fstr
class BusinessDay(CacheableOffset, DateOffset):
"""
DateOffset subclass representing possibly n business days
"""
def __init__(self, n=1, **kwds):
self.n = int(n)
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.normalize = kwds.get('normalize', False)
@property
def rule_code(self):
return 'B'
def __repr__(self):
if hasattr(self, 'name') and len(self.name):
return self.name
className = getattr(self, '_outputName', self.__class__.__name__)
attrs = []
if self.offset:
attrs = ['offset=%s' % repr(self.offset)]
if abs(self.n) != 1:
plural = 's'
else:
plural = ''
out = '<%s ' % self.n + className + plural
if attrs:
out += ': ' + ', '.join(attrs)
out += '>'
return out
@property
def freqstr(self):
try:
code = self.rule_code
except NotImplementedError:
return repr(self)
if self.n != 1:
fstr = '%d%s' % (self.n, code)
else:
fstr = code
if self.offset:
fstr += self._offset_str()
return fstr
def _offset_str(self):
def get_str(td):
off_str = ''
if td.days > 0:
off_str += str(td.days) + 'D'
if td.seconds > 0:
s = td.seconds
hrs = int(s / 3600)
if hrs != 0:
off_str += str(hrs) + 'H'
s -= hrs * 3600
mts = int(s / 60)
if mts != 0:
off_str += str(mts) + 'Min'
s -= mts * 60
if s != 0:
off_str += str(s) + 's'
if td.microseconds > 0:
off_str += str(td.microseconds) + 'us'
return off_str
if isinstance(self.offset, timedelta):
zero = timedelta(0, 0, 0)
if self.offset >= zero:
off_str = '+' + get_str(self.offset)
else:
off_str = '-' + get_str(-self.offset)
return off_str
else:
return '+' + repr(self.offset)
def isAnchored(self):
return (self.n == 1)
def apply(self, other):
if isinstance(other, datetime):
n = self.n
if n == 0 and other.weekday() > 4:
n = 1
result = other
# avoid slowness below
if abs(n) > 5:
k = n // 5
result = result + timedelta(7 * k)
if n < 0 and result.weekday() > 4:
n += 1
n -= 5 * k
while n != 0:
k = n // abs(n)
result = result + timedelta(k)
if result.weekday() < 5:
n -= k
if self.normalize:
result = datetime(result.year, result.month, result.day)
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise TypeError('Only know how to combine business day with '
'datetime or timedelta!')
@classmethod
def onOffset(cls, dt):
return dt.weekday() < 5
class CustomBusinessDay(BusinessDay):
"""
**EXPERIMENTAL** DateOffset subclass representing possibly n business days
excluding holidays
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
"""
_cacheable = False
def __init__(self, n=1, **kwds):
# Check we have the required numpy version
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < '1.7.0':
raise NotImplementedError("CustomBusinessDay requires numpy >= "
"1.7.0. Current version: " +
np.__version__)
self.n = int(n)
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.normalize = kwds.get('normalize', False)
self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri')
holidays = kwds.get('holidays', [])
holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in
holidays]
self.holidays = tuple(sorted(holidays))
self.kwds['holidays'] = self.holidays
self._set_busdaycalendar()
def _set_busdaycalendar(self):
holidays = np.array(self.holidays, dtype='datetime64[D]')
self.busdaycalendar = np.busdaycalendar(holidays=holidays,
weekmask=self.weekmask)
def __getstate__(self):
""""Return a pickleable state"""
state = self.__dict__.copy()
del state['busdaycalendar']
return state
def __setstate__(self, state):
"""Reconstruct an instance from a pickled state"""
self.__dict__ = state
self._set_busdaycalendar()
@property
def rule_code(self):
return 'C'
@staticmethod
def _to_dt64(dt, dtype='datetime64'):
if isinstance(dt, (datetime, basestring)):
dt = np.datetime64(dt, dtype=dtype)
if isinstance(dt, np.datetime64):
dt = dt.astype(dtype)
else:
raise TypeError('dt must be datestring, datetime or datetime64')
return dt
def apply(self, other):
if isinstance(other, datetime):
dtype = type(other)
elif isinstance(other, np.datetime64):
dtype = other.dtype
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise TypeError('Only know how to combine trading day with '
'datetime, datetime64 or timedelta!')
dt64 = self._to_dt64(other)
day64 = dt64.astype('datetime64[D]')
time = dt64 - day64
if self.n<=0:
roll = 'forward'
else:
roll = 'backward'
result = np.busday_offset(day64, self.n, roll=roll,
busdaycal=self.busdaycalendar)
if not self.normalize:
result = result + time
result = result.astype(dtype)
if self.offset:
result = result + self.offset
return result
def onOffset(self, dt):
day64 = self._to_dt64(dt).astype('datetime64[D]')
return np.is_busday(day64, busdaycal=self.busdaycalendar)
class MonthEnd(DateOffset, CacheableOffset):
"""DateOffset of one month end"""
def apply(self, other):
other = datetime(other.year, other.month, other.day, tzinfo=other.tzinfo)
n = self.n
_, days_in_month = tslib.monthrange(other.year, other.month)
if other.day != days_in_month:
other = other + relativedelta(months=-1, day=31)
if n <= 0:
n = n + 1
other = other + relativedelta(months=n, day=31)
return other
@classmethod
def onOffset(cls, dt):
days_in_month = tslib.monthrange(dt.year, dt.month)[1]
return dt.day == days_in_month
@property
def rule_code(self):
return 'M'
class MonthBegin(DateOffset, CacheableOffset):
"""DateOffset of one month at beginning"""
def apply(self, other):
n = self.n
if other.day > 1 and n <= 0: # then roll forward if n<=0
n += 1
other = other + relativedelta(months=n, day=1)
return other
@classmethod
def onOffset(cls, dt):
return dt.day == 1
@property
def rule_code(self):
return 'MS'
class BusinessMonthEnd(CacheableOffset, DateOffset):
"""DateOffset increments between business EOM dates"""
def isAnchored(self):
return (self.n == 1)
def apply(self, other):
other = datetime(other.year, other.month, other.day)
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, other.month)
lastBDay = days_in_month - max(((wkday + days_in_month - 1)
% 7) - 4, 0)
if n > 0 and not other.day >= lastBDay:
n = n - 1
elif n <= 0 and other.day > lastBDay:
n = n + 1
other = other + relativedelta(months=n, day=31)
if other.weekday() > 4:
other = other - BDay()
return other
@property
def rule_code(self):
return 'BM'
class BusinessMonthBegin(DateOffset, CacheableOffset):
"""DateOffset of one business month at beginning"""
def apply(self, other):
n = self.n
wkday, _ = tslib.monthrange(other.year, other.month)
# coding: utf-8
# # Introducing Pandas
#
# From the [docs](http://pandas.pydata.org/pandas-docs/stable/index.html):
#
# > A Python package providing fast, flexible, and expressive data structures designed to make working with “relational” or “labeled” data both easy and intuitive.
#
# We also use [matplotlib](http://matplotlib.org/):
#
# > A Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms.
#
# Requirements:
#
# ```
# (venv) $ pip install pandas matplotlib
# ```
#
# We're going to see a sliver of the functionality provided by these packages.
# In[1]:
import pandas as pd
pd.options.display.max_rows = 20
get_ipython().magic('matplotlib inline')
# ## Introducing `DataFrame`
# From the [docs](http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe):
#
# > __DataFrame__ is a 2-dimensional labeled data structure with columns of potentially different types. You can think of it like a spreadsheet or SQL table. It is generally the most commonly used pandas object.
#
# There are many ways to get a `DataFrame`, but we'll start with a list of dictionaries.
# In[2]:
df = pd.DataFrame([
{'integer': 1, 'float': 1.0, 'string': 'one'},
{'integer': 2, 'float': 2.0, 'string': 'two'},
{'integer': 2, 'float': 2.0, 'string': 'two'},
{'integer': 3, 'float': 3.0, 'string': 'three'},
])
# Print some details about the DataFrame
df.info()
df
# The Jupyter Notebook automatically renders `DataFrame` as HTML!
#
# Note the first column; this is an `Index`, and is an essential component of `DataFrame`. Here, it was auto-generated, but we can also set it:
# In[3]:
df_index = df.set_index('string')
df_index
# The `Index` plays a key role in slicing the `DataFrame`:
# In[4]:
# Slice by label
df_index.loc['two']
# In[5]:
# Slice by position
df_index.iloc[-2:]
# We can also get individual columns:
# In[6]:
floats = df_index['float']
floats
# This is a `Series`, which is essentially a one-dimensional `DataFrame`, with a defined data type. Put another way, a `DataFrame` is a collection of `Series`.
#
# Note that the `Series` retained the `Index` of our `DataFrame`, so we can use similar slicing:
# In[7]:
floats['two']
# `Series` and `DataFrame` support element-wise operations:
# In[8]:
df_index['float'] * df_index['integer']
# In[9]:
df_index * df_index
# In[10]:
number_format = 'Number {}'.format
df_index['integer'].apply(number_format)
# In[11]:
df_index.applymap(number_format)
# ## Using `DataFrame` with Django
# Django gives us a handy way to build a list of dictionaries:
# In[12]:
gig_values = Gig.objects.past().published().values('date', 'venue__name', 'venue__city')
gig_values[:5]
# `DataFrame` doesn't know what to do with a `QuerySet`; it wants something that looks more like a list.
# We could use `list(gig_values)`, but `gig_values.iterator()` is more efficient.
# In[13]:
gigs = pd.DataFrame(gig_values.iterator())
gigs.info()
gigs
# This is a good place to start, and we've already got the answer to "How many gigs have we played"?
#
# However, there are a few ways we can make this easier to work with:
#
# - Shorter column names
# - Predictable column order
# - Indexed and sorted by date
#
# For more control, we'll use a list of tuples to initialize the DataFrame.
# In[14]:
gig_values = Gig.objects.past().published().values_list('date', 'venue__name', 'venue__city')
gig_values[:5]
# In[15]:
gigs = pd.DataFrame(gig_values.iterator(), columns=['date', 'venue', 'city'])
gigs['date'] = pd.to_datetime(gigs['date'])
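# In[16]:

# A possible next step (sketch): finish the wishlist above by indexing and
# sorting the gigs on the date column.
gigs = gigs.set_index('date').sort_index()
gigs.info()
gigs.tail()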
from sklearn.preprocessing import MinMaxScaler
from Common.Comparators.Index.AbstractIndexComparator import AbstractIndexComparator
from Common.StockOptions.Yahoo.YahooStockOption import YahooStockOption
from sklearn import preprocessing
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
import math
class IndexComparator(AbstractIndexComparator):
__corr_series: pd.Series = pd.Series()
__corr_idx_df: pd.DataFrame = pd.DataFrame()
import pydicom
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib as mpl
import os
mpl.rcParams["figure.dpi"] = 150
def extract_parameters(data, get_name = False):
'''This function extracts metadata information from the DICOM header and outputs it as a dictionary'''
dct_params_1 = {'Model_name': 0x00081090,
'Manufacturer':0x00080070}
dct_params_2 = {'RCX0': 0x00186018,
'RCY0': 0x0018601a,
'RCX1': 0x0018601c,
'RCY1': 0x0018601e,
'Phys_units_X': 0x00186024,
'Phys_units_Y': 0x00186026,
'Phys_delta_X': 0x0018602c,
'Phys_delta_Y': 0x0018602e}
seq = data[0x0018, 0x6011]
data_sub = seq[0]
dct_params={}
for key in dct_params_1.keys():
try: # guard the lookup because not all DICOM header fields are always present
dct_params[key] = [data[dct_params_1[key]].value]
except:
dct_params[key] = ['None']
for key in dct_params_2.keys():
try: # guard the lookup because not all DICOM header fields are always present
dct_params[key] = [data_sub[dct_params_2[key]].value]
except:
dct_params[key] = ['None']
if get_name:
plt.imshow(data.pixel_array), plt.show()
cond = 'N'
while cond == 'N':
answer = input('Give the name of the transducer: ')
print(answer)
cond = input('Is the name okay [Y/N]?' )
dct_params['Transducer_name'] = [answer]
#print(dct_params)
return dct_params
def get_name_from_df(df, df1):
'''This function fetches the name from df based on information in df1 '''
#Compare:
res = df.loc[(df['Model_name']==df1['Model_name'][0])
& (df['RCX0'] == df1['RCX0'][0])
& (df['RCY0'] == df1['RCY0'][0])
& (df['RCX1'] == df1['RCX1'][0])
& (df['RCY1'] == df1['RCY1'][0])
& (df['Phys_units_X'] == df1['Phys_units_X'][0])
& (df['Phys_units_Y'] == df1['Phys_units_Y'][0])
& (df['Phys_delta_X'] == df1['Phys_delta_X'][0])
& (df['Phys_delta_Y'] == df1['Phys_delta_Y'][0])]
if res.empty:
name = 'Not_found_from_LUT' #if no name then return this
else:
name = res.iloc[0]['Transducer_name']#get name
return name
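# Typical lookup flow (sketch; the file paths below are placeholders):
#   data = pydicom.dcmread('path/to/ultrasound.dcm')
#   df1 = pd.DataFrame(data=extract_parameters(data))
#   lut = pd.read_excel('path/to/LUT2.xls')
#   transducer_name = get_name_from_df(lut, df1)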
#%%
if __name__ == "__main__":
data_path = 'D:/AI_laatu/US_analysis/data_old/samsungmammo/'
filenames = os.listdir(data_path)
excel_writer = 'D:/AI_laatu/US_analysis/LUT2.xls'
for filename in filenames:
data = pydicom.dcmread(os.path.join(data_path, filename))
dct_params = extract_parameters(data, get_name = True)
df1 = pd.DataFrame(data = dct_params)
try:
df = pd.read_excel(excel_writer)
except:
df = pd.DataFrame({})
"""Insert, mix, update, or normalize a GTFS."""
import logging
import pandas as pd
from mixer.gtfs.mixer.reader import ReaderETL
from mixer.gtfs.mixer.writer import WriterGTFS
from mixer.gtfs.reader.controller import Controller as CR
from mixer.gtfs.normalizer.controller import Controller as CN
from mixer.gtfs.mapper.controller import Controller as CM
from mixer.gtfs.versioner.controller import Controller as CV
from mixer.gtfs.subseter.controller import Controller as SC
from mixer.gtfs.generater.time_distance import TimeDist
from mixer.gtfs.crosser.model import Model as GC
from mixer.gtfs.separater.model import Model as GD
from utilities.decorator import logged
from mixer.glogger import logger
class Controller(object):
"""Main function of mixer."""
def __init__(self, db_name, gtfs_path):
"""Constructor."""
self.db_name = db_name
self.gtfs_path = gtfs_path
self.reader = ReaderETL(db_name)
self.writer = WriterGTFS(db_name)
def insert_first_gtfs(self, dict_df):
"""Insert gtfs if first one."""
stops = dict_df["Stop"]
dict_df["TransferTimesNDistances"] = self.gen_time_dist(stops, stops)
logger.log(logging.INFO, EventLog.log_insert_gtfs)
self.writer.insert_gtfs(dict_df)
def merge_gtfs(self, dict_df):
"""Merge and then insert gtfs."""
dict_gtfs_in_base = self.reader.read_database()
gtfs_ = list(dict_gtfs_in_base["Gtfs"])
gtfs_id = dict_df["Gtfs"]["Id"].iloc[0]
if gtfs_id in gtfs_:
msg = "{} gtfs is already in the database".format(gtfs_id)
logger.log(logging.ERROR, msg)
return 0
diff = GD(dict_df, dict_gtfs_in_base)
# Handle the GTFS intersection.
ct = GC(dict_gtfs_in_base, dict_df, self.db_name)
ct.gtfs_intersection()
new_data = diff.whats_new()
up_data = diff.whats_up(new_data)
end_data = diff.whats_end()
new_stops = new_data["Stop"]
other_stops = dict_gtfs_in_base["Stop"]
all_stops = | pd.concat([other_stops, new_stops]) | pandas.concat |
import pandas as pd
import numpy as np
import h5py, os, json, sys, shutil
from uuid import uuid4
from pythologist_image_utilities import map_image_ids
from pythologist_reader.qc import QC
from pythologist import CellDataFrame
"""
These are classes to help deal with cell-level image data
"""
class CellFrameGeneric(object):
"""
A generic CellFrameData object
"""
def __init__(self):
self._processed_image_id = None
self._images = {} # Database of Images
self._id = uuid4().hex
self.frame_name = None
self.data_tables = {
'cells':{'index':'cell_index',
'columns':['x','y','phenotype_index',
'region_index']},
'cell_tags':{'index':'db_id',
'columns':['tag_index','cell_index']},
'cell_measurements':{'index':'measurement_index',
'columns':['cell_index','statistic_index','feature_index','channel_index','value']},
'measurement_features':{'index':'feature_index',
'columns':['feature_label']},
'measurement_channels':{'index':'channel_index',
'columns':['channel_label','channel_abbreviation','image_id']},
'measurement_statistics':{'index':'statistic_index',
'columns':['statistic_label']},
'phenotypes':{'index':'phenotype_index',
'columns':['phenotype_label']},
'segmentation_images':{'index':'db_id',
'columns':['segmentation_label','image_id']},
'regions':{'index':'region_index',
'columns':['region_label','region_size','image_id']},
'cell_interactions':{'index':'db_id',
'columns':['cell_index','neighbor_cell_index','pixel_count','touch_distance']},
'tags':{'index':'tag_index',
'columns':['tag_label']}
}
        self._data = {} # Do not access directly. Use set_data and get_data instead.
for x in self.data_tables.keys():
self._data[x] = pd.DataFrame(columns=self.data_tables[x]['columns'])
self._data[x].index.name = self.data_tables[x]['index']
@property
def id(self):
"""
Returns the project UUID4
"""
return self._id
@property
def shape(self):
"""
Returns the (tuple) shape of the image (rows,columns)
"""
return self.processed_image.shape
@property
def processed_image_id(self):
"""
Returns (str) id of the frame object
"""
return self._processed_image_id
@property
def processed_image(self):
"""
Returns (numpy.array) of the processed_image
"""
return self._images[self._processed_image_id].copy()
def set_processed_image_id(self,image_id):
"""
Args:
image_id (str): set the id of the frame object
"""
self._processed_image_id = image_id
@property
def table_names(self):
"""
Return a list of data table names
"""
return list(self.data_tables.keys())
def set_data(self,table_name,table):
"""
Set the data table
Args:
table_name (str): the table name
table (pd.DataFrame): the input table
"""
# Assign data to the standard tables. Do some column name checking to make sure we are getting what we expect
if table_name not in self.data_tables: raise ValueError("Error table name doesn't exist in defined formats")
if set(list(table.columns)) != set(self.data_tables[table_name]['columns']): raise ValueError("Error column names don't match defined format\n"+\
str(list(table.columns))+"\n"+\
str(self.data_tables[table_name]['columns']))
if table.index.name != self.data_tables[table_name]['index']: raise ValueError("Error index name doesn't match defined format")
self._data[table_name] = table.loc[:,self.data_tables[table_name]['columns']].copy() # Auto-sort, and assign a copy so we aren't ever assigning by reference
def set_regions(self,regions,use_processed_region=True,unset_label='undefined',verbose=False):
"""
Alter the regions in the frame
Args:
            regions (dict): a dictionary of mutually exclusive region labels and binary masks.
                            If the given masks do not cover the whole workable area, the
                            uncovered area is assigned to an extra region named by 'unset_label'.
            use_processed_region (bool): default True; restrict each mask to the processed image region
            unset_label (str): label given to the uncovered area (default 'undefined')
"""
# delete our current regions
regions = regions.copy()
image_ids = list(self.get_data('mask_images')['image_id'])
image_ids = [x for x in image_ids if x != self.processed_image_id]
for image_id in image_ids: del self._images[image_id]
labels = list(regions.keys())
ids = [uuid4().hex for x in labels]
sizes = [regions[x].sum() for x in labels]
remainder = np.ones(self.processed_image.shape)
if use_processed_region: remainder = self.processed_image
for i,label in enumerate(labels):
my_image = regions[label]
if use_processed_region: my_image = my_image&self.processed_image
self._images[ids[i]] = my_image
remainder = remainder & (~my_image)
if verbose: sys.stderr.write("Remaining areas after setting are "+str(remainder.sum().sum())+"\n")
if remainder.sum().sum() > 0:
labels += [unset_label]
sizes += [remainder.sum().sum()]
ids += [uuid4().hex]
self._images[ids[-1]] = remainder
regions[unset_label] = remainder
regions2 = pd.DataFrame({'region_label':labels,
'region_size':sizes,
'image_id':ids
})
regions2.index.name = 'region_index'
self.set_data('regions',regions2)
        def get_label(x,y,regions_dict):
            # return the label of the first region mask that covers (x,y); np.nan if none does
            for label in regions_dict:
                if regions_dict[label][y][x] == 1: return label
            return np.nan
recode = self.get_data('cells').copy()
recode['new_region_label'] = recode.apply(lambda x: get_label(x['x'],x['y'],regions),1)
## see how many we need to drop because the centroid fall in an unprocessed region
if verbose: sys.stderr.write(str(recode.loc[recode['new_region_label'].isna()].shape[0])+" cells with centroids beyond the processed region are being dropped\n")
recode = recode.loc[~recode['new_region_label'].isna()].copy()
recode = recode.drop(columns='region_index').reset_index().\
merge(regions2[['region_label']].reset_index(),
left_on='new_region_label',right_on='region_label').\
drop(columns=['region_label','new_region_label']).set_index('cell_index')
self.set_data('cells',recode)
return
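    # Illustrative usage sketch (assumes `frame` is a populated CellFrameGeneric and the
    # masks are 2D binary numpy arrays with the same shape as frame.processed_image):
    #
    #   frame.set_regions({'Tumor': tumor_mask, 'Stroma': stroma_mask},
    #                     use_processed_region=True, unset_label='undefined')
    #   frame.get_data('regions')   # one row per region: region_label, region_size, image_id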
def get_data(self,table_name):
"""
Get the data table
Args:
table_name (pandas.DataFrame): the table you access by name
"""
return self._data[table_name].copy()
def read_hdf(self,h5file,location=''):
if location != '': location = location.split('/')
else: location = []
f = h5py.File(h5file,'r')
subgroup = f
for x in location:
subgroup = subgroup[x]
table_names = [x for x in subgroup['data']]
for table_name in table_names:
loc = '/'.join(location+['data',table_name])
#print(loc)
self.set_data(table_name,pd.read_hdf(h5file,loc))
# now get images
image_names = [x for x in subgroup['images']]
for image_name in image_names:
self._images[image_name] = np.array(subgroup['images'][image_name])
self.frame_name = subgroup['meta'].attrs['frame_name']
self._id = subgroup['meta'].attrs['id']
self.set_processed_image_id(subgroup['meta'].attrs['processed_image_id'])
return
def to_hdf(self,h5file,location='',mode='w'):
f = h5py.File(h5file,mode)
f.create_group(location+'/data')
f.create_group(location+'/images')
#f.create_group(location+'/meta')
f.close()
for table_name in self.data_tables.keys():
data_table = self.get_data(table_name)
data_table.to_hdf(h5file,
location+'/data/'+table_name,
mode='a',
format='table',
complib='zlib',
complevel=9)
f = h5py.File(h5file,'a')
for image_id in self._images.keys():
f.create_dataset(location+'/images/'+image_id,data=self._images[image_id],compression='gzip',compression_opts=9)
dset = f.create_dataset(location+'/meta', (100,), dtype=h5py.special_dtype(vlen=str))
dset.attrs['frame_name'] = self.frame_name
dset.attrs['processed_image_id'] = self.processed_image_id
dset.attrs['id'] = self._id
f.close()
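    # Illustrative round-trip sketch (hypothetical path and location):
    #
    #   frame.to_hdf('frames.h5', location='samples/s1/frames/f1', mode='w')
    #   restored = CellFrameGeneric()
    #   restored.read_hdf('frames.h5', location='samples/s1/frames/f1')
    #   restored.frame_name == frame.frame_name   # True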
def cell_map(self):
"""
Return a dataframe of cell ID's and locations
"""
if 'cell_map' not in list(self.get_data('segmentation_images')['segmentation_label']): return None
cmid = self.get_data('segmentation_images').set_index('segmentation_label').loc['cell_map','image_id']
return map_image_ids(self.get_image(cmid)).rename(columns={'id':'cell_index'})
def cell_map_image(self):
"""
Return a the image of cells by ID's
"""
if 'cell_map' not in list(self.get_data('segmentation_images')['segmentation_label']): return None
cmid = self.get_data('segmentation_images').set_index('segmentation_label').loc['cell_map','image_id']
return self.get_image(cmid)
def edge_map(self):
"""
Return a dataframe of cells by ID's of coordinates only on the edge of the cells
"""
if 'edge_map' not in list(self.get_data('segmentation_images')['segmentation_label']): return None
cmid = self.get_data('segmentation_images').set_index('segmentation_label').loc['edge_map','image_id']
return map_image_ids(self.get_image(cmid)).\
rename(columns={'id':'cell_index'})
def edge_map_image(self):
"""
Return an image of edges of integers by ID
"""
if 'edge_map' not in list(self.get_data('segmentation_images')['segmentation_label']): return None
cmid = self.get_data('segmentation_images').set_index('segmentation_label').loc['edge_map','image_id']
return self.get_image(cmid)
def segmentation_info(self):
"""
Return a dataframe with info about segmentation like cell areas and circumferences
"""
# handle the case where there is no edge data
if self.edge_map() is None:
return pd.DataFrame(index=self.get_data('cells').index,columns=['edge_pixels','area_pixels'])
return self.edge_map().reset_index().groupby(['cell_index']).count()[['x']].rename(columns={'x':'edge_pixels'}).\
merge(self.cell_map().reset_index().groupby(['cell_index']).count()[['x']].rename(columns={'x':'area_pixels'}),
left_index=True,
right_index=True).reset_index().set_index('cell_index')
def interaction_map(self):
"""
Returns:
pandas.DataFrame: return a dataframe of which cells are in contact with one another
"""
return self.get_data('cell_interactions')
def set_interaction_map(self,touch_distance=1):
"""
Measure the cell-cell contact interactions
Args:
touch_distance (int): optional default is 1 distance to look away from a cell for another cell
"""
full = self.cell_map()
edge = self.edge_map()
if full is None or edge is None: return None
d1 = edge.reset_index()
d1['key'] = 1
d2 = pd.DataFrame({'mod':[-1*touch_distance,0,touch_distance]})
d2['key'] = 1
d3 = d1.merge(d2,on='key').merge(d2,on='key')
d3['x'] = d3['x'].add(d3['mod_x'])
d3['y'] = d3['y'].add(d3['mod_y'])
d3 = d3[['x','y','cell_index','key']].rename(columns={'cell_index':'neighbor_cell_index'})
im = full.reset_index().merge(d3,on=['x','y']).\
query('cell_index!=neighbor_cell_index').\
drop_duplicates().groupby(['cell_index','neighbor_cell_index']).count()[['key']].reset_index().\
rename(columns={'key':'pixel_count'})
im['touch_distance'] = touch_distance
im.index.name='db_id'
self.set_data('cell_interactions',im)
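    # Illustrative sketch: measure touching cells within 1 pixel and inspect the result.
    #
    #   frame.set_interaction_map(touch_distance=1)
    #   frame.interaction_map()   # cell_index, neighbor_cell_index, pixel_count, touch_distance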
@property
def thresholds(self):
raise ValueError('Override this to use it.')
def get_channels(self,all=False):
"""
Return a dataframe of the Channels
Args:
            all (bool): default False; if True, also include excluded channels (like autofluorescence)
Returns:
pandas.DataFrame: channel information
"""
if all: return self.get_data('measurement_channels')
d = self.get_data('measurement_channels')
return d.loc[~d['channel_label'].isin(self.excluded_channels)]
def get_regions(self):
return self.get_data('regions')
def get_labeled_raw(self,feature_label,statistic_label,all=False,channel_abbreviation=True):
"""
Like get raw but add frame labels
"""
df = self.get_raw(feature_label,statistic_label,all=all,channel_abbreviation=channel_abbreviation).reset_index()
df['frame_name'] = self.frame_name
df['frame_id'] = self.id
return df.set_index(['frame_name','frame_id','cell_index'])
def get_raw(self,feature_label,statistic_label,all=False,channel_abbreviation=True):
"""
Get the raw data
Args:
feature_label (str): name of the feature
statistic_label (str): name of the statistic to extract
            all (bool): default False; if True, output everything including excluded channels
            channel_abbreviation (bool): default True; use the channel abbreviations if available
Returns:
pandas.DataFrame: the dataframe
"""
stats = self.get_data('measurement_statistics').reset_index()
stats = stats.loc[stats['statistic_label']==statistic_label,'statistic_index'].iloc[0]
feat = self.get_data('measurement_features').reset_index()
feat = feat.loc[feat['feature_label']==feature_label,'feature_index'].iloc[0]
#region = self.get_data('regions').reset_index()
#region = region.loc[region['region_label']==region_label,'region_index'].iloc[0]
measure = self.get_data('cell_measurements')
measure = measure.loc[(measure['statistic_index']==stats)&(measure['feature_index']==feat)]
channels = self.get_data('measurement_channels')
if not all: channels = channels.loc[~channels['channel_label'].isin(self.excluded_channels)]
measure = measure.merge(channels,left_on='channel_index',right_index=True)
measure = measure.reset_index().pivot(index='cell_index',columns='channel_label',values='value')
if not channel_abbreviation: return measure
temp = dict(zip(self.get_data('measurement_channels')['channel_label'],
self.get_data('measurement_channels')['channel_abbreviation']))
return measure.rename(columns=temp)
def default_raw(self):
# override this
return None
def copy(self):
mytype = type(self)
them = mytype()
for x in self.data_tables.keys():
them._data[x] = self._data[x].copy()
return them
@property
def excluded_channels(self):
raise ValueError("Must be overridden")
    def binary_calls(self):
        """
        Return all the binary feature calls (alias of phenotype_calls)
        """
        return self.phenotype_calls()
def phenotype_calls(self):
"""
Return all the binary feature calls
"""
phenotypes = self.get_data('phenotypes')['phenotype_label'].dropna().tolist()
temp = pd.DataFrame(index=self.get_data('cells').index,columns=phenotypes)
temp = temp.fillna(0)
temp = temp.merge(self.cell_df()[['phenotype_label']],left_index=True,right_index=True)
for phenotype in phenotypes:
temp.loc[temp['phenotype_label']==phenotype,phenotype]=1
return temp.drop(columns='phenotype_label').astype(np.int8)
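    # Sketch of the output above: one int8 0/1 column per phenotype label, indexed by
    # cell_index, so a cell whose phenotype_label is e.g. 'CD8+' (hypothetical label)
    # gets 1 in the 'CD8+' column and 0 in every other phenotype column.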
def scored_calls(self):
# Must be overridden
return None
@property
def cdf(self):
"""
Return the pythologist.CellDataFrame of the frame
"""
# get our region sizes
region_count = self.get_data('regions').groupby('region_label').count()['region_size']
if region_count[region_count>1].shape[0]>0: raise ValueError("duplicate region labels not supported") # add a saftey check
region_sizes = self.get_data('regions').set_index('region_label')['region_size'].astype(int).to_dict()
# get our cells
temp1 = self.get_data('cells').drop(columns='phenotype_index').\
merge(self.get_data('regions'),
left_on='region_index',
right_index=True).drop(columns=['image_id','region_index','region_size'])
temp1['regions'] = temp1.apply(lambda x: region_sizes,1)
temp2 = self.scored_calls()
if temp2 is not None:
temp2 = temp2.apply(lambda x:
dict(zip(
list(x.index),
list(x)
))
,1).reset_index().rename(columns={0:'scored_calls'}).set_index('cell_index')
temp1 = temp1.merge(temp2,left_index=True,right_index=True)
else:
temp1['scored_calls'] = temp1.apply(lambda x: {},1)
temp3 = self.phenotype_calls().apply(lambda x:
dict(zip(
list(x.index),
list(x)
))
,1).reset_index().rename(columns={0:'phenotype_calls'}).set_index('cell_index')
temp1 = temp1.merge(temp3,left_index=True,right_index=True)
#temp1['phenotypes_present'] = json.dumps(list(
# sorted([x for x in self.get_data('phenotypes')['phenotype_label'] if x is not np.nan])
# ))
temp4 = None
# extract default values only if we have whole cell
#if "Whole Cell" in self.get_data('measurement_features')['feature_label'].tolist():
temp4 = self.default_raw()
if temp4 is not None:
temp4 = temp4.apply(lambda x:
dict(zip(
list(x.index),
list(x)
))
,1).reset_index().rename(columns={0:'channel_values'}).set_index('cell_index')
temp1 = temp1.merge(temp4,left_index=True,right_index=True)
else:
temp1['channel_values'] = np.nan
#temp5 = self.interaction_map().groupby('cell_index').\
# apply(lambda x: json.dumps(list(sorted(x['neighbor_cell_index'])))).reset_index().\
# rename(columns={0:'neighbor_cell_index'}).set_index('cell_index')
# Get neighbor data .. may not be available for all cells
# Set a default of a null frame and only try and set if there are some neighbors present
neighbors = pd.DataFrame(index=self.get_data('cells').index,columns=['neighbors'])
if self.interaction_map().shape[0] > 0:
neighbors = self.interaction_map().groupby('cell_index').\
apply(lambda x:
dict(zip(
x['neighbor_cell_index'].astype(int),x['pixel_count'].astype(int)
))
).reset_index().rename(columns={0:'neighbors'}).set_index('cell_index')
# only do edges if we have them by setting a null value for default
edge_length = pd.DataFrame(index=self.get_data('cells').index,columns=['edge_length'])
if self.edge_map() is not None:
edge_length = self.edge_map().reset_index().groupby('cell_index').count()[['x']].\
rename(columns={'x':'edge_length'})
edge_length['edge_length'] = edge_length['edge_length'].astype(int)
cell_area = pd.DataFrame(index=self.get_data('cells').index,columns=['cell_area'])
if self.cell_map() is not None:
cell_area = self.cell_map().reset_index().groupby('cell_index').count()[['x']].\
rename(columns={'x':'cell_area'})
cell_area['cell_area'] = cell_area['cell_area'].astype(int)
temp5 = cell_area.merge(edge_length,left_index=True,right_index=True).merge(neighbors,left_index=True,right_index=True,how='left')
        temp5.loc[temp5['neighbors'].isna(),'neighbors'] = temp5.loc[temp5['neighbors'].isna(),'neighbors'].apply(lambda x: {}) # these are ones we actually have measured
temp1 = temp1.merge(temp5,left_index=True,right_index=True,how='left')
temp1.loc[temp1['neighbors'].isna(),'neighbors'] = np.nan # These we were not able to measure
temp1['frame_name'] = self.frame_name
temp1['frame_id'] = self.id
temp1 = temp1.reset_index()
temp1 = temp1.sort_values('cell_index').reset_index(drop=True)
temp1['sample_name'] = 'undefined'
temp1['project_name'] = 'undefined'
temp1['sample_id'] = 'undefined'
temp1['project_id'] = 'undefined'
def _get_phenotype(d):
if d!=d: return np.nan # set to null if there is nothing in phenotype calls
vals = [k for k,v in d.items() if v == 1]
return np.nan if len(vals) == 0 else vals[0]
temp1['phenotype_label'] = temp1.apply(lambda x:
_get_phenotype(x['phenotype_calls'])
,1)
# Let's tack on the image shape
temp1['frame_shape'] = temp1.apply(lambda x: self.shape,1)
return CellDataFrame(temp1)
def binary_df(self):
temp1 = self.phenotype_calls().stack().reset_index().\
rename(columns={'level_1':'binary_phenotype',0:'score'})
temp1.loc[temp1['score']==1,'score'] = '+'
temp1.loc[temp1['score']==0,'score'] = '-'
temp1['gated'] = 0
temp1.index.name = 'db_id'
return temp1
def cell_df(self):
celldf = self.get_data('cells').\
merge(self.get_data('regions').rename(columns={'image_id':'region_image_id'}),
left_on='region_index',
right_index=True).\
merge(self.get_data('phenotypes'),left_on='phenotype_index',right_index=True).\
merge(self.segmentation_info(),left_index=True,right_index=True,how='left')
return celldf.drop(columns=['phenotype_index','region_index'])
def complete_df(self):
# a dataframe for every cell that has everything
return
def get_image(self,image_id):
"""
Args:
image_id (str): get the image by this id
Returns:
numpy.array: an image representing a 2d array
"""
return self._images[image_id].copy()
class CellSampleGeneric(object):
def __init__(self):
self._frames = {}
self._key = None
self._id = uuid4().hex
self.sample_name = np.nan
return
@property
def id(self):
"""
Return the UUID4 str
"""
return self._id
def create_cell_frame_class(self):
return CellFrameGeneric()
@property
def frame_ids(self):
"""
Return the list of frame IDs
"""
return sorted(list(self._frames.keys()))
@property
def key(self):
"""
Return a pandas.DataFrame of info about the sample
"""
return self._key
def get_frame(self,frame_id):
"""
Args:
frame_id (str): the ID of the frame you want to access
Returns:
CellFrameGeneric: the cell frame
"""
return self._frames[frame_id]
@property
def cdf(self):
"""
Return the pythologist.CellDataFrame of the sample
"""
output = []
for frame_id in self.frame_ids:
temp = self.get_frame(frame_id).cdf
temp['sample_name'] = self.sample_name
temp['sample_id'] = self.id
output.append(temp)
output = pd.concat(output).reset_index(drop=True)
output.index.name = 'db_id'
output['project_name'] = 'undefined'
output['project_id'] = 'undefined'
return CellDataFrame(pd.DataFrame(output))
def to_hdf(self,h5file,location='',mode='w'):
#print(mode)
f = h5py.File(h5file,mode)
#f.create_group(location+'/meta')
#f.create_dataset(location+'/meta/id',data=self.id)
#f.create_dataset(location+'/meta/sample_name',data=self.sample_name)
if location+'/meta' in f:
del f[location+'/meta']
dset = f.create_dataset(location+'/meta', (100,), dtype=h5py.special_dtype(vlen=str))
dset.attrs['sample_name'] = self.sample_name
dset.attrs['id'] = self._id
if location+'/frames' in f:
del f[location+'/frames']
f.create_group(location+'/frames')
f.close()
for frame_id in self.frame_ids:
frame = self._frames[frame_id]
frame.to_hdf(h5file,
location+'/frames/'+frame_id,
mode='a')
self._key.to_hdf(h5file,location+'/info',mode='r+',format='table',complib='zlib',complevel=9)
def read_hdf(self,h5file,location=''):
if location != '': location = location.split('/')
else: location = []
f = h5py.File(h5file,'r')
subgroup = f
for x in location:
subgroup = subgroup[x]
self._id = subgroup['meta'].attrs['id']
self.sample_name = subgroup['meta'].attrs['sample_name']
frame_ids = [x for x in subgroup['frames']]
for frame_id in frame_ids:
cellframe = self.create_cell_frame_class()
loc = '/'.join(location+['frames',frame_id])
#print(loc)
cellframe.read_hdf(h5file,location=loc)
self._frames[frame_id] = cellframe
#self.frame_name = str(subgroup['frames'][frame_id]['meta']['frame_name'])
#self._id = str(subgroup['frames'][frame_id]['meta']['id'])
loc = '/'.join(location+['info'])
#print(loc)
self._key = pd.read_hdf(h5file,loc)
f.close()
return
def cell_df(self):
frames = []
for frame_id in self.frame_ids:
frame = self.get_frame(frame_id).cell_df().reset_index()
key_line = self.key.set_index('frame_id').loc[[frame_id]].reset_index()
key_line['key'] = 1
frame['key'] = 1
frame = key_line.merge(frame,on='key').drop(columns = 'key')
frames.append(frame)
frames = pd.concat(frames).reset_index(drop=True)
frames.index.name = 'sample_cell_index'
return frames
def binary_df(self):
fc = self.cell_df()[['frame_id','cell_index']].reset_index()
frames = []
for frame_id in self.frame_ids:
frame = self.get_frame(frame_id).binary_df()
key_line = self.key.set_index('frame_id').loc[[frame_id]].reset_index()
key_line['key'] = 1
frame['key'] = 1
frame = key_line.merge(frame,on='key').drop(columns = 'key')
frames.append(frame)
return fc.merge(pd.concat(frames).reset_index(drop=True),on=['frame_id','cell_index'])
def interaction_map(self):
fc = self.cell_df()[['frame_id','cell_index']].reset_index()
frames = []
for frame_id in self.frame_ids:
frame = self.get_frame(frame_id).interaction_map()
key_line = self.key.set_index('frame_id').loc[[frame_id]].reset_index()
key_line['key'] = 1
frame['key'] = 1
frame = key_line.merge(frame,on='key').drop(columns = 'key')
frames.append(frame)
frames = pd.concat(frames).reset_index(drop=True)
return frames.merge(fc,on=['frame_id','cell_index']).\
merge(fc.rename(columns={'sample_cell_index':'neighbor_sample_cell_index',
'cell_index':'neighbor_cell_index'}),
on=['frame_id','neighbor_cell_index'])
def frame_iter(self):
"""
An iterator of frames
Returns:
CellFrameGeneric
"""
for frame_id in self.frame_ids:
yield self.get_frame(frame_id)
def get_labeled_raw(self,feature_label,statistic_label,all=False,channel_abbreviation=True):
"""
Return a matrix of raw data labeled by samples and frames names/ids
Returns:
DataFrame
"""
vals = []
for f in self.frame_iter():
df = f.get_labeled_raw(feature_label,statistic_label,all=all,channel_abbreviation=channel_abbreviation).reset_index()
df['sample_name'] = self.sample_name
df['sample_id'] = self.id
vals.append(df)
return pd.concat(vals).set_index(['sample_name','sample_id','frame_name','frame_id','cell_index'])
class CellProjectGeneric(object):
def __init__(self,h5path,mode='r'):
"""
Create a CellProjectGeneric object or read from/add to an existing one
Args:
h5path (str): path to read/from or store/to
mode (str): 'r' read, 'a' append, 'w' create/write, 'r+' create/append if necessary
"""
self._key = None
self.h5path = h5path
self.mode = mode
self._sample_cache_name = None
self._sample_cache = None
if mode =='r':
if not os.path.exists(h5path): raise ValueError("Cannot read a file that does not exist")
if mode == 'w' or mode == 'r+':
f = h5py.File(self.h5path,mode)
if '/samples' not in f.keys():
f.create_group('/samples')
if '/meta' not in f.keys():
dset = f.create_dataset('/meta', (100,), dtype=h5py.special_dtype(vlen=str))
else:
dset = f['/meta']
dset.attrs['project_name'] = np.nan
dset.attrs['microns_per_pixel'] = np.nan
dset.attrs['id'] = uuid4().hex
f.close()
return
def copy(self,path,overwrite=False,output_mode='r'):
if os.path.exists(path) and overwrite is False:
raise ValueError("Cannot overwrite unless overwrite is set to True")
shutil.copy(self.h5path,path)
return self.__class__(path,mode=output_mode)
def to_hdf(self,path,overwrite=False):
"""
Write this object to another h5 file
"""
self.copy(path,overwrite=overwrite)
return
@classmethod
def concat(self,path,array_like,overwrite=False,verbose=False):
if os.path.exists(path) and overwrite is False:
raise ValueError("Cannot overwrite unless overwrite is set to True")
# copy the first
arr = [x for x in array_like]
if len(arr) == 0: raise ValueError("cannot concat empty list")
if verbose: sys.stderr.write("Copy the first element\n")
cpi = arr[0].copy(path,output_mode='r+',overwrite=overwrite)
#shutil.copy(arr[0].h5path,path)
#cpi = CellProjectGeneric(path,mode='r+')
if len(arr) == 1: return
for project in array_like[1:]:
if verbose: sys.stderr.write("Add project "+str(project.id)+" "+str(project.project_name)+"\n")
for s in project.sample_iter():
if verbose: sys.stderr.write(" Add sample "+str(s.id)+" "+str(s.sample_name)+"\n")
cpi.append_sample(s)
return cpi
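    # Illustrative sketch (hypothetical paths): merge two stored projects into a new h5 file.
    #
    #   merged = CellProjectGeneric.concat('merged.h5',
    #                                      [CellProjectGeneric('a.h5'), CellProjectGeneric('b.h5')],
    #                                      overwrite=True, verbose=True)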
def append_sample(self,sample):
"""
Append sample to the project
Args:
sample (CellSampleGeneric): sample object
"""
if self.mode == 'r': raise ValueError("Error: cannot write to a path in read-only mode.")
sample.to_hdf(self.h5path,location='samples/'+sample.id,mode='a')
current = self.key
if current is None:
current = pd.DataFrame([{'sample_id':sample.id,
'sample_name':sample.sample_name}])
current.index.name = 'db_id'
else:
iteration = max(current.index)+1
addition = pd.DataFrame([{'db_id':iteration,
'sample_id':sample.id,
'sample_name':sample.sample_name}]).set_index('db_id')
current = pd.concat([current,addition])
current.to_hdf(self.h5path,'info',mode='r+',complib='zlib',complevel=9,format='table')
return
def qc(self,*args,**kwargs):
"""
Returns:
QC: QC class to do quality checks
"""
return QC(self,*args,**kwargs)
@property
def id(self):
"""
Returns the (str) UUID4 string
"""
f = h5py.File(self.h5path,'r')
name = f['meta'].attrs['id']
f.close()
return name
@property
def project_name(self):
"""
Return or set the (str) project_name
"""
f = h5py.File(self.h5path,'r')
name = f['meta'].attrs['project_name']
f.close()
return name
@project_name.setter
def project_name(self,name):
if self.mode == 'r': raise ValueError('cannot write if read only')
f = h5py.File(self.h5path,'r+')
f['meta'].attrs['project_name'] = name
f.close()
@property
def microns_per_pixel(self):
"""
Return or set the (float) microns_per_pixel
"""
f = h5py.File(self.h5path,'r')
name = f['meta'].attrs['microns_per_pixel']
f.close()
return name
@microns_per_pixel.setter
def microns_per_pixel(self,value):
if self.mode == 'r': raise ValueError('cannot write if read only')
f = h5py.File(self.h5path,'r+')
f['meta'].attrs['microns_per_pixel'] = value
f.close()
def set_id(self,name):
"""
Set the project ID
Args:
name (str): project_id
"""
if self.mode == 'r': raise ValueError('cannot write if read only')
f = h5py.File(self.h5path,'r+')
#dset = f.create_dataset('/meta', (100,), dtype=h5py.special_dtype(vlen=str))
f['meta'].attrs['id'] = name
f.close()
@property
def cdf(self):
"""
Return the pythologist.CellDataFrame of the project
"""
output = []
for sample_id in self.sample_ids:
temp = self.get_sample(sample_id).cdf
temp['project_name'] = self.project_name
temp['project_id'] = self.id
output.append(temp)
output = pd.concat(output).reset_index(drop=True)
output.index.name = 'db_id'
cdf = CellDataFrame(pd.DataFrame(output))
if self.microns_per_pixel: cdf.microns_per_pixel = self.microns_per_pixel
return cdf
def cell_df(self):
samples = []
for sample_id in self.sample_ids:
sample = self.get_sample(sample_id).cell_df().reset_index()
key_line = self.key.set_index('sample_id').loc[[sample_id]].reset_index()
key_line['key'] = 1
sample['key'] = 1
sample = key_line.merge(sample,on='key').drop(columns = 'key')
samples.append(sample)
samples = pd.concat(samples).reset_index(drop=True)
samples.index.name = 'project_cell_index'
return samples
def binary_df(self):
fc = self.cell_df()[['sample_id','frame_id','cell_index']].reset_index()
samples = []
for sample_id in self.sample_ids:
sample = self.get_sample(sample_id).binary_df()
key_line = self.key.set_index('sample_id').loc[[sample_id]].reset_index()
key_line['key'] = 1
sample['key'] = 1
sample = key_line.merge(sample,on='key').drop(columns = 'key')
samples.append(sample)
return fc.merge(pd.concat(samples).reset_index(drop=True),on=['sample_id','frame_id','cell_index'])
def interaction_map(self):
fc = self.cell_df()[['sample_id','frame_id','cell_index']].reset_index()
samples = []
for sample_id in self.sample_ids:
sample = self.get_sample(sample_id).interaction_map()
key_line = self.key.set_index('sample_id').loc[[sample_id]].reset_index()
key_line['key'] = 1
sample['key'] = 1
sample = key_line.merge(sample,on='key').drop(columns = 'key')
samples.append(sample)
samples = pd.concat(samples).reset_index(drop=True)
return samples.merge(fc,on=['sample_id','frame_id','cell_index']).\
merge(fc.rename(columns={'project_cell_index':'neighbor_project_cell_index',
'cell_index':'neighbor_cell_index'}),
on=['sample_id','frame_id','neighbor_cell_index'])
def create_cell_sample_class(self):
return CellSampleGeneric()
@property
def sample_ids(self):
"""
Return the list of sample_ids
"""
return sorted(list(self.key['sample_id']))
def get_sample(self,sample_id):
"""
Get the sample_id
Args:
sample_id (str): set the sample id
"""
if self._sample_cache_name == sample_id:
return self._sample_cache
sample = self.create_cell_sample_class()
sample.read_hdf(self.h5path,'samples/'+sample_id)
self._sample_cache_name = sample_id
self._sample_cache = sample
return sample
@property
def key(self):
"""
Get info about the project
"""
f = h5py.File(self.h5path,'r')
val = False
if 'info' in [x for x in f]: val = True
f.close()
return None if not val else pd.read_hdf(self.h5path,'info')
def sample_iter(self):
"""
        An iterator of CellSampleGeneric
"""
for sample_id in self.sample_ids: yield self.get_sample(sample_id)
def frame_iter(self):
"""
        An iterator of CellFrameGeneric
"""
for s in self.sample_iter():
for frame_id in s.frame_ids:
yield s.get_frame(frame_id)
@property
def channel_image_dataframe(self):
"""
        dataframe with info about channels and images
"""
pname = self.project_name
pid = self.id
measurements = []
for s in self.sample_iter():
sname = s.sample_name
sid = s.id
for f in s.frame_iter():
fname = f.frame_name
fid = f.id
mc = f.get_data('measurement_channels')
mc['project_name'] = pname
mc['project_id'] = pid
mc['sample_name'] = sname
mc['sample_id'] = sid
mc['frame_name'] = fname
mc['frame_id'] = fid
mc['processed_image_id'] = f.processed_image_id
measurements.append(mc)
return pd.concat(measurements).reset_index(drop=True)
def get_image(self,sample_id,frame_id,image_id):
"""
Get an image by sample frame and image id
Args:
sample_id (str): unique sample id
frame_id (str): unique frame id
image_id (str): unique image id
Returns:
numpy.array: 2d image array
"""
s = self.get_sample(sample_id)
f = s.get_frame(frame_id)
return f.get_image(image_id)
def get_labeled_raw(self,feature_label,statistic_label,all=False,channel_abbreviation=True):
"""
Return a matrix of raw data labeled by samples and frames names/ids
Returns:
DataFrame
"""
vals = []
for s in self.sample_iter():
df = s.get_labeled_raw(feature_label,statistic_label,all=all,channel_abbreviation=channel_abbreviation).reset_index()
df['project_name'] = self.project_name
df['project_id'] = self.id
vals.append(df)
return | pd.concat(vals) | pandas.concat |
from abc import ABC, abstractmethod
from collections import defaultdict
from datetime import datetime
from functools import cached_property
from typing import List, Dict, Union, Optional, Iterable
import numpy as np
import pandas as pd
from gym import Space, spaces
from pandas import Interval
from torch.utils.data import Dataset
from yacht import Mode, utils
from yacht.data.markets import Market
from yacht.data.scalers import Scaler
from yacht.data.transforms import Compose
from yacht.logger import Logger
class DatasetPeriod:
def __init__(
self,
start: datetime,
end: datetime,
window_size: int,
include_weekends: bool,
take_action_at: str = 'current',
frequency: str = 'd'
):
assert frequency in ('d', )
self.unadjusted_start = start
self.unadjusted_end = end
self.period_adjustment_size = self.compute_period_adjustment_size(
window_size=window_size,
take_action_at=take_action_at
)
# Adjust start with a 'window_size' length so we take data from the past & actually start from the given start.
self.start = utils.adjust_period_with_window(
datetime_point=start,
window_size=self.period_adjustment_size, # We also use the initial price within the period.
action='-',
include_weekends=include_weekends,
frequency=frequency
)
self.end = end
self.window_size = window_size
self.include_weekends = include_weekends
self.take_action_at = take_action_at
self.frequency = frequency
assert self.start <= self.unadjusted_start
@classmethod
def compute_period_adjustment_size(cls, window_size: int, take_action_at: str) -> int:
assert take_action_at in ('current', 'next')
if take_action_at == 'current':
return window_size - 1
elif take_action_at == 'next':
return window_size
def __len__(self) -> int:
return utils.len_period_range(
start=self.start,
end=self.end,
include_weekends=self.include_weekends
)
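# Hedged sketch of the window adjustment above (dates are illustrative): with
# window_size=3, take_action_at='current' and include_weekends=False, the start is
# pushed back by 2 trading days so the first window already ends on the requested start.
#
#   period = DatasetPeriod(start=datetime(2021, 1, 11), end=datetime(2021, 1, 29),
#                          window_size=3, include_weekends=False)
#   period.start < period.unadjusted_start       # True
#   period.period_adjustment_size                # 2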
class AssetDataset(Dataset, ABC):
PRICE_FEATURES = (
'Close',
'Open',
'High',
'Low'
)
def __init__(
self,
market: Market,
storage_dir: str,
intervals: List[str],
features: List[str],
decision_price_feature: str,
period: DatasetPeriod,
render_intervals: List[Interval],
mode: Mode,
logger: Logger,
window_size: int = 1,
):
"""
market:
storage_dir:
ticker:
intervals: data bars frequency
features: observation data features
decision_price_feature: the feature that it will used for buying / selling assets or other decision making
start:
end:
render_intervals: a list of datetime intervals to know if this environment should be rendered or not.
normalizer:
window_size: The past information that you want to add to the current item that you query from the dataset.
data: If data != None, it will be encapsulated within the Dataset Object, otherwise it will be queried
from the market.
"""
assert '1d' == intervals[0], 'One day bar interval is mandatory to exist & index=0 in input.intervals config.'
assert window_size >= 1
self.market = market
self.storage_dir = storage_dir
self.intervals = intervals
self.features = features
self.decision_price_feature = decision_price_feature
self.render_intervals = render_intervals
self.period = period
self.mode = mode
self.logger = logger
self.window_size = window_size
def close(self):
self.market.close()
@property
def period_window_size(self) -> int:
return self.period.window_size
@property
def period_adjustment_size(self) -> int:
return self.period.period_adjustment_size
@property
def take_action_at(self) -> str:
return self.period.take_action_at
@property
def first_observation_index(self) -> int:
# Starting from 0 & the minimum value for the window_size is 1.
return self.period_window_size - 1
@property
def last_observation_index(self) -> int:
return self.period_adjustment_size + self.num_days - 1
@property
def unadjusted_start(self) -> datetime:
return self.period.unadjusted_start
@property
def unadjusted_end(self) -> datetime:
return self.period.unadjusted_end
@property
def start(self) -> datetime:
return self.period.start
@property
def end(self) -> datetime:
return self.period.end
@property
def include_weekends(self) -> bool:
return self.market.include_weekends
@cached_property
def should_render(self) -> bool:
        # Because it is not efficient to render every environment, we decide what to render based on the configured render intervals.
for render_interval in self.render_intervals:
if self.start in render_interval or self.end in render_interval:
return True
return False
@property
@abstractmethod
def num_days(self) -> int:
pass
@property
@abstractmethod
def num_assets(self) -> int:
pass
@property
@abstractmethod
def asset_tickers(self) -> List[str]:
pass
@abstractmethod
def index_to_datetime(self, integer_index: int) -> datetime:
pass
@abstractmethod
def inverse_scaling(self, observation: dict, **kwargs) -> dict:
pass
@abstractmethod
def __len__(self):
pass
@abstractmethod
def __getitem__(self, current_index: int) -> Dict[str, np.array]:
"""
Args:
current_index: The relative index the data will be given from.
Returns:
The data features within the [current_index - window_size + 1, current_index] interval.
"""
pass
@abstractmethod
def __str__(self):
pass
@abstractmethod
def get_prices(self) -> pd.DataFrame:
pass
@abstractmethod
def get_decision_prices(self, t_tick: Optional[int] = None, **kwargs) -> pd.Series:
pass
@abstractmethod
def compute_mean_price(self, start: datetime, end: datetime) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def get_external_observation_space(self) -> Dict[str, Space]:
"""
Returns the gym spaces observation space in the format that the dataset gives the data.
"""
pass
class SingleAssetDataset(AssetDataset, ABC):
def __init__(
self,
ticker: str,
market: Market,
storage_dir: str,
intervals: List[str],
features: List[str],
decision_price_feature: str,
period: DatasetPeriod,
render_intervals: List[Interval],
render_tickers: List[str],
mode: Mode,
logger: Logger,
scaler: Scaler,
window_transforms: Optional[Compose] = None,
window_size: int = 1,
data: Dict[str, pd.DataFrame] = None
):
super().__init__(
market=market,
storage_dir=storage_dir,
intervals=intervals,
features=features,
decision_price_feature=decision_price_feature,
period=period,
render_intervals=render_intervals,
mode=mode,
logger=logger,
window_size=window_size,
)
self.ticker = ticker
self.scaler = scaler
self.window_transforms = window_transforms
self.render_tickers = render_tickers
if data is not None:
self.data = data
else:
self.data = dict()
for interval in self.intervals:
self.data[interval] = self.market.get(
ticker=ticker,
interval=interval,
start=self.start,
end=self.end,
features=self.features + [self.decision_price_feature],
squeeze=False
)
self.prices = self.get_prices()
def __str__(self) -> str:
return self.ticker
def __len__(self) -> int:
# All the adjusted interval.
return len(self.prices)
@property
def num_days(self) -> int:
# Only the unadjusted interval.
return utils.len_period_range(
start=self.unadjusted_start,
end=self.unadjusted_end,
include_weekends=self.include_weekends
)
@property
def num_assets(self) -> int:
return 1
@property
def asset_tickers(self) -> List[str]:
return [self.ticker]
@cached_property
def should_render(self) -> bool:
if self.ticker in self.render_tickers:
return super().should_render
return False
def index_to_datetime(self, integer_index: Union[int, Iterable]) -> Union[datetime, Iterable[datetime]]:
return self.data['1d'].index[integer_index].to_pydatetime()
def get_prices(self) -> pd.DataFrame:
return self.market.get(
ticker=self.ticker,
interval='1d',
start=self.start,
end=self.end,
features=list(self.market.DOWNLOAD_MANDATORY_FEATURES) + [self.decision_price_feature],
squeeze=False
)
def get_decision_prices(self, t_tick: Optional[int] = None, **kwargs) -> pd.Series:
if t_tick is None:
decision_prices = self.prices.loc[slice(None), self.decision_price_feature]
decision_prices.name = 'decision_price'
else:
t_datetime = self.index_to_datetime(t_tick)
decision_prices = self.prices.loc[t_datetime, self.decision_price_feature]
decision_prices = pd.Series(decision_prices, index=[self.ticker], name='decision_price')
return decision_prices
def compute_mean_price(self, start: datetime, end: datetime) -> Union[pd.DataFrame, pd.Series]:
period_data = self.data['1d'].loc[start:end, self.decision_price_feature]
period_mean = period_data.mean()
return pd.Series(period_mean, index=[self.ticker], name='mean_price')
def inverse_scaling(self, observation: dict, asset_idx: int = -1) -> dict:
for interval in self.intervals:
if asset_idx == -1:
observation[interval] = self.scaler.inverse_transform(observation[interval])
else:
observation[interval][:, :, asset_idx, :] = self.scaler.inverse_transform(
observation[interval][:, :, asset_idx, :]
)
return observation
class MultiAssetDataset(AssetDataset):
# TODO: Implement the multi-asset dependency within a DataFrame for faster processing.
def __init__(
self,
datasets: List[SingleAssetDataset],
storage_dir: str,
market: Market,
intervals: List[str],
features: List[str],
decision_price_feature: str,
period: DatasetPeriod,
render_intervals: List[Interval],
render_tickers: List[str],
mode: Mode,
logger: Logger,
window_size: int = 1,
attached_datasets: Optional[List[SingleAssetDataset]] = None
):
super().__init__(
market=market,
storage_dir=storage_dir,
intervals=intervals,
features=features,
decision_price_feature=decision_price_feature,
period=period,
render_intervals=render_intervals,
mode=mode,
logger=logger,
window_size=window_size,
)
self.datasets = datasets
self.render_tickers = render_tickers
self.attached_datasets = attached_datasets if attached_datasets is not None else []
assert self.datasets[0].num_days * len(self.datasets) == sum([dataset.num_days for dataset in self.datasets]), \
'All the datasets should have the same length.'
@property
def num_days(self) -> int:
# All the datasets have the same number of days, because they are reflecting the same time (eg. the same month).
return self.datasets[0].num_days
@property
def num_assets(self) -> int:
return len(self.datasets)
@property
def asset_tickers(self) -> List[str]:
return [dataset.ticker for dataset in self.datasets]
@cached_property
def should_render(self) -> bool:
return any([dataset.should_render for dataset in self.datasets])
def index_to_datetime(self, integer_index: Union[int, Iterable]) -> Union[datetime, Iterable[datetime]]:
# All the datasets have the same indices to dates mappings.
return self.datasets[0].index_to_datetime(integer_index)
def __len__(self):
# All the datasets have the same length.
return len(self.datasets[0])
def __getitem__(self, current_index: int) -> Dict[str, np.array]:
datasets = self.datasets + self.attached_datasets
stacked_items: Dict[str, list] = defaultdict(list)
for dataset in datasets:
item = dataset[current_index]
for key, value in item.items():
stacked_items[key].append(value)
for key, value in stacked_items.items():
stacked_items[key] = np.stack(stacked_items[key], axis=2)
return stacked_items
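    # Shape sketch (assuming each single-asset item is (window, bar, features) per interval):
    # np.stack(..., axis=2) above yields (window, bar, num_assets, features), which is the
    # layout inverse_scaling below indexes as observation[interval][:, :, asset_idx, :].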
def inverse_scaling(self, observation: dict, **kwargs) -> dict:
for asset_idx in range(self.num_assets):
dataset = self.datasets[asset_idx]
observation = dataset.inverse_scaling(observation, asset_idx)
return observation
def __str__(self):
asset_tickers = [ticker.split('-')[0] for ticker in self.asset_tickers]
return '-'.join(asset_tickers)
def get_prices(self) -> pd.DataFrame:
prices = []
for dataset in self.datasets:
dataset_prices = dataset.get_prices()
dataset_prices = dataset_prices.assign(ticker=dataset.ticker)
dataset_prices = dataset_prices.set_index(keys=['ticker'], drop=True, append=True)
prices.append(dataset_prices)
prices = pd.concat(prices)
return prices
def get_labels(self, t_tick: Optional[int] = None) -> Union[pd.DataFrame, pd.Series]:
labels = []
for dataset in self.datasets:
ticker_labels = getattr(dataset, 'labels', pd.Series())
ticker_labels.name = dataset.ticker
labels.append(ticker_labels)
labels = pd.concat(labels, axis=1)
        if t_tick is not None:
            # guard against a tick beyond the available labels (also avoids comparing against None)
            if len(labels) <= t_tick:
                return pd.Series()
            labels = labels.iloc[t_tick]
return labels
def get_decision_prices(self, t_tick: Optional[int] = None, ticker: Optional[str] = None) -> pd.Series:
if ticker is not None:
datasets = [self._pick_dataset(ticker=ticker)]
else:
datasets = self.datasets
prices = []
for dataset in datasets:
decision_prices = dataset.get_decision_prices(t_tick)
decision_prices.name = dataset.ticker
prices.append(decision_prices)
if t_tick is None:
prices = | pd.concat(prices, axis=1) | pandas.concat |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from builtins import object
import pandas as pd
from pandas import Series
import json
import os
from . import find_pmag_dir
pmag_dir = find_pmag_dir.get_pmag_dir()
data_model_dir = os.path.join(pmag_dir, 'pmagpy', 'data_model')
# if using with py2app, the directory structure is flat,
# so check to see where the resource actually is
if not os.path.exists(data_model_dir):
data_model_dir = os.path.join(pmag_dir, 'data_model')
class Vocabulary(object):
def __init__(self):
self.vocabularies = []
self.possible_vocabularies = []
self.all_codes = []
self.code_types = []
self.er_methods = []
self.pmag_methods = []
self.age_methods = []
def get_one_meth_type(self, mtype, method_list):
"""
        Get all codes of one type (e.g., 'anisotropy_estimation')
"""
cond = method_list['dtype'] == mtype
codes = method_list[cond]
return codes
def get_one_meth_category(self, category, all_codes, code_types):
"""
Get all codes in one category (i.e., all pmag codes).
        This can include multiple method types (e.g., 'anisotropy_estimation', 'sample_preparation', etc.)
"""
categories = Series(code_types[code_types[category] == True].index)
cond = all_codes['dtype'].isin(categories)
codes = all_codes[cond]
return codes
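    # Illustrative sketch (hypothetical frames): if code_types has a boolean 'pmag' column,
    # get_one_meth_category('pmag', all_codes, code_types) keeps only the rows of all_codes
    # whose 'dtype' is one of the method types flagged True in that column.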
def get_tiered_meth_category_offline(self, category):
path = os.path.join(data_model_dir, '{}_methods.txt'.format(category))
dfile = open(path)
json_data = json.load(dfile)
dfile.close()
return json_data
def get_meth_codes(self):
print('-I- Getting cached method codes for 2.5')
er_methods = self.get_tiered_meth_category_offline('er')
pmag_methods = self.get_tiered_meth_category_offline('pmag')
age_methods = self.get_tiered_meth_category_offline('age')
path = os.path.join(data_model_dir, 'code_types.txt')
with open(path, 'r') as type_file:
raw_code_types = json.load(type_file)
code_types = pd.read_json(raw_code_types)
path = os.path.join(data_model_dir, 'all_codes.txt')
with open(path, 'r') as code_file:
raw_all_codes = json.load(code_file)
all_codes = | pd.read_json(raw_all_codes) | pandas.read_json |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
from helpers import (
austin_listings,
zc_link,
apply_clustering,
rating_clustering,
review_columns,
)
app = dash.Dash(__name__)
server = app.server
app.config.suppress_callback_exceptions = True
# CONSTANTS
grouped = austin_listings.groupby("zipcode").size()
mapbox_token = "<KEY>"
geo_colors = [
"#8dd3c7",
"#ffd15f",
"#bebada",
"#fb8072",
"#80b1d3",
"#fdb462",
"#b3de69",
"#fccde5",
"#d9d9d9",
"#bc80bd",
"#ccebc5",
]
bar_coloway = [
"#fa4f56",
"#8dd3c7",
"#ffffb3",
"#bebada",
"#80b1d3",
"#fdb462",
"#b3de69",
"#fccde5",
"#d9d9d9",
"#bc80bd",
"#ccebc5",
"#ffed6f",
]
intro_text = """
**About this app**
This app applies spatial clustering and regionalization analysis to explore the [dataset of Airbnb listings in the
city of Austin](http://insideairbnb.com/get-the-data.html). Models are created using [pysal](https://pysal.readthedocs.io/en/latest/)
and scikit-learn.
Select the type of model from the radio items, then click the button to run clustering and visualize the output regions geographically on the map; computing may take a few seconds to finish. Click
on regions on the map to update the number of Airbnb listings in your highlighted group.
"""
listing_txt = """
out of *{}* total listings
""".format(
grouped.sum()
)
INIT_THRESHOLD_VAL = 5
def header_section():
return html.Div(
[
html.Img(src=app.get_asset_url("dash-logo.png"), className="logo"),
html.H4("Spatial Clustering"),
],
className="header__title",
)
def make_base_map():
# Scattermapbox with geojson layer, plot all listings on mapbox
customdata = list(
zip(
austin_listings["host_name"],
austin_listings["name"],
austin_listings["host_since"],
austin_listings["price"],
austin_listings["accommodates"],
austin_listings["availability_365"],
round(austin_listings["availability_365"] / 365 * 100, 1),
)
)
mapbox_figure = dict(
type="scattermapbox",
lat=austin_listings["latitude"],
lon=austin_listings["longitude"],
marker=dict(size=7, opacity=0.7, color="#550100"),
customdata=customdata,
name="Listings",
hovertemplate="<b>Host: %{customdata[0]}</b><br><br>"
"<b>%{customdata[1]}</b><br>"
"<b>Host Since: </b>%{customdata[2]}<br>"
"<b>Price: </b>%{customdata[3]} / night<br>"
"<b>Person to accommodate: </b>%{customdata[4]}<br>"
"<b>Yearly Availability: </b>%{customdata[5]} days/year (%{customdata[6]} %)",
)
layout = dict(
mapbox=dict(
style="streets",
uirevision=True,
accesstoken=mapbox_token,
zoom=9,
center=dict(
lon=austin_listings["longitude"].mean(),
lat=austin_listings["latitude"].mean(),
),
),
shapes=[
{
"type": "rect",
"xref": "paper",
"yref": "paper",
"x0": 0,
"y0": 0,
"x1": 1,
"y1": 1,
"line": {"width": 1, "color": "#B0BEC5"},
}
],
margin=dict(l=10, t=10, b=10, r=10),
height=900,
showlegend=True,
hovermode="closest",
)
figure = {"data": [mapbox_figure], "layout": layout}
return figure
def make_map_with_clustering(sel_ind, c_type, stored_data):
"""
Update layers on map from clustering regions.
:param sel_ind: lasso-select index from map.selectedData.
:param c_type: cluster type.
:param stored_data: datastore from computing.
:return: Plotly figure object.
"""
# Group based on zipcode
figure = make_base_map()
# Decrease opacity of scatter
figure["data"][0]["marker"].update(opacity=0.02)
figure["layout"].update(
dragmode="lasso"
) # clickmode doesn't work but drag will select scatters
db = pd.DataFrame()
if c_type == "ht-cluster":
db = pd.read_json(stored_data["ht"]["data"])
elif c_type == "rating-cluster":
db, p_val = pd.read_json(stored_data["rt"]["data"]), stored_data["rt"]["p_val"]
for ind, i in enumerate(db["cl"].unique()):
# Choro cluster by zipcode, split into different colored choro layer after clustering or regionalization.
figure["data"].append(
dict(
type="choroplethmapbox",
showlegend=True,
geojson=zc_link,
locations=db[db["cl"] == i]["zipcode"],
z=list(1 for _ in range(len(db[db["cl"] == i]["zipcode"]))),
hoverinfo="location",
name="Group {}".format(ind + 1),
customdata=list(ind for _ in range(len(db[db["cl"] == i]["zipcode"]))),
selected=dict(marker=dict(opacity=1)),
unselected=dict(marker=dict(opacity=0.2)),
selectedpoints="" if ind == sel_ind or sel_ind is None else [],
marker=dict(opacity=0.8, line=dict(width=1)),
colorscale=[[0, geo_colors[ind]], [1, geo_colors[ind]]],
showscale=False,
)
)
# move scatter trace at the end and disable its hover effect
figure["data"].append(figure["data"].pop(0))
figure["data"][-1].update(dict(hoverinfo="skip", hovertemplate=""))
return figure
def make_original_property_graph():
# Property type without any grouping
types = pd.get_dummies(austin_listings["property_type"])
prop_types = types.join(austin_listings["zipcode"]).groupby("zipcode").sum()
prop_types_pct = (prop_types * 100.0).div(prop_types.sum(axis=1), axis=0)
# Plot horizontal bars
bar_fig = []
for prop in list(prop_types_pct.columns):
bar_fig.append(
go.Bar(
name=prop,
orientation="h",
y=[str(i) for i in prop_types_pct.index],
x=prop_types_pct[prop],
)
)
figure = go.Figure(
data=bar_fig,
layout=dict(
barmode="stack",
colorway=bar_coloway,
yaxis_type="category",
margin=dict(l=10, r=10, t=10, b=10),
showlegend=False,
height=1000,
yaxis=dict(title="ZipCode"),
paper_bgcolor="rgba(0,0,0,0)",
plot_bgcolor="rgba(0,0,0,0)",
),
)
return figure
def make_property_graph(pct: pd.DataFrame) -> dict:
# Demographic explore patterns clustering results
bar_fig = []
for prop in pct.columns:
bar_fig.append(
go.Bar(
name=prop,
x=["Group {}".format(int(i) + 1) for i in pct.index],
y=pct[prop],
marker=dict(opacity=0.8, line=dict(color="#ddd")),
orientation="v",
)
)
fig = go.Figure(
data=bar_fig,
layout=dict(
barmode="stack",
colorway=bar_coloway,
margin=dict(l=10, r=10, t=10, b=10),
legend=dict(traceorder="reversed", font=dict(size=9)),
height=400,
paper_bgcolor="rgba(0,0,0,0)",
plot_bgcolor="rgba(0,0,0,0)",
),
)
return fig
def make_review_chart(pct_d, original=False):
fig = []
for ind, rows in pct_d.iterrows():
fig.append(
go.Scatter(
x=[x.split("_")[-1].capitalize() for x in review_columns],
y=rows,
name="Group {}".format(ind + 1) if not original else "All listings",
mode="markers+lines",
hovertemplate="<b> Review Score %{x}: </b> %{y}",
marker=dict(
size=12,
opacity=0.8,
color=geo_colors[ind],
line=dict(width=1, color="#ffffff"),
),
)
)
return {
"data": fig,
"layout": dict(
yaxis=dict(title="Scores"),
margin=dict(l=40, r=10, t=10, b=70),
hovermode="closest",
),
}
def populate_init_data():
# Initialize data store with default computing
ht_res = apply_clustering()
rt_res = rating_clustering(INIT_THRESHOLD_VAL)
return {
"ht": {"data": ht_res[0].to_json(), "pct": ht_res[1].to_json()},
"rt": {
"data": rt_res[0].to_json(),
"p_val": rt_res[1],
"pct_d": rt_res[2].to_json(),
},
}
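# Sketch of the dcc.Store payload built above: data['ht'] holds the house-type clustering
# result as JSON ('data') plus its per-group property-type percentages ('pct');
# data['rt'] holds the rating regionalization result ('data'), the returned p-value
# ('p_val') and its percentages ('pct_d'). Downstream callbacks read this store instead
# of recomputing the clustering.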
# Dash App Layout
app.layout = html.Div(
children=[
dcc.Store(id="cluster-data-store", data=populate_init_data()),
# Banner
header_section(),
html.Div(
[
html.Div(
children=[
html.Div(id="intro-text", children=dcc.Markdown(intro_text)),
html.P("Austin Airbnb listings"),
html.Hr(),
dcc.Graph(id="map", config={"responsive": True}),
],
className="eight columns named-card",
),
# Categorical properties by cluster e.g (property type stacked bar)
html.Div(
children=[
html.P("Map Options"),
html.Hr(),
dcc.RadioItems(
id="cluster-ctl",
options=[
{"label": "All Listings", "value": "no-cluster"},
{
"label": "Cluster based on house type",
"value": "ht-cluster",
},
{
"label": "Regionalization based on Ratings",
"value": "rating-cluster",
},
],
value="no-cluster",
),
html.Div(
id="threshold-div",
children=[
html.P("Minimum Threshold (% of reviews / region)"),
dcc.Slider(
id="regionalization-threshold",
min=5,
max=30,
marks={
5: "5%",
10: "10%",
15: "15%",
20: "20%",
25: "25%",
30: "30%",
},
value=INIT_THRESHOLD_VAL,
tooltip={"placement": "bottom"},
),
],
),
html.Div(
html.Button(
"Run Clustering & Update Map",
id="btn-updt-map",
title="Click to run spatial clustering, computing could take seconds to complete.",
n_clicks=0,
),
className="btn-outer",
),
html.Hr(),
html.Div(
id="listing-div",
children=[
html.H6("1000", id="listing-ind"),
dcc.Markdown(listing_txt, id="total-listings"),
],
),
html.P("Property Types"),
html.Hr(),
dcc.Graph(id="geodemo-chart"),
html.P("User satisfactions"),
html.Hr(),
dcc.Graph(id="user-satisfaction"),
],
className="four columns named-card",
),
],
className="twelve columns",
),
],
className="container twelve columns",
)
# =====Callbacks=====
@app.callback(
Output("map", "figure"),
[Input("map", "clickData"), Input("cluster-data-store", "data")],
[State("cluster-ctl", "value")],
)
def update_map(region_select, ds, clustering_type):
# Update map based on selectedData and stored calculation
ctx = dash.callback_context
print("cluster type", clustering_type)
if ds is not None:
if "ht" in clustering_type or "rating" in clustering_type:
if ctx.triggered and "data-store" in ctx.triggered[0]["prop_id"]:
figure = make_map_with_clustering(None, clustering_type, ds)
return figure
if region_select is not None:
                # An empty selection returns the full, unfiltered figure
if not len(region_select["points"]):
return make_map_with_clustering(None, clustering_type, ds)
else:
for point in region_select["points"]:
if len(str(point["customdata"])) == 1:
print(
"selected region: Group {}".format(
point["customdata"] + 1
)
)
return make_map_with_clustering(
point["customdata"], clustering_type, ds
)
return make_map_with_clustering(None, clustering_type, ds)
else:
return make_base_map()
return make_base_map()
@app.callback(
Output("cluster-data-store", "data"),
[Input("btn-updt-map", "n_clicks")],
[
State("cluster-ctl", "value"),
State("cluster-data-store", "data"),
State("regionalization-threshold", "value"),
],
)
def update_ds(n_clicks, clustering_type, cur_ds, thr):
    # Run the selected clustering algorithm once per button click and store the result for figure loading
if n_clicks:
if clustering_type == "ht-cluster":
# Apply KMeans and update datastore
ht_res = apply_clustering()
cur_ds.update(ht={"data": ht_res[0].to_json(), "pct": ht_res[1].to_json()})
elif clustering_type == "rating-cluster":
rt_res = rating_clustering(thr)
cur_ds.update(
rt={
"data": rt_res[0].to_json(),
"p_val": rt_res[1],
"pct_d": rt_res[2].to_json(),
}
)
else:
return cur_ds
return cur_ds
return cur_ds
@app.callback(
[Output("listing-ind", "children"), Output("listing-ind", "style")],
[Input("map", "clickData"), Input("btn-updt-map", "n_clicks")],
[State("cluster-ctl", "value"), State("cluster-data-store", "data")],
)
def update_indicator(map_select, n_click, cluster_type, ds):
ctx = dash.callback_context
if ctx.triggered and "clickData" in ctx.triggered[0]["prop_id"]:
ht = pd.read_json(ds["ht"]["data"])
rt = pd.read_json(ds["rt"]["data"])
if cluster_type == "ht-cluster":
dff = ht
elif cluster_type == "rating-cluster":
dff = rt
if map_select is None:
return str(len(austin_listings)), {"color": "#550100"}
else:
for point in map_select["points"]:
if len(str(point["customdata"])) == 1:
print("selected region: Group {}".format(point["customdata"] + 1))
sel_ind = point["customdata"]
zips = dff[dff["cl"] == sel_ind]["zipcode"]
count = 0
                    for zip_code in zips:
                        count += grouped[str(zip_code)]
return str(count), {"color": geo_colors[sel_ind]}
return str(len(austin_listings)), {"color": "#550100"}
@app.callback(
Output("geodemo-chart", "figure"),
[Input("cluster-data-store", "data")],
[State("cluster-ctl", "value")],
)
def update_geodemo_chart(ds, clustering_type):
    # Refresh the chart whenever the clustering data store is updated
if ds:
if clustering_type == "ht-cluster":
pct = pd.read_json(ds["ht"]["pct"]).drop(review_columns, axis=1)
return make_property_graph(pct)
if clustering_type == "rating-cluster":
pct_d = pd.read_json(ds["rt"]["pct_d"]).drop(review_columns, axis=1)
return make_property_graph(pct_d)
elif clustering_type == "no-cluster":
return make_original_property_graph()
return make_original_property_graph()
@app.callback(
Output("user-satisfaction", "figure"),
[Input("cluster-data-store", "data")],
[State("cluster-ctl", "value")],
)
def update_review_chart(ds, clustering_type):
# y: Average score, x: category, color: group
empty_fig = {
"data": [],
"layout": dict(
yaxis=dict(title="Scores"),
margin=dict(l=30, r=10, t=10, b=70),
hovermode="closest",
),
}
if clustering_type == "rating-cluster":
        pct_d = pd.read_json(ds["rt"]["pct_d"])
"""
Deep Learning Keras/TensorFlow regression model of EU-28 Quality of Life (QoL) levels for year 2014,
using 11 socioeconomic indicators, retrieved from Eurostat, as predictors and 1 climatic comfort predictor.
The target is 2013 EUSILC QoL survey data (ground truth).
A model to quantitatively estimate the spatial well-being (QoL) distribution across the EU-28 member states down
to the municipal level. The model is weight-driven and based on Eurostat statistics on objective key QoL indicators,
identified by the 2016 Eurostat Analytical report on subjective well-being and the 2013 EU-SILC ad-hoc module on
well-being. Additionally, some Europe 2020 strategy targets of the European Commission, deemed to be important to a
sense of personal well-being, are included, such as the risk of poverty or social exclusion and advanced educational
attainment.
A climatic comfort component based on 1961-1990 climatic normals is added to estimate the importance of (a static)
climate to QoL. Thermal comfort levels are obtained using the Universal Thermal Climate Index (UTCI) and
Predicted Mean Vote (PMV), and overall climatic comfort levels are obtained as a weighted linear combination
based on the classical Tourism Climatic Index (TCI).
To evaluate the performance of the Deep Learning model, it is compared to the results from the 2013 EU subjective
well-being survey (the ground truth).
"""
from keras.layers import Dense
from keras.models import Sequential
from keras.callbacks import EarlyStopping
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.preprocessing import StandardScaler
# Import raw data as pandas dataframe "df", and instantiate a scaler
df = pd.read_csv('merged_raw_data_2014.csv', delimiter=',')
scaler = StandardScaler()
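# The 11 socioeconomic indicators and the climatic-comfort component described in the module
# docstring are expected to be pre-computed columns of 'merged_raw_data_2014.csv'; this script
# standardizes them and fits the network to the standardized 2013 EU-SILC scores (column 'EUSILC').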
# Pearson r correlation coefficient function
def pearson_r(array1, array2):
"""
Compute Pearson correlation coefficient between two arrays.
"""
corr_mat = np.corrcoef(array1, array2)
# Return entry [0,1]
return corr_mat[0, 1]
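# Example usage: pearson_r(np.array([1.0, 2.0, 3.0]), np.array([2.0, 4.0, 6.0])) gives 1.0,
# i.e. a perfect positive linear correlation.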
# Create and scale the target and predictor arrays
target = df.EUSILC.to_numpy()
target_scaled = scaler.fit_transform(target.reshape(-1, 1))
predictors = df.drop(
    ['EUSILC', 'WS_QoL', 'WS_QoLadj', 'CNTR_CODE', 'OBJECTID', 'POINTID', 'NUTS_ID', 'STAT_LEVL_', 'LAT', 'LON'],
    axis=1).to_numpy()
predictors_scaled = scaler.fit_transform(predictors)
n_cols = predictors_scaled.shape[1]
layers = 3
# Create the model and add layers
model = Sequential()
model.add(Dense(500, activation='relu', input_shape=(n_cols,)))
for i in range(1, layers):
model.add(Dense(50, activation='relu'))
# Add final output layer
model.add(Dense(1))
# Compile the model, with mean squared error loss function as a measure of model success
model.compile(optimizer='adam', loss='mean_squared_error')
# Provide early stopping if no improvements are shown
early_stopping_monitor = EarlyStopping(patience=7)
# Fit the model, splitting into training and hold-out sets
history = model.fit(predictors_scaled, target_scaled,
validation_split=0.3, epochs=50, callbacks=[early_stopping_monitor])
print("Min. validation loss: ", np.min(history.history['val_loss']))
# MODEL PREDICTING
#--------------------
# Calculate predictions and flatten the array; the outputs are continuous QoL estimates (regression), not probabilities.
predictions = model.predict(predictors_scaled)
predictions_flat = predictions.ravel()
# Shift the predictions up by the difference in means to the standard 0-10 scale of the EUSILC survey data
predictions_scaled = predictions_flat - np.mean(predictions_flat) + np.mean(target)
# Create combined dataframe
probs = [('POINTID', df['POINTID']),
('QoL_pred_prob', list(predictions_scaled))]
df_predicted = pd.DataFrame.from_dict(dict(probs))
from tests.deap.conftest import building_area, building_volume
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
from ber_public.deap import vent
def test_calculate_infiltration_rate_due_to_openings():
"""Output is equivalent to DEAP 4.2.0 example A"""
building_volume = pd.Series([321, 0, 100, 200])
no_chimneys = pd.Series([0, 0, 0, 1])
no_open_flues = pd.Series([0, 0, 0, 1])
no_fans = pd.Series([1, 0, 0, 1])
no_room_heaters = pd.Series([0, 0, 0, 1])
is_draught_lobby = pd.Series(["NO", "NO", "YES", "NO"])
expected_output = pd.Series([0.08, 0, 0, 0.6])
output = vent._calculate_infiltration_rate_due_to_openings(
building_volume=building_volume,
no_chimneys=no_chimneys,
no_open_flues=no_open_flues,
no_fans=no_fans,
no_room_heaters=no_room_heaters,
is_draught_lobby=is_draught_lobby,
draught_lobby_boolean=vent.YES_NO,
)
assert_series_equal(output.round(2), expected_output)
def test_calculate_infiltration_rate_due_to_structure():
"""Output is equivalent to DEAP 4.2.0 example A"""
is_permeability_tested = pd.Series(["YES", "NO", "NO"])
permeability_test_result = pd.Series([0.15, np.nan, np.nan])
no_storeys = pd.Series([np.nan, 2, 1])
percentage_draught_stripped = pd.Series([np.nan, 100, 75])
is_floor_suspended = pd.Series(
[np.nan, "No ", "Yes (Unsealed) "]
)
structure_type = pd.Series(
[np.nan, "Masonry ", "Timber or Steel Frame "]
)
expected_output = pd.Series([0.15, 0.5, 0.55])
output = vent._calculate_infiltration_rate_due_to_structure(
is_permeability_tested=is_permeability_tested,
permeability_test_result=permeability_test_result,
no_storeys=no_storeys,
percentage_draught_stripped=percentage_draught_stripped,
is_floor_suspended=is_floor_suspended,
structure_type=structure_type,
suspended_floor_types=vent.SUSPENDED_FLOOR_TYPES,
structure_types=vent.STRUCTURE_TYPES,
permeability_test_boolean=vent.YES_NO,
)
assert_series_equal(output.round(2), expected_output)
def test_calculate_infiltration_rate(monkeypatch):
"""Output is equivalent to DEAP 4.2.0 example A"""
no_sides_sheltered = pd.Series([2, 2])
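    # Patch both infiltration sub-calculations with fixed values so that only the
    # shelter-adjustment combination step of calculate_infiltration_rate is exercised.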
def _mock_calculate_infiltration_rate_due_to_openings(*args, **kwargs):
return pd.Series([0.08, 0.08])
def _mock_calculate_infiltration_rate_due_to_structure(*args, **kwargs):
return pd.Series([0.15, 0.5])
monkeypatch.setattr(
vent,
"_calculate_infiltration_rate_due_to_openings",
_mock_calculate_infiltration_rate_due_to_openings,
)
monkeypatch.setattr(
vent,
"_calculate_infiltration_rate_due_to_structure",
_mock_calculate_infiltration_rate_due_to_structure,
)
expected_output = pd.Series([0.2, 0.49])
output = vent.calculate_infiltration_rate(
no_sides_sheltered=no_sides_sheltered,
building_volume=None,
no_chimneys=None,
no_open_flues=None,
no_fans=None,
no_room_heaters=None,
is_draught_lobby=None,
is_permeability_tested=None,
permeability_test_result=None,
no_storeys=None,
percentage_draught_stripped=None,
is_floor_suspended=None,
structure_type=None,
draught_lobby_boolean=None,
suspended_floor_types=None,
structure_types=None,
permeability_test_boolean=None,
)
assert_series_equal(output.round(2), expected_output)
def test_calculate_effective_air_rate_change():
"""Output is equivalent to DEAP 4.2.0 example A"""
n_methods = 6
ventilation_method = pd.Series(
[
"Natural vent.",
"Pos input vent.- loft",
"Pos input vent.- outside",
"Whole house extract vent.",
"Bal.whole mech.vent no heat re",
"Bal.whole mech.vent heat recvr",
]
)
building_volume = pd.Series([321] * n_methods)
    infiltration_rate = pd.Series([0.2] * n_methods)
"""
aoe2netwrapper.converters
-------------------------
This module implements a high-level class with static methods to convert result of AoENetAPI methods to
pandas DataFrames.
"""
from typing import List
from loguru import logger
from aoe2netwrapper.models import (
LastMatchResponse,
LeaderBoardResponse,
MatchLobby,
NumOnlineResponse,
RatingTimePoint,
StringsResponse,
)
try:
import pandas as pd
except ImportError as error:
logger.error(
"User tried to use the 'converters' submodule without havinig installed the 'pandas' library."
)
raise NotImplementedError(
"The 'aoe2netwrapper.converters' module exports results to 'pandas.DataFrame' objects and "
"needs the 'pandas' library installed to function."
) from error
class Convert:
"""
This is a convenience class providing methods to convert the outputs from the AoE2NetAPI query methods
into pandas DataFrame objects. Every method below is a staticmethod, so no object has to be instantiated.
"""
@staticmethod
def strings(strings_response: StringsResponse) -> pd.DataFrame:
"""
Convert the result given by a call to AoE2NetAPI().strings to a pandas DataFrame.
Args:
strings_response (StringsResponse): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the StringsResponse, each column being the values for a 'string' used
by the API, and the index being the ID numbers. Since this is the result of a join for many
'strings' that do not have the same amount of values, the resulting dataframe will contain NaNs
wherever a given 'string' does not have a value for the given index ID.
"""
if not isinstance(strings_response, StringsResponse):
logger.error("Tried to use method with a parameter of type != StringsResponse")
raise TypeError("Provided parameter should be an instance of 'StringsResponse'")
logger.debug("Converting StringsResponse to DataFrame")
dframe = pd.DataFrame(strings_response).transpose()
dframe.columns = dframe.iloc[0]
dframe = dframe.drop(index=[0]).reset_index(drop=True)
dframe = dframe.drop(columns=["language"])
logger.trace("Exporting each string attribute to its own dataframe and joining")
result = pd.DataFrame()
for col in dframe.columns:
intermediate = pd.DataFrame()
intermediate[col] = dframe[col][0]
intermediate["id"] = intermediate[col].apply(lambda x: x.id)
intermediate[col] = intermediate[col].apply(lambda x: x.string)
result = result.join(intermediate.set_index("id"), how="outer")
return result
@staticmethod
def leaderboard(leaderboard_response: LeaderBoardResponse) -> pd.DataFrame:
"""
Convert the result given by a call to AoE2NetAPI().leaderboard to a pandas DataFrame.
Args:
leaderboard_response (LeaderBoardResponse): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the LeaderBoardResponse, each row being an entry in the leaderboard.
Top level attributes such as 'start' or 'total' are broadcast to an entire array the size of
the dataframe, and timestamps are converted to datetime objects.
"""
if not isinstance(leaderboard_response, LeaderBoardResponse):
logger.error("Tried to use method with a parameter of type != LeaderBoardResponse")
raise TypeError("Provided parameter should be an instance of 'LeaderBoardResponse'")
logger.debug("Converting LeaderBoardResponse leaderboard to DataFrame")
dframe = pd.DataFrame(leaderboard_response.leaderboard)
dframe = _export_tuple_elements_to_column_values_format(dframe)
logger.trace("Inserting LeaderBoardResponse attributes as columns")
dframe["leaderboard_id"] = leaderboard_response.leaderboard_id
dframe["start"] = leaderboard_response.start
dframe["count"] = leaderboard_response.count
dframe["total"] = leaderboard_response.total
logger.trace("Converting datetimes")
dframe["last_match"] = pd.to_datetime(dframe["last_match"], unit="s")
dframe["last_match_time"] = pd.to_datetime(dframe["last_match_time"], unit="s")
return dframe
@staticmethod
def lobbies(lobbies_response: List[MatchLobby]) -> pd.DataFrame:
"""
Convert the result given by a call to AoE2NetAPI().lobbies to a pandas DataFrame. The resulting
DataFrame will contain several rows for each lobby, namely as many as there are players in said
lobby. All global attributes of each lobby are broadcasted to arrays, making them duplicates.
To isolate a specific lobby, either call the AoE2NetAPI().match method with the lobby's UUID or
make use of the groupby functionality of pandas DataFrames.
Args:
lobbies_response (List[MatchLobby]): the response directly returned by your AoE2NetAPI
client.
Returns:
            A pandas DataFrame from the list of MatchLobby elements.
"""
if not isinstance(lobbies_response, list): # move list to List[MatchLobby] when supporting > 3.9
logger.error("Tried to use method with a parameter of type != List[MatchLobby]")
raise TypeError("Provided parameter should be an instance of 'List[MatchLobby]'")
logger.debug("Converting Lobbies response to DataFrame")
unfolded_lobbies = [_unfold_match_lobby_to_dataframe(match_lobby) for match_lobby in lobbies_response]
return pd.concat(unfolded_lobbies).reset_index(drop=True)
@staticmethod
def last_match(last_match_response: LastMatchResponse) -> pd.DataFrame:
"""
Convert the result given by a call to AoE2NetAPI().last_match to a pandas DataFrame. There is not
much use to this as the DataFrame will only have one row, but the method is provided nonetheless in
case users want to concatenate several of these results in a DataFrame.
Args:
last_match_response (LastMatchResponse): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the list of LastMatchResponse attributes. Beware: the 'players'
column is directly the content of the 'LastMatchResponse.last_match.players' attribute and as
such holds a list of LobbyMember objects.
"""
if not isinstance(last_match_response, LastMatchResponse):
logger.error("Tried to use method with a parameter of type != LastMatchResponse")
raise TypeError("Provided parameter should be an instance of 'LastMatchResponse'")
logger.debug("Converting LastMatchResponse last_match to DataFrame")
dframe = pd.DataFrame(last_match_response.last_match).transpose()
dframe.columns = dframe.iloc[0]
dframe = dframe.drop(0).reset_index()
logger.trace("Inserting LastMatchResponse attributes as columns")
dframe["profile_id"] = last_match_response.profile_id
dframe["steam_id"] = last_match_response.steam_id
dframe["name"] = last_match_response.name
dframe["country"] = last_match_response.country
return dframe
@staticmethod
def match_history(match_history_response: List[MatchLobby]) -> pd.DataFrame:
"""
Convert the result given by a call to AoE2NetAPI().match_history to a pandas DataFrame. The resulting
DataFrame will contain several rows for each lobby, namely as many as there are players in said
lobby. All global attributes of each lobby are broadcasted to arrays, making them duplicates.
To isolate a specific lobby, either call the AoE2NetAPI().match method with the lobby's UUID or
make use of the groupby functionality of pandas DataFrames.
Args:
match_history_response (List[MatchLobby]): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the list of MatchLobby elements.
"""
# move list to List[MatchLobby] when supporting > 3.9
if not isinstance(match_history_response, list):
logger.error("Tried to use method with a parameter of type != List[MatchLobby]")
raise TypeError("Provided parameter should be an instance of 'List[MatchLobby]'")
logger.debug("Converting Match History response to DataFrame")
unfolded_lobbies = [
_unfold_match_lobby_to_dataframe(match_lobby) for match_lobby in match_history_response
]
return pd.concat(unfolded_lobbies).reset_index(drop=True)
@staticmethod
def rating_history(rating_history_response: List[RatingTimePoint]) -> pd.DataFrame:
"""
Convert the result given by a call to AoE2NetAPI().leaderboard to a pandas DataFrame.
Args:
rating_history_response (List[RatingTimePoint]): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the list of RatingTimePoint elements, each row being the information from
one RatingTimePoint in the list. Timestamps are converted to datetime objects.
"""
# move list to List[RatingTimePoint] when supporting > 3.9
if not isinstance(rating_history_response, list):
logger.error("Tried to use method with a parameter of type != List[RatingTimePoint]")
raise TypeError("Provided parameter should be an instance of 'List[RatingTimePoint]'")
logger.debug("Converting Rating History rsponse to DataFrame")
dframe = pd.DataFrame(rating_history_response)
dframe = _export_tuple_elements_to_column_values_format(dframe)
logger.trace("Converting timestamps to datetime objects")
dframe["time"] = pd.to_datetime(dframe["timestamp"], unit="s")
dframe = dframe.drop(columns=["timestamp"])
return dframe
@staticmethod
def matches(matches_response: List[MatchLobby]) -> pd.DataFrame:
"""
Convert the result given by a call to AoE2NetAPI().match_history to a pandas DataFrame. The resulting
DataFrame will contain several rows for each lobby, namely as many as there are players in said
lobby. All global attributes of each lobby are broadcasted to arrays, making them duplicates.
To isolate a specific lobby, either call the AoE2NetAPI().match method with the lobby's UUID or
make use of the groupby functionality of pandas DataFrames.
Args:
matches_response (List[MatchLobby]): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the list of MatchLobby elements.
"""
if not isinstance(matches_response, list): # move list to List[MatchLobby] when supporting > 3.9
logger.error("Tried to use method with a parameter of type != List[MatchLobby]")
raise TypeError("Provided parameter should be an instance of 'List[MatchLobby]'")
logger.debug("Converting Match History response to DataFrame")
unfolded_lobbies = [_unfold_match_lobby_to_dataframe(match_lobby) for match_lobby in matches_response]
return pd.concat(unfolded_lobbies).reset_index(drop=True)
@staticmethod
def match(match_response: MatchLobby) -> pd.DataFrame:
"""
Convert the content of a MatchLobby to a pandas DataFrame. The resulting DataFrame will have as many
rows as there are players in the lobby, and all global attributes will be broadcasted to columns of
the same length, making them duplicates.
Args:
match_response (MatchLobby): a MatchLobby object.
Returns:
A pandas DataFrame from the MatchLobby attributes, each row being global information from the
MatchLobby as well as one of the players in the lobby.
"""
return _unfold_match_lobby_to_dataframe(match_response)
@staticmethod
def num_online(num_online_response: NumOnlineResponse) -> pd.DataFrame:
"""
Convert the result given by a call to AoE2NetAPI().num_online to a pandas DataFrame.
Args:
num_online_response (NumOnlineResponse): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the NumOnlineResponse, each row being an entry in the leaderboard.
Top level attributes such as 'app_id' are broadcast to an entire array the size of the
dataframe, and timestamps are converted to datetime objects.
"""
if not isinstance(num_online_response, NumOnlineResponse):
logger.error("Tried to use method with a parameter of type != NumOnlineResponse")
raise TypeError("Provided parameter should be an instance of 'NumOnlineResponse'")
logger.debug("Converting NumOnlineResponse to DataFrame")
dframe = pd.DataFrame(num_online_response.dict())
logger.trace("Exporting 'player_stats' attribute contents to columns")
dframe["time"] = dframe.player_stats.apply(lambda x: x["time"]).apply(pd.to_datetime)
dframe["steam"] = dframe.player_stats.apply(lambda x: x["num_players"]["steam"])
dframe["looking"] = dframe.player_stats.apply(lambda x: x["num_players"]["looking"])
dframe["in_game"] = dframe.player_stats.apply(lambda x: x["num_players"]["in_game"])
dframe["multiplayer"] = dframe.player_stats.apply(lambda x: x["num_players"]["multiplayer"])
dframe["multiplayer_1h"] = dframe.player_stats.apply(lambda x: x["num_players"]["multiplayer_1h"])
dframe["multiplayer_24h"] = dframe.player_stats.apply(lambda x: x["num_players"]["multiplayer_24h"])
logger.trace("Removing 'player_stats' column to avoid nested & duplicate data")
dframe = dframe.drop(columns=["player_stats"])
return dframe
# ----- Helpers ----- #
def _export_tuple_elements_to_column_values_format(dataframe: pd.DataFrame) -> pd.DataFrame:
"""
Take in a pandas DataFrame with simple int values as columns, and elements being a tuple of
(attribute_name, value) and cast it to have the attribute_name as column names, and the values as values.
The original columns will be dropped in the process.
Args:
dataframe (pd.DataFrame): your pandas DataFrame.
Returns:
The refactored pandas DataFrame.
"""
dframe = dataframe.copy(deep=True)
logger.trace("Exporting attributes to columns and removing duplicate data")
    for col_index in dframe.columns:
attribute = dframe[col_index][0][0]
dframe[attribute] = dframe[col_index].apply(lambda x: x[1])
dframe = dframe.drop(columns=[col_index])
return dframe
def _unfold_match_lobby_to_dataframe(match_lobby: MatchLobby) -> pd.DataFrame:
"""
Convert the content of a MatchLobby to a pandas DataFrame. The resulting DataFrame will have as many
rows as there are players in the lobby, and all global attributes will be broadcasted to columns of the
same length, making them duplicates.
Args:
match_lobby (MatchLobby): a MatchLobby object.
Returns:
A pandas DataFrame from the MatchLobby attributes, each row being global information from the
MatchLobby as well as one of the players in the lobby.
"""
if not isinstance(match_lobby, MatchLobby):
logger.error("Tried to use method with a parameter of type != MatchLobby")
raise TypeError("Provided parameter should be an instance of 'MatchLobby'")
logger.trace("Unfolding MatchLobby.players contents to DataFrame")
dframe = pd.DataFrame(match_lobby.players)
dframe = _export_tuple_elements_to_column_values_format(dframe)
dframe = dframe.rename(columns={"name": "player"})
logger.trace("Broadcasting global MatchLobby attributes")
attributes_df = pd.DataFrame()
for attribute, value in match_lobby.dict().items():
if attribute != "players":
attributes_df[attribute] = [value] * len(dframe)
dframe = attributes_df.join(dframe, how="outer")
logger.trace("Converting timestamps to datetime objects")
dframe["opened"] = pd.to_datetime(dframe["opened"], unit="s")
dframe["started"] = pd.to_datetime(dframe["started"], unit="s")
dframe["finished"] = | pd.to_datetime(dframe["finished"], unit="s") | pandas.to_datetime |
import pandas as pd
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
import os
import json
def pre_process(fid=None):
fid = Path(fid)
df = pd.read_csv('gender-classifier-DFE-791531.csv', encoding='latin-1')
girls_tweets = df[df['gender']=='female'][['text','gender']]
boys_tweets = df[df['gender']=='male'][['text','gender']]
df = pd.concat([girls_tweets,boys_tweets])
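    # One-hot encode the gender label so it can serve as the classification target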
    df['binary'] = pd.get_dummies(df.gender, prefix='gender')
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
    def test_dtype_coercion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
    def test_concat_series_axis1(self, sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = Series([1, 2, 3], name="x")
s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
# use check_index_type=True to check the result have
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = Series([1, 2, 3])
s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
def test_concat_multiindex_rangeindex(self):
# GH13542
# when multi-index levels are RangeIndex objects
# there is a bug in concat with objects of len 1
df = DataFrame(np.random.randn(9, 2))
df.index = MultiIndex(
levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
)
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
exp = df.iloc[[2, 3, 4, 5], :]
tm.assert_frame_equal(res, exp)
def test_concat_multiindex_dfs_with_deepcopy(self):
# GH 9967
from copy import deepcopy
example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]])
example_dataframe1 = DataFrame([0], index=example_multiindex1)
example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]])
example_dataframe2 = DataFrame([1], index=example_multiindex2)
example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
expected_index = pd.MultiIndex(
levels=[["s1", "s2"], ["a"], ["b", "c"]],
codes=[[0, 1], [0, 0], [0, 1]],
names=["testname", None, None],
)
expected = DataFrame([[0], [1]], index=expected_index)
result_copy = pd.concat(deepcopy(example_dict), names=["testname"])
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=["testname"])
tm.assert_frame_equal(result_no_copy, expected)
def test_categorical_concat_append(self):
cat = Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
res = pd.concat([df, df_different_categories], ignore_index=True)
exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
tm.assert_frame_equal(res, exp)
res = df.append(df_different_categories, ignore_index=True)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_dtypes(self):
# GH8143
index = ["cat", "obj", "num"]
cat = Categorical(["a", "b", "c"])
obj = Series(["a", "b", "c"])
num = Series([1, 2, 3])
df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == "object"
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "int64"
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "category"
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_categorical_concat(self, sort):
# See GH 10177
df1 = DataFrame(
np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
)
df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2["h"] = Series(Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
exp = DataFrame(
{
"a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
"b": [
1,
4,
7,
10,
13,
16,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
],
"c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
"h": [None] * 6 + cat_values,
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_gh7864(self):
# GH 7864
# make sure ordering is preserved
df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")})
df["grade"] = Categorical(df["raw_grade"])
df["grade"].cat.set_categories(["e", "a", "b"])
df1 = df[0:3]
df2 = df[3:]
tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories)
tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories)
dfx = pd.concat([df1, df2])
tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories)
dfa = df1.append(df2)
tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories)
def test_categorical_concat_preserve(self):
# GH 8641 series concat not preserving category dtype
# GH 13524 can concat different categories
s = Series(list("abc"), dtype="category")
s2 = Series(list("abd"), dtype="category")
exp = Series(list("abcabd"))
res = pd.concat([s, s2], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), dtype="category")
res = pd.concat([s, s], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category")
res = pd.concat([s, s])
tm.assert_series_equal(res, exp)
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))})
res = pd.concat([df2, df2])
exp = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame(
{"A": a, "B": b.astype(CategoricalDtype(list("cab")))}
).set_index("B")
result = pd.concat([df2, df2])
expected = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
tm.assert_frame_equal(result, expected)
# wrong categories
df3 = DataFrame(
{"A": a, "B": Categorical(b, categories=list("abe"))}
).set_index("B")
msg = "categories must match existing categories when appending"
with pytest.raises(TypeError, match=msg):
pd.concat([df2, df3])
def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
result = pd.concat([a, b, c], axis=1)
exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories)
exp = DataFrame(
{
0: [1, 1, np.nan, np.nan],
1: [np.nan, 2, 2, np.nan],
2: [np.nan, np.nan, 3, 3],
},
columns=[0, 1, 2],
index=exp_idx,
)
tm.assert_frame_equal(result, exp)
def test_concat_order(self):
# GH 17344
dfs = [DataFrame(index=range(3), columns=["a", 1, None])]
dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100)]
result = pd.concat(dfs, sort=True).columns
expected = dfs[0].columns
tm.assert_index_equal(result, expected)
def test_concat_different_extension_dtypes_upcasts(self):
a = Series(pd.core.arrays.integer_array([1, 2]))
b = Series(to_decimal([1, 2]))
result = pd.concat([a, b], ignore_index=True)
expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
tm.assert_series_equal(result, expected)
def test_concat_ordered_dict(self):
# GH 21510
expected = pd.concat(
[Series(range(3)), Series(range(4))], keys=["First", "Another"]
)
result = pd.concat(
dict([("First", Series(range(3))), ("Another", Series(range(4)))])
)
tm.assert_series_equal(result, expected)
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
@pytest.mark.parametrize("pdt", [Series, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["float"])
def test_concat_no_unnecessary_upcast(dt, pdt):
# GH 13247
dims = pdt(dtype=object).ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == dt
@pytest.mark.parametrize("pdt", [create_series_with_explicit_dtype, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["int"])
def test_concat_will_upcast(dt, pdt):
with catch_warnings(record=True):
dims = pdt().ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == "float64"
def test_concat_empty_and_non_empty_frame_regression():
# GH 18178 regression test
df1 = DataFrame({"foo": [1]})
df2 = DataFrame({"foo": []})
expected = DataFrame({"foo": [1.0]})
result = pd.concat([df1, df2])
tm.assert_frame_equal(result, expected)
def test_concat_empty_and_non_empty_series_regression():
# GH 18187 regression test
s1 = Series([1])
s2 = Series([], dtype=object)
expected = s1
result = pd.concat([s1, s2])
tm.assert_series_equal(result, expected)
def test_concat_sorts_columns(sort):
# GH-4588
df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
df2 = DataFrame({"a": [3, 4], "c": [5, 6]})
# for sort=True/None
expected = DataFrame(
{"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]},
columns=["a", "b", "c"],
)
if sort is False:
expected = expected[["b", "a", "c"]]
# default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], ignore_index=True, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_sorts_index(sort):
df1 = DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"])
df2 = DataFrame({"b": [1, 2]}, index=["a", "b"])
# For True/None
expected = DataFrame(
{"a": [2, 3, 1], "b": [1, 2, None]}, index=["a", "b", "c"], columns=["a", "b"]
)
if sort is False:
expected = expected.loc[["c", "a", "b"]]
# Warn and sort by default
with | tm.assert_produces_warning(None) | pandas._testing.assert_produces_warning |
#! /usr/bin/env python3
import re
import pandas as pd
from datetime import date
import category
import category_name
def is_row_in_category(row, categories):
"""Determines if row['place'] is in the given category
Args:
row (pandas.core.series.Series): Single row [date,place,amount] of a dataframe
category (list): Category list
Returns:
bool: True if row in category, False otherwise
"""
for place in categories:
if re.search(place, row['place'], re.IGNORECASE):
return True
return False
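# Example usage (hypothetical data; `category.Groceries` is assumed to be a list of
# place-name patterns defined in the imported `category` module):
#   row = pd.Series({'date': '2021-01-05', 'place': 'TESCO STORE 123', 'amount': 12.5})
#   is_row_in_category(row, category.Groceries)  # True only if a matching pattern is listed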
def organise_data_by_category(my_dataframe):
"""Parse all spending and populate smaller dataframes by categories
Args:
my_dataframe (pandas.core.frame.DataFrame): my_dataframe Unparsed dataframe with all uncategorized expenses
Returns:
dict: A dictionary of dataframe. [key] = category name; [value] = dataframe with the all categorie's related expenses
"""
print("Organise spendings into categories")
# it is roughly 3 times faster to create a dataframe from a full dictionary than to append row after row to an existing dataframe
dic_groc, dic_trans, dic_rest, dic_coffee, dic_bar, dic_misc, dic_bills = {}, {}, {}, {}, {}, {}, {}
g, t, r, c, b, m, f = [0] * 7 # indexes
# Go over each row of the unsorted dataframe and populate the matching category's dictionary.
for _, row in my_dataframe.iterrows():
if is_row_in_category(row, category.Groceries):
dic_groc[g] = row
g = g + 1
continue
if is_row_in_category(row, category.Transport):
dic_trans[t] = row
t = t + 1
continue
if is_row_in_category(row, category.Restaurant):
dic_rest[r] = row
r = r + 1
continue
if is_row_in_category(row, category.Coffee):
dic_coffee[c] = row
c = c + 1
continue
if is_row_in_category(row, category.Bar):
dic_bar[b] = row
b = b + 1
continue
if is_row_in_category(row, category.Bills):
dic_bills[f] = row
f = f + 1
continue
# If none of the above then let's put it in misc spending
dic_misc[m] = row
m = m + 1
df_groc = pd.DataFrame.from_dict(dic_groc, orient='index', columns=['date', 'place', 'amount'])
df_trans = pd.DataFrame.from_dict(dic_trans, orient='index', columns=['date', 'place', 'amount'])
df_rest = pd.DataFrame.from_dict(dic_rest, orient='index', columns=['date', 'place', 'amount'])
df_coffee = | pd.DataFrame.from_dict(dic_coffee, orient='index', columns=['date', 'place', 'amount']) | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
import utils
class Indicators:
def __init__(self, stock, start_date, end_date):
self.stock = stock
self.start_date = start_date
self.end_date = end_date
self.data = utils.read_stock_data(stock)
def calculate_all_indicators(self):
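"""Compute the full set of configured indicators for this stock and collect them
column-wise into a single DataFrame."""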
indicators = [
self.adj_close_price(),
self.bollinger_bands(),
self.cci(4),
self.cci(12),
self.cci(20),
self.ema(2),
self.ema(6),
self.ema(10),
self.ema(12),
self.macd(),
self.mfi(14),
self.mfi(16),
self.mfi(18),
self.obv(),
self.px_volume(),
self.rsi(6),
self.rsi(12),
self.sma(3),
self.sma(10),
self.trix(),
self.volatility(2),
self.volatility(4),
self.volatility(6),
self.volatility(8),
self.volatility(10),
self.volatility(12),
self.volatility(14),
self.volatility(16),
self.volatility(18),
self.volatility(20),
self.willr()
]
dates = utils.include_n_days_before(self.data, 1, self.start_date, self.end_date)['Date']
df = | pd.concat(indicators, axis=1) | pandas.concat |
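# Illustrative sketch only (the indicator methods referenced above, e.g. sma/ema/rsi,
# are not shown in this excerpt). Assuming a pandas Series of prices, a simple moving
# average could, for example, be computed as:
#
#   def sma(prices: pd.Series, window: int) -> pd.Series:
#       # rolling mean over `window` periods; the first window-1 values are NaN
#       return prices.rolling(window=window).mean()
#
#   # e.g. sma(close_prices, 10) for a 10-period simple moving average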
import csv
import os
import re
import string
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import pandas as pd
import numpy as np
class HeatmapPlotter:
def __init__(self, path, component):
self.path = path
self.component = component
# Load the Pearson correlation matrix for this component from its CSV results file
def _load_predictions_pearson(self):
name = self.component + '.*?csv$'
response = [file for file in os.listdir(self.path) if re.match(name, file)]
with open(os.path.join(self.path, response[0]), newline='') as f:
reader = csv.reader(f)
data = list(reader)
data = [list(map(float, a)) for a in data]
data = np.stack(data, axis=0)
return data
def create_heatmap(self):
data = self._load_predictions_pearson()
labels = [string.ascii_uppercase[a] for a in range(len(data))]
plt.figure()
ax = plt.axes()
r_heatmap = sns.heatmap(data, xticklabels=labels, yticklabels=labels, cmap="RdYlBu_r",
vmin=-0.1, vmax=1, ax=ax)
ax.set_title(self.component)
figure = r_heatmap.get_figure()
figure.savefig(os.path.join(self.path, self.component + '.png'), dpi=300)
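# Example usage (hypothetical path and component name):
#   plotter = HeatmapPlotter('/path/to/results', 'pearson_initial_test_A')
#   plotter.create_heatmap()  # saves 'pearson_initial_test_A.png' next to the source CSV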
class BarPlotter:
def __init__(self, path, components):
self.path = path
self.components = components
def _load_predictions_pearson(self, component):
name = component + '.*?csv$'
response = [file for file in os.listdir(self.path) if re.match(name, file)]
with open(os.path.join(self.path, response[0]), newline='') as f:
reader = csv.reader(f)
data = list(reader)
data = [list(map(float, a)) for a in data]
data = np.stack(data, axis=0)
return data
def create_bar(self):
df = pd.DataFrame({'A': [], 'B': [], 'C': []})
if 'pairs_structure' in self.path:
labels = ['Initial \npair', 'Initial \nshuffled', 'Settled \npair', 'Settled \nshuffled']
if 'associative_inference' in self.path:
labels = ['Initial \ntransitive', 'Initial \ndirect', 'Settled \ntransitive', 'Settled \ndirect']
for a in self.components:
data_initial = self._load_predictions_pearson("pearson_initial_test_" + a)
tmp_df_A = pd.DataFrame({'A': data_initial[:, 0], 'B': labels[0]})
tmp_df_B = pd.DataFrame({'A': data_initial[:, 1], 'B': labels[1]})
tmp_df_ini = | pd.concat([tmp_df_A, tmp_df_B]) | pandas.concat |
import requests
import pandas as pd
class Vacinacao():
url = 'https://imunizacao-es.saude.gov.br/_search?scroll=1m'
url_scroll = 'https://imunizacao-es.saude.gov.br/_search/scroll'
usuario = 'imunizacao_public'
senha = '<PASSWORD>'
def __init__(self):
pass
def _arruma_colunas(self,data_frame):
data_frame.drop(['_index', '_type', '_id', '_score'], axis=1, inplace=True)
colunas = data_frame.columns
colunas = [coluna.replace('_source.', '') for coluna in colunas]
data_frame.columns = colunas
data_frame.set_index('document_id', inplace=True)
return data_frame
def get_dados_vacinacao(self,filtro=None,limite_memoria=None,path='',paginas=None):
arquivo_nome_parte = 'VACINACAO_'
data = {'size': '10000'}
if filtro:
if type(filtro) == dict:
data = {'size': '10000', 'query': {'match': filtro}}
for key, value in filtro.items():
arquivo_nome_parte += str(key) + '_' + str(value) + '_'
else:
raise Exception(f'The filtro argument must be a dictionary. Received {type(filtro)}')
pagina = 1
primeira_pagina = 1
estouro_memoria = False
resposta_ok = False
r = requests.post(self.url, json=data, auth=(self.usuario, self.senha))
if r.ok:
resposta_ok = True
resposta = r.json()
hits = resposta['hits']['hits']
if not hits or len(hits) == 0:
raise Exception(f'No values returned for the filter {filtro}')
df = pd.json_normalize(resposta, ['hits', 'hits'])
df = self._arruma_colunas(df)
memory_usage = df.memory_usage().sum() / (1024 ** 2)
print(f'Página {pagina} - Memory Usage:{"{:3.2f}".format(memory_usage)}MB')
pagina += 1
hits = resposta['hits']['hits']
data = {'scroll_id': resposta['_scroll_id'], 'scroll': '1m'}
while r.ok and hits != None and len(hits) > 0 and (not paginas or pagina <= paginas):
r = requests.post(self.url_scroll, json=data, auth=(self.usuario, self.senha))
if r.ok:
resposta = r.json()
try:
hits = resposta['hits']['hits']
data = {'scroll_id': resposta['_scroll_id'], 'scroll': '1m'}
if limite_memoria and memory_usage > limite_memoria:
arquivo = arquivo_nome_parte + 'PAGINA_' + str(primeira_pagina) + '_A_' + str(
pagina - 1) + '.csv'
df.to_csv(path + arquivo, sep=';', decimal=',', encoding='latin1')
print(f'File {arquivo} saved to {path}')
df = | pd.json_normalize(resposta, ['hits', 'hits']) | pandas.json_normalize |
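# Example usage (illustrative only; the filter field below is an assumption about the
# Elasticsearch document schema, not something defined in this module):
#   vac = Vacinacao()
#   vac.get_dados_vacinacao(filtro={'paciente_endereco_uf': 'SP'},
#                           limite_memoria=500, path='./', paginas=10)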
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import sys
import matplotlib.pyplot as plt
sys.path.insert(1, '../MLA')
import imp
import numpy as np
import xgboost_wrapper as xw
import regression_wrappers as rw
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# In[2]:
from hyperopt import hp, tpe
from hyperopt.fmin import fmin
from hyperopt import STATUS_OK
from sklearn.model_selection import cross_val_score, StratifiedKFold
# In[3]:
# A = pd.read_csv("CHESS%20COVID19%20CaseReport%2020200401.csv")
A = pd.read_csv("CHESS COVID19 CaseReport 20200628.csv")
#
# In[4]:
A_ = pd.read_csv("CHESS COVID19 CaseReport 20200628.csv")
idx = A_['asymptomatictesting']=='Yes'
A_ = A_.loc[idx,][['infectionswabdate', 'hospitaladmissiondate']]
lag = pd.to_datetime(A_['infectionswabdate']).dt.round('D') - pd.to_datetime(A_['hospitaladmissiondate']).dt.round('D')
# In[5]:
# plt.hist(lag.dt.days, bins=50);
print('swab at or after admission:')
print(np.sum(lag.dt.days >= 0))
print('swab before admission:')
print(np.sum(lag.dt.days < 0))
# In[6]:
# A = pd.read_csv("CHESS COVID19 CaseReport 20200601.csv")
def to_eliminate(x):
if str(x.finaloutcomedate) == 'nan':
if str(x.finaloutcome) == 'nan':
return True
elif 'still on unit' in str(x.finaloutcome):
return True
else:
return False
elif str(x.finaloutcomedate) == '1900-01-01':
return True
else:
return False
to_elimi = A[['finaloutcomedate','dateadmittedicu','finaloutcome']].apply(to_eliminate, axis=1)
# In[7]:
to_elimi.sum()
# In[8]:
A['dateupdated'] = pd.to_datetime(A['dateupdated']).dt.round('D')
A[['hospitaladmissiondate','finaloutcomedate','dateadmittedicu','finaloutcome','dateupdated']].head()
# In[9]:
A = A[~to_elimi]
# In[10]:
pd.to_datetime(A['hospitaladmissiondate']).min(), pd.to_datetime( A['dateleavingicu']).max()
# In[11]:
A = A.loc[~A.caseid.duplicated()]
# In[12]:
A = A.rename(columns={'immunosuppressiontreatmentcondit': 'immunosuppressiontreatmentcondition'})
A = A.rename(columns={'immunosuppressiondiseaseconditio': 'immunosuppressiondiseasecondition'})
# In[13]:
for feature in ['isviralpneumoniacomplication',
'isardscomplication', 'isunknowncomplication',
'isothercoinfectionscomplication', 'isothercomplication',
'issecondarybacterialpneumoniacom',
'chronicrespiratory', 'asthmarequiring',
'chronicheart',
'chronicrenal', 'asymptomatictesting',
'chronicliver',
'chronicneurological',
'immunosuppressiontreatment', 'immunosuppressiondisease', 'obesityclinical', 'pregnancy',
'other',
'hypertension', 'seriousmentalillness']:
A[feature] = A[feature].apply(lambda x: 1 if 'Yes' in str(x) else 0)
# In[14]:
A = A.rename(columns={'other': 'other_comorbidity'})
# In[15]:
A['age10year'] = A['ageyear'].apply(lambda x: x/10)
# In[16]:
A['sex_is_M'] = A['sex'].apply(lambda x: 1 if 'Male' in x else 0)
# In[17]:
A['sex_is_unkn'] = A['sex'].apply(lambda x: 1 if 'Unknown' in x else 0)
# In[18]:
A = A.drop(columns = ['ageyear', 'sex'])
# In[19]:
A['ethnicity'] = A['ethnicity'].apply(lambda x: 'Eth. NA' if pd.isna(x) else x)
# In[20]:
A['ethnicity'] = A['ethnicity'].apply(lambda x: x.strip(' '))
# In[21]:
def stratify(df, feature):
keys = [str(s) for s in df[feature].unique()]
keys = list(set(keys))
df = df.copy()
for key in keys:
df[key.strip(' ')] = df[feature].apply(lambda x, key: 1 if str(x)==key else 0, args=(key, ))
return df.drop(columns=feature)
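# Example (toy data): a column with values ['T1 diabetes', 'T2 diabetes', NaN] becomes
# three 0/1 indicator columns named 'T1 diabetes', 'T2 diabetes' and 'nan', and the
# original column is dropped:
#   toy = pd.DataFrame({'diabetes': ['T1 diabetes', 'T2 diabetes', np.nan]})
#   stratify(toy, 'diabetes')  # -> columns: 'T1 diabetes', 'T2 diabetes', 'nan'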
# In[22]:
A['ethnicity'].value_counts()
# In[23]:
A['ethnicity'].value_counts().sum(), A.shape
# In[24]:
A = stratify(A, 'ethnicity')
# In[25]:
A = A.rename(columns={'Unknown':'Eth. unknown',
'other':'Other ethn.',
'White British': 'White British',
'Other White': 'Other white',
'Other Asian': 'Other Asian',
'Black African':'Black African',
'Black Caribbean':'Black Caribbean',
'Other Black': 'Other black',
'White Irish': 'White Irish',
'White and Black Caribbean':'White and black Caribbean',
'Other mixed':'Other mixed',
'White and Black African':'White and black African',
'White and Asian':'White and Asian'})
# In[26]:
def diabetes_type(x):
if x.isdiabetes == 'Yes':
if x.diabetestype == 'Type I':
return 'T1 diabetes'
else:
return 'T2 diabetes'
else:
return np.nan
# In[27]:
A['diabetes'] = A[['isdiabetes', 'diabetestype']].apply(diabetes_type, axis=1)
# In[28]:
A = stratify(A, 'diabetes')
# In[29]:
# drop nan column created from stratification of diabetes categorical
A = A.drop(columns=['isdiabetes','nan', 'diabetestype'])
# In[30]:
A = A.drop(columns=['organismname'])
# In[31]:
to_drop = ['trustcode', 'trustname', 'dateupdated', 'weekno',
'weekofadmission', 'yearofadmission', 'agemonth', 'postcode',
'estimateddateonset', 'infectionswabdate', 'labtestdate',
'typeofspecimen', 'otherspecimentype', 'covid19',
'influenzaah1n1pdm2009', 'influenzaah3n2', 'influenzab',
'influenzaanonsubtyped', 'influenzaaunsubtypable', 'rsv',
'otherresult', 'otherdetails', 'admissionflu', 'admissionrsv',
'admissioncovid19', 'admittedfrom', 'otherhospital', 'hospitaladmissionhours',
'hospitaladmissionminutes', 'hospitaladmissionadmittedfrom',
'wasthepatientadmittedtoicu',
'hoursadmittedicu', 'minutesadmittedicu', 'sbother', 'sbdate', 'respiratorysupportnone',
'oxygenviacannulaeormask', 'highflownasaloxygen',
'noninvasivemechanicalventilation',
'invasivemechanicalventilation', 'respiratorysupportecmo',
'mechanicalinvasiveventilationdur', 'anticovid19treatment',
'chronicrespiratorycondition', 'respiratorysupportunknown',
'asthmarequiringcondition', 'seriousmentalillnesscondition',
'chronicheartcondition', 'hypertensioncondition',
'immunosuppressiontreatmentcondition',
'immunosuppressiondiseasecondition',
'obesitybmi', 'gestationweek', 'travelin14days',
'travelin14dayscondition', 'prematurity', 'chroniclivercondition',
'worksashealthcareworker', 'contactwithconfirmedcovid19case',
'contactwithconfirmedcovid19casec', 'othercondition',
'transferdestination', 'chronicneurologicalcondition',
'outcomeother', 'causeofdeath', 'chronicrenalcondition', 'othercomplication']
# In[32]:
A = A.drop(columns=to_drop)
# In[33]:
A['caseid'] = A['caseid'].astype(int)
# In[34]:
A = A.set_index('caseid')
# In[35]:
A = A.loc[A.age10year > 0]
# In[36]:
A['is_ICU'] = ~A['dateadmittedicu'].isna()
# In[37]:
A['is_death'] = A['finaloutcome'] == 'Death'
# In[38]:
print(A['is_ICU'].sum())
# In[39]:
print(A['is_death'].sum())
# In[40]:
print((A['is_death'] & A['is_ICU']).sum())
# In[41]:
A.to_csv('CHESS_comorb_only_with_outcome.csv')
# In[42]:
A = pd.read_csv('CHESS_comorb_only_with_outcome.csv')
A = A.set_index('caseid')
# In[43]:
min_date = pd.to_datetime(A.hospitaladmissiondate).min()
A['day_from_beginning1'] = (pd.to_datetime(A.hospitaladmissiondate) - min_date).dt.days
plt.hist(A['day_from_beginning1'], bins=100);
# In[44]:
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
transformed = sc.fit_transform(A['day_from_beginning1'].values.reshape(-1, 1))
A['days from beginning'] = transformed.flatten()  # + np.mean(transformed.flatten())
A = A.drop(columns=['day_from_beginning1'])
# In[45]:
plt.hist(A['days from beginning'], bins=100);
# In[46]:
A['clinical experience'] = A['days from beginning'].rank()
A['clinical experience'] = A['clinical experience'] / A['clinical experience'].max()
# In[47]:
plt.hist(A['clinical experience'], bins=100);
# In[48]:
def int_to_date(x, min_date):
timestamp = pd.to_timedelta(x, unit='D') + min_date
return '-'.join([str(timestamp.day), str(timestamp.month), str(timestamp.year)[-2:]])
# pd.to_timedelta(A['day_from_beginning1'], unit='D') + min_date).head()
# In[49]:
A['is_death'].sum(), A['is_ICU'].sum()
# In[50]:
a = (10 * A['age10year']).value_counts().reset_index().sort_values(by='index').values
plt.plot(a[:,0], a[:,1],'.')
plt.xlabel('age');
(10 * A['age10year']).describe()
# In[51]:
dizionario = {'chronicrespiratory':'Chronic respiratory disease',
'asthmarequiring':'Asthma',
'chronicheart':'Chronic heart disease',
'chronicrenal':'Chronic renal disease',
'chronicneurological':'Chronic neurological cond.',
'immunosuppressiontreatment':'Immunosuppression treatment',
'immunosuppressiondisease':'Immunosuppression disease',
'obesityclinical':'Obesity (clinical)',
'other_comorbidity':'Other comorbidity',
'age10year': 'Age (x10 years)',
'sex_is_M':'Sex male',
'sex_is_unkn':'Sex unknown',
'asymptomatictesting':'Asymptomatic testing',
'seriousmentalillness':'Serious mental illness',
'chronicliver':'Chronic liver',
'chronicliver_fatty':'Chronic fat liver',
'chronicliver_alcohol':'Chronic alcohol. liver',
'chronicliver_other': 'Chronic liver disease',
'hypertension': 'Hypertension',
'pregnancy': 'Pregnancy'}
# In[52]:
A = A.rename(columns=dizionario)
# In[53]:
A['Sex unknown'].sum() / A.shape[0] * 100
# In[54]:
A['Sex male'].sum() / A.shape[0] * 100
# In[55]:
A[A['is_ICU']]['is_death'].sum()
# In[56]:
A.shape
# In[57]:
A = A.rename(columns={'days from beginning': 'Admission day'})
# # Clustermap
# In[58]:
get_ipython().system('mkdir results_10Nov')
# In[59]:
import seaborn as sns
sns.distplot(A[A['Eth. unknown'].astype('bool')]['Age (x10 years)'] * 10)
sns.distplot(A[A['Eth. NA'].astype('bool')]['Age (x10 years)']* 10)
sns.distplot(A['Age (x10 years)']* 10)
# In[60]:
import seaborn as sns
C = A.drop(columns=['dateadmittedicu', 'hospitaladmissiondate', 'finaloutcome', 'finaloutcomedate', 'dateleavingicu',
'isviralpneumoniacomplication', 'issecondarybacterialpneumoniacom',
'isardscomplication', 'isunknowncomplication',
'isothercoinfectionscomplication', 'isothercomplication'])
# Draw the full plot
ethnicity = ['White Irish', 'Black Caribbean', 'Other Asian', 'White and black African',
'Bangladeshi', 'Indian',
'Other black', 'Chinese', 'Other white', 'Black African', 'White and Asian',
'Pakistani', 'White British', 'Other mixed', 'White and black Caribbean', 'Other ethn.',
'Eth. unknown', 'Eth. NA']
print("number of people who didn't report any ethnicity")
print(C[ethnicity].apply(lambda x: ~x.any(), axis=1).sum())
# C['NA'] = C[ethnicity].apply(lambda x: ~x.any(), axis=1)
comorbidities = ['chronicrespiratory', 'asthmarequiring', 'chronicheart', 'hypertension',
'chronicrenal', 'immunosuppressiontreatment',
'immunosuppressiondisease', 'obesityclinical', 'pregnancy',
'other_comorbidity', 'age10year', 'sex_is_unkn', 'sex_is_M',
'T1 diabetes', 'T2 diabetes', 'seriousmentalillness',
'chronicneurological', 'chronicliver', 'asymptomatictesting']
comorbidities =[dizionario.get(x) if x in dizionario else x for x in comorbidities ]
Ccorr = C.corr();
Ccorr1 = Ccorr[comorbidities +['is_death', 'is_ICU']].loc[ethnicity,:]
Ccorr1 = Ccorr1.rename(columns={'is_death':'death', 'is_ICU':'ICUA'})
fig,ax = plt.subplots(1, 1, figsize=(10, 8))
sns.heatmap(Ccorr1, center=0, cmap="vlag", ax=ax,
# row_colors=network_colors,
# col_colors=network_colors,
linewidths=.75)
# figsize=(13, 13))
fig = plt.gcf()
plt.tight_layout()
plt.savefig('results_10Nov/correlation1_new.png')
# In[61]:
dizionarioR = {'Age..x10.years.':'Age (x10 years)',
'Asthma':'Asthma',
'Black.African':'Black African',
'Black.Caribbean':'Black Caribbean',
'Chronic.heart.disease':'Chronic heart disease',
'Chronic.liver':'Chronic liver',
'Chronic.neurological.cond.':'Chronic neurological cond.',
'Chronic.renal.disease':'Chronic renal disease',
'Chronic.respiratory.disease':'Chronic respiratory disease',
'Immunosuppression.disease':'Immunosuppression disease',
'Immunosuppression.treatment':'Immunosuppression treatment',
'Obesity..clinical.':'Obesity (clinical)',
'Other.Asian':'Other Asian',
'Other.black':'Other black',
'Other.comorbidity':'Other comorbidity',
'Other.ethn.':'Other ethn.',
'Other.mixed':'Other mixed',
'Other.white':'Other white',
'Serious.mental.illness':'Serious mental illness',
'Sex.male':'Sex male',
'Sex.unknown':'Sex unknown',
'T1.diabetes':'T1 diabetes',
'T2.diabetes':'T2 diabetes',
'White.and.Asian':'White and Asian',
'White.and.black.African':'White and black African',
'White.and.black.Caribbean':'White and black Caribbean',
'White.British':'White British',
'White.Irish':'White Irish',
'Asymptomatic.testing':'Asymptomatic testing',
'Admission.day':'Admission day',
'Clinical.experience':'Clinical experience',
'Eth..NA':'Eth. NA',
'Eth..unknown':'Eth. unknown'
}
# # Logistic 1
# In[242]:
ethnicity = ['Black Caribbean', 'Other Asian', 'White and black African', 'Bangladeshi', 'Indian',
'Other black', 'Chinese', 'Other white', 'Black African', 'White and Asian', 'Pakistani',
'Eth. unknown', 'Eth. NA', 'Other mixed', 'White and black Caribbean', 'White British', 'White Irish', 'Other ethn.']
# In[243]:
B = A.drop(columns=['dateadmittedicu', 'hospitaladmissiondate', 'finaloutcome', 'finaloutcomedate', 'dateleavingicu',
'isviralpneumoniacomplication', 'issecondarybacterialpneumoniacom',
'isardscomplication', 'isunknowncomplication', 'patientstillonicu',
'isothercoinfectionscomplication', 'isothercomplication']).drop(columns=['clinical experience'])
# In[244]:
percentages_eth = pd.DataFrame((B[ethnicity].sum() / B.shape[0]).sort_values(ascending=False))
percentages_com = pd.DataFrame((B[comorbidities].sum() / B.shape[0]).sort_values(ascending=False))
# In[245]:
percentages_eth
# In[246]:
percentages_com
# In[247]:
percentages_eth.to_excel('results_10Nov/frequencies_et.xls')
percentages_com.to_excel('results_10Nov/frequencies_com.xls')
# In[68]:
targets = ['is_death', 'is_ICU']
# In[69]:
B=B.drop(columns='White British')
# ## Death outcome
# In[70]:
target = 'is_death'
predictors = [x for x in B.columns if x not in targets]
X_train, X_test, y_train, y_test = train_test_split(B[predictors], B[target], test_size=0.1, random_state=42)
# In[71]:
X_train.to_csv('results_10Nov/X_train_death.csv')
pd.DataFrame(y_train).to_csv('results_10Nov/y_train_death.csv')
X_test.to_csv('results_10Nov/X_test_death.csv')
# In[72]:
get_ipython().system('pwd')
# In[73]:
get_ipython().system('/usr/bin/Rscript logisticregression.R results_10Nov/X_train_death.csv results_10Nov/y_train_death.csv results_10Nov/X_test_death.csv results_10Nov/logistic_summary_death.csv results_10Nov/logistic_prediction_death.csv')
# In[74]:
Vif_death = pd.read_csv("tmp.csv")
Vif_death.sort_values(by='vif.logitMod.', ascending=False).head()
# In[75]:
# Vif_death.sort_values(by='vif.logitMod.', ascending=False).rename(index=dizionarioR).to_excel('results_10Nov/vif_GLM_death_1.xls')
# In[76]:
logistic_summary = pd.read_csv("results_10Nov/logistic_summary_death.csv")
logistic_prediction = pd.read_csv("results_10Nov/logistic_prediction_death.csv")
# In[77]:
fig, ax = plt.subplots(1,1,figsize=(4, 3.5))
xw.buildROC(y_test, logistic_prediction, ax, color=None, label=None, CI=True)
ax.set_title('ROC GML logistic for death outcome');
# fig.savefig('results_10Nov/ROC_GML_death_1.png')
# In[78]:
fig_, ax_ = plt.subplots(1, 2, figsize=(4 * 2.2, 3.5), sharey=True)
xw.buildROC(y_test, logistic_prediction, ax_[0], color=None, label=None, CI=True)
ax_[0].set_title('ROC GML logistic for death outcome');
# fig.savefig('results_10Nov/ROC_GML_death_1.png')
# In[79]:
logistic_summary['OR'] = np.exp(logistic_summary['Estimate'])
# Taylor series-based delta method
logistic_summary['OR_sd'] = logistic_summary[['Std. Error', 'OR']].apply(lambda x: np.sqrt(x.OR**2) * x['Std. Error'], axis=1)
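# i.e. SE(OR) is approximately |OR| * SE(beta): since OR = exp(beta), the first-order
# delta method gives Var(OR) ~= exp(beta)^2 * Var(beta); note np.sqrt(x.OR**2) is simply abs(x.OR).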
# In[80]:
def is_not_ethn(x):
if x in ['Bangladeshi', 'Black.African', 'Black.Caribbean', 'Chinese', 'Other.ethn.', 'Indian', 'Other.Asian',
'Other.black', 'Other.mixed',
'Other.white', 'Pakistani', 'White.and.Asian', 'White.and.black.African',
'White.and.black.Caribbean', 'Eth..unknown', 'Eth..NA','White.Irish']:
return 1
elif ('Sex' in x) or ('Age' in x) or ('Admis' in x) or ('Pregnan' in x) or ("Asympt" in x) or ("Immunosuppression.treatment" in x):
return 0
else:
return 2
# In[81]:
logistic_summary['order'] = logistic_summary['Row.names'].apply(is_not_ethn)
# In[82]:
logistic_summary = logistic_summary.sort_values(by=['order','Estimate'], ascending=False).drop(columns=['order'])
# In[83]:
logistic_summary['Row.names'] = logistic_summary['Row.names'].apply(lambda x: dizionarioR.get(x) if x in dizionarioR else x)
# In[84]:
logistic_summary
# In[85]:
def pvaluesymbol(P):
if P > 0.05:
return ''
elif (P <= 0.05) & (P > 0.01):
return '*'
elif (P <= 0.01) & (P > 0.001):
return '**'
elif (P <= 0.001) & (P > 0.0001):
return '***'
elif P <= 0.0001:
return '****'
# In[86]:
logistic_summary['pvaluesymbol'] = logistic_summary['Pr(>|z|)'].apply(pvaluesymbol)
# In[87]:
logistic_summary.to_excel('results_10Nov/coefficients_GLM_death_1.xls')
# In[88]:
def plot_coeff(summary, ax, title=None, xtext=None):
summary = summary.sort_values(by='Estimate', ascending=True)
summary.plot.barh(x='Row.names', y='Estimate',
ax=ax, color='none',
xerr='Std. Error',
legend=False)
ax.set_ylabel('')
ax.set_xlabel('')
ax.set_title(title)
ax.scatter(y= | pd.np.arange(summary.shape[0]) | pandas.np.arange |
# =========================================================================== #
# ANALYSIS #
# =========================================================================== #
'''Analysis and inference functions'''
# %%
# --------------------------------------------------------------------------- #
# LIBRARIES #
# --------------------------------------------------------------------------- #
import os
import sys
import inspect
import numpy as np
import pandas as pd
import scipy
from scipy import stats
from scipy.stats import kurtosis, skew
import textwrap
import univariate
import visual
import description
import independence
# %%
# ---------------------------------------------------------------------------- #
# ANALYSIS #
# ---------------------------------------------------------------------------- #
def analysis(df, x, y, hue=None):
k = independence.Kruskal()
a = independence.Anova()
if ((df[x].dtype == np.dtype('int64') or df[x].dtype == np.dtype('float64')) and
(df[y].dtype == np.dtype('int64') or df[y].dtype == np.dtype('float64'))):
desc = | pd.DataFrame() | pandas.DataFrame |
import json
import os
import glob
import random
from typing import Union
try:
import xarray as xr
except ModuleNotFoundError:
xr = None
import numpy as np
import pandas as pd
from .datasets import Datasets
from .utils import check_attributes, download, sanity_check
from ai4water.utils.utils import dateandtime_now
try: # shapely may not be installed, as it may be difficult to install and is only needed for plotting data.
from ai4water.pre_processing.spatial_utils import plot_shapefile
except ModuleNotFoundError:
plot_shapefile = None
# directory separator
SEP = os.sep
def gb_message():
link = "https://doi.org/10.5285/8344e4f3-d2ea-44f5-8afa-86d2987543a9"
raise ValueError(f"Dwonlaoad the data from {link} and provide the directory "
f"path as dataset=Camels(data=data)")
class Camels(Datasets):
"""
Get CAMELS dataset.
This class first downloads the CAMELS dataset if it is not already downloaded.
Then the selected attributes for the selected station ids are fetched and provided
to the user through the `fetch` method.
Attributes
-----------
- ds_dir str/path: directory of the dataset
- dynamic_features list: tells which dynamic attributes are available in
this dataset
- static_features list: a list of static attributes.
- static_attribute_categories list: tells which kinds of static attributes
are present in this dataset.
Methods
---------
- stations : returns name/id of stations for which the data (dynamic attributes)
exists as list of strings.
- fetch : fetches all attributes (both static and dynamic type) of all
station/gauge_ids or a specified station. It can also be used to
fetch all attributes of a number of station ids either by providing
their gauge_id or by just saying that we need data of 20 stations
which will then be chosen randomly.
- fetch_dynamic_features :
fetches specified dynamic attributes of one specified station. If the
dynamic attribute is not specified, all dynamic attributes will be
fetched for the specified station. If station is not specified, the
specified dynamic attributes will be fetched for all stations.
- fetch_static_features :
works the same as `fetch_dynamic_features` but for `static` attributes.
Here if the `category` is not specified then static attributes of
the specified station for all categories are returned.
stations : returns list of stations
"""
DATASETS = {
'CAMELS-BR': {'url': "https://zenodo.org/record/3964745#.YA6rUxZS-Uk",
},
'CAMELS-GB': {'url': gb_message},
}
def stations(self):
raise NotImplementedError
def _read_dynamic_from_csv(self, stations, dynamic_features, st=None, en=None):
raise NotImplementedError
def fetch_static_features(self, station, features):
raise NotImplementedError
@property
def start(self): # start of data
raise NotImplementedError
@property
def end(self): # end of data
raise NotImplementedError
@property
def dynamic_features(self)->list:
raise NotImplementedError
def _check_length(self, st, en):
if st is None:
st = self.start
if en is None:
en = self.end
return st, en
def to_ts(self, static, st, en, as_ts=False, freq='D'):
st, en = self._check_length(st, en)
if as_ts:
idx = pd.date_range(st, en, freq=freq)
static = pd.DataFrame(np.repeat(static.values, len(idx), axis=0), index=idx,
columns=static.columns)
return static
else:
return static
@property
def camels_dir(self):
"""Directory where all camels datasets will be saved. This will under
datasets directory"""
return os.path.join(self.base_ds_dir, "CAMELS")
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
if x is None:
x = os.path.join(self.camels_dir, self.__class__.__name__)
if not os.path.exists(x):
os.makedirs(x)
# sanity_check(self.name, x)
self._ds_dir = x
def fetch(self,
stations: Union[str, list, int, float, None] = None,
dynamic_features: Union[list, str, None] = 'all',
static_features: Union[str, list, None] = None,
st: Union[None, str] = None,
en: Union[None, str] = None,
as_dataframe:bool = False,
**kwargs
) -> Union[dict, pd.DataFrame]:
"""
Fetches the attributes of one or more stations.
Arguments:
stations : if string, it is supposed to be a station name/gauge_id.
If list, it will be a list of station/gauge_ids. If int, it will
be supposed that the user wants data for this number of
stations/gauge_ids. If None (default), then attributes of all
available stations. If float, it will be supposed that the user
wants data of this fraction of stations.
dynamic_features : If not None, then it is the attributes to be
fetched. If None, then all available attributes are fetched
static_features : list of static attributes to be fetched. None
means no static attribute will be fetched.
st : starting date of data to be returned. If None, the data will be
returned from where it is available.
en : end date of data to be returned. If None, then the data will be
returned till the date data is available.
as_dataframe : whether to return dynamic attributes as pandas
dataframe or as xarray dataset.
kwargs : keyword arguments to read the files
returns:
If both static and dynamic features are obtained then it returns a
dictionary whose keys are station/gauge_ids and values are the
attributes and dataframes.
Otherwise either dynamic or static features are returned.
"""
if isinstance(stations, int):
# the user has asked to randomly provide data for some specified number of stations
stations = random.sample(self.stations(), stations)
elif isinstance(stations, list):
pass
elif isinstance(stations, str):
stations = [stations]
elif isinstance(stations, float):
num_stations = int(len(self.stations()) * stations)
stations = random.sample(self.stations(), num_stations)
elif stations is None:
# fetch for all stations
stations = self.stations()
else:
raise TypeError(f"Unknown value provided for stations {stations}")
if xr is None:
raise ModuleNotFoundError("modeule xarray must be installed to use `datasets` module")
return self.fetch_stations_attributes(stations,
dynamic_features,
static_features,
st=st,
en=en,
as_dataframe=as_dataframe,
**kwargs)
def _maybe_to_netcdf(self, fname:str):
self.dyn_fname = os.path.join(self.ds_dir, f'{fname}.nc')
if not os.path.exists(self.dyn_fname):
# saving all the data in netCDF file using xarray
print(f'converting data to netcdf format for faster io operations')
data = self.fetch(static_features=None)
data_vars = {}
coords = {}
for k, v in data.items():
data_vars[k] = (['time', 'dynamic_features'], v)
index = v.index
index.name = 'time'
coords = {
'dynamic_features': list(v.columns),
'time': index
}
xds = xr.Dataset(
data_vars=data_vars,
coords=coords,
attrs={'date': f"create on {dateandtime_now()}"}
)
xds.to_netcdf(self.dyn_fname)
def fetch_stations_attributes(self,
stations: list,
dynamic_features='all',
static_features=None,
st=None,
en=None,
as_dataframe:bool = False,
**kwargs):
"""Reads attributes of more than one stations.
Arguments:
stations : list of stations for which data is to be fetched.
dynamic_features : list of dynamic attributes to be fetched.
if 'all', then all dynamic attributes will be fetched.
static_features : list of static attributes to be fetched.
If `all`, then all static attributes will be fetched. If None,
then no static attribute will be fetched.
st : start of data to be fetched.
en : end of data to be fetched.
as_dataframe : whether to return the data as pandas dataframe. default
is xr.dataset object
kwargs dict: additional keyword arguments
Returns:
Dynamic and static features of multiple stations. Dynamic features
are returned as an xr.Dataset by default unless `as_dataframe` is True,
in which case they are a pandas dataframe with a multiindex. If an
xr.Dataset, it consists of one `data_var` per station and, for each
station, the `DataArray` has dimensions (time, dynamic_features),
where the length of `time` is defined by `st` and `en`.
When the returned object is a pandas DataFrame, the first index
is `time` and the second index is `dynamic_features`. Static attributes
are always returned as a pandas DataFrame of shape
`(stations, static_features)`. If `dynamic_features` is None,
they are not returned and the returned value consists only of
static features. The same holds for `static_features`.
If both are not None, the returned type is a dictionary with
`static` and `dynamic` keys.
Raises:
ValueError, if both dynamic_features and static_features are None
"""
st, en = self._check_length(st, en)
if dynamic_features is not None:
dynamic_features = check_attributes(dynamic_features, self.dynamic_features)
if not os.path.exists(self.dyn_fname):
# read from csv files
# following code will run only once when fetch is called inside init method
dyn = self._read_dynamic_from_csv(stations, dynamic_features, st=st, en=en)
else:
dyn = xr.load_dataset(self.dyn_fname) # daataset
dyn = dyn[stations].sel(dynamic_features=dynamic_features, time=slice(st, en))
if as_dataframe:
dyn = dyn.to_dataframe(['time', 'dynamic_features'])
if static_features is not None:
static = self.fetch_static_features(stations, static_features)
stns = {'dynamic': dyn, 'static': static}
else:
stns = dyn
elif static_features is not None:
return self.fetch_static_features(stations, static_features)
else:
raise ValueError
return stns
def fetch_dynamic_features(self,
stn_id,
attributes='all',
st=None,
en=None,
as_dataframe=False):
"""Fetches all or selected dynamic attributes of one station."""
assert isinstance(stn_id, str)
station = [stn_id]
return self.fetch_stations_attributes(station,
attributes,
None,
st=st,
en=en,
as_dataframe=as_dataframe)
def fetch_station_attributes(self,
station: str,
dynamic_features: Union[str, list, None] = 'all',
static_features: Union[str, list, None] = None,
as_ts: bool = False,
st: Union[str, None] = None,
en: Union[str, None] = None,
**kwargs) -> pd.DataFrame:
"""
Fetches attributes for one station.
Arguments:
station : station id/gauge id for which the data is to be fetched.
dynamic_features
static_features
as_ts : whether static attributes are to be converted into a time
series or not. If yes, then the returned time series will have the
same length as the dynamic attributes.
st : starting point from which the data is to be fetched. By default
the data will be fetched from where it is available.
en : end point of data to be fetched. By default the data will be fetched
till the date the data is available.
Return:
dataframe if as_ts is True else it returns a dictionary of static and
dynamic attributes for a station/gauge_id
"""
st, en = self._check_length(st, en)
station_df = pd.DataFrame()
if dynamic_features:
dynamic = self.fetch_dynamic_features(station, dynamic_features, st=st,
en=en, **kwargs)
station_df = pd.concat([station_df, dynamic])
if static_features is not None:
static = self.fetch_static_features(station, static_features)
if as_ts:
station_df = pd.concat([station_df, static], axis=1)
else:
station_df ={'dynamic': station_df, 'static': static}
elif static_features is not None:
station_df = self.fetch_static_features(station, static_features)
return station_df
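# Illustrative sketch of the fetch API defined above (assuming one of the concrete
# subclasses defined below, e.g. CAMELS_US, has already been downloaded):
#   ds = CAMELS_US()
#   ds.stations()[:5]              # first five gauge ids
#   dyn = ds.fetch(stations=0.01,  # 1% of the stations, chosen randomly
#                  dynamic_features='all',
#                  st='20000101', en='20051231',
#                  as_dataframe=True)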
class LamaH(Camels):
"""
Large-Sample Data for Hydrology and Environmental Sciences for Central Europe
from url = "https://zenodo.org/record/4609826#.YFNp59zt02w"
paper: https://essd.copernicus.org/preprints/essd-2021-72/
"""
url = "https://zenodo.org/record/4609826#.YFNp59zt02w"
_data_types = ['total_upstrm', 'diff_upstrm_all', 'diff_upstrm_lowimp'
]
time_steps = ['daily', 'hourly'
]
static_attribute_categories = ['']
def __init__(self, *,
time_step: str,
data_type: str,
**kwargs
):
"""
Arguments:
time_step : possible values are `daily` or `hourly`
data_type : possible values are `total_upstrm`, `diff_upstrm_all`
or 'diff_upstrm_lowimp'
"""
assert time_step in self.time_steps, f"invalid time_step {time_step} given"
assert data_type in self._data_types, f"invalid data_type {data_type} given."
self.time_step = time_step
self.data_type = data_type
super().__init__(**kwargs)
self._download()
fpath = os.path.join(self.ds_dir, 'lamah_diff_upstrm_lowimp_hourly_dyn.nc')
_data_types = self._data_types if self.time_step == 'daily' else ['total_upstrm']
if not os.path.exists(fpath):
for dt in _data_types:
for ts in self.time_steps:
self.time_step = ts
self.data_type = dt
fname = f"lamah_{dt}_{ts}_dyn"
self._maybe_to_netcdf(fname)
self.time_step = time_step
self.data_type = data_type
self.dyn_fname = os.path.join(self.ds_dir, f'lamah_{data_type}_{time_step}_dyn.nc')
@property
def dynamic_features(self):
station = self.stations()[0]
df = self.read_ts_of_station(station)
return df.columns.to_list()
@property
def static_features(self) -> list:
fname = os.path.join(self.data_type_dir, f'1_attributes{SEP}Catchment_attributes.csv')
df = pd.read_csv(fname, sep=';', index_col='ID')
return df.columns.to_list()
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return os.path.join(self.camels_dir, self.name)
@property
def data_type_dir(self):
directory = 'CAMELS_AT'
if self.time_step == 'hourly':
directory = 'CAMELS_AT1' # todo, use it only for hourly, daily is causing errors
# self.ds_dir/CAMELS_AT/data_type_dir
f = [f for f in os.listdir(os.path.join(self.ds_dir, directory)) if self.data_type in f][0]
return os.path.join(self.ds_dir, f'{directory}{SEP}{f}')
def stations(self)->list:
# assuming file_names of the format ID_{stn_id}.csv
_dirs = os.listdir(os.path.join(self.data_type_dir, f'2_timeseries{SEP}{self.time_step}'))
s = [f.split('_')[1].split('.csv')[0] for f in _dirs]
return s
def _read_dynamic_from_csv(self,
stations,
dynamic_features:Union[str, list]='all',
st=None,
en=None,
):
"""Reads attributes of one station"""
stations_attributes = {}
for station in stations:
station_df = pd.DataFrame()
if dynamic_features is not None:
dynamic_df = self.read_ts_of_station(station)
station_df = pd.concat([station_df, dynamic_df])
stations_attributes[station] = station_df
return stations_attributes
def fetch_static_features(self,
station:Union[str, list],
features=None
)->pd.DataFrame:
fname = os.path.join(self.data_type_dir, f'1_attributes{SEP}Catchment_attributes.csv')
df = pd.read_csv(fname, sep=';', index_col='ID')
#if features is not None:
static_features = check_attributes(features, self.static_features)
df = df[static_features]
if isinstance(station, list):
stations = [str(i) for i in station]
elif isinstance(station, int):
stations = str(station)
else:
stations = station
df.index = df.index.astype(str)
df = df.loc[stations]
if isinstance(df, pd.Series):
df = pd.DataFrame(df).transpose()
return df
def read_ts_of_station(self, station) -> pd.DataFrame:
# read a file containing timeseries data for one station
fname = os.path.join(self.data_type_dir,
f'2_timeseries{SEP}{self.time_step}{SEP}ID_{station}.csv')
df = pd.read_csv(fname, sep=';')
if self.time_step == 'daily':
periods = pd.PeriodIndex(year=df["YYYY"], month=df["MM"], day=df["DD"], freq="D")
df.index = periods.to_timestamp()
else:
periods = pd.PeriodIndex(year=df["YYYY"], month=df["MM"], day=df["DD"], hour=df["hh"], minute=df["mm"], freq="H")
df.index = periods.to_timestamp()
# remove the cols specifying index
[df.pop(item) for item in ['YYYY', 'MM', 'DD', 'hh', 'mm'] if item in df]
return df
@property
def start(self):
return "19810101"
@property
def end(self):
return "20191231"
class HYSETS(Camels):
"""
database for hydrometeorological modeling of 14,425 North American watersheds
from 1950-2018 following the work of
[Arsenault et al., 2020](https://doi.org/10.1038/s41597-020-00583-2)
The user must manually download the files, unpack them and provide
the `path` where these files are saved.
This data comes with multiple sources; each source has one or more dynamic_features.
The following data sources are available.
|sources | dynamic_features |
|---------------|------------------|
|SNODAS_SWE | discharge, swe|
|SCDNA | discharge, pr, tasmin, tasmax|
|nonQC_stations | discharge, pr, tasmin, tasmax|
|Livneh | discharge, pr, tasmin, tasmax|
|ERA5 | discharge, pr, tasmax, tasmin|
|ERA5Land_SWE | discharge, swe|
|ERA5Land | discharge, pr, tasmax, tasmin|
All sources contain one or more of the following dynamic_features,
with the following shapes:
|dynamic_features | shape |
|----------------------------|------------|
|time | (25202,) |
|watershedID | (14425,) |
|drainage_area | (14425,) |
|drainage_area_GSIM | (14425,) |
|flag_GSIM_boundaries | (14425,) |
|flag_artificial_boundaries | (14425,) |
|centroid_lat | (14425,) |
|centroid_lon | (14425,) |
|elevation | (14425,) |
|slope | (14425,) |
|discharge | (14425, 25202) |
|pr | (14425, 25202) |
|tasmax | (14425, 25202) |
|tasmin | (14425, 25202) |
"""
doi = "https://doi.org/10.1038/s41597-020-00583-2"
url = "https://osf.io/rpc3w/"
Q_SRC = ['ERA5', 'ERA5Land', 'ERA5Land_SWE', 'Livneh', 'nonQC_stations', 'SCDNA', 'SNODAS_SWE']
SWE_SRC = ['ERA5Land_SWE', 'SNODAS_SWE']
OTHER_SRC = [src for src in Q_SRC if src not in ['ERA5Land_SWE', 'SNODAS_SWE']]
dynamic_features = ['discharge', 'swe', 'tasmin', 'tasmax', 'pr']
def __init__(self,
path:str,
swe_source:str = "SNODAS_SWE",
discharge_source: str = "ERA5",
tasmin_source: str = "ERA5",
tasmax_source: str = "ERA5",
pr_source: str = "ERA5",
**kwargs
):
"""
Arguments:
path : path where all the data files are saved.
swe_source : source of swe data.
discharge_source : source of discharge data
tasmin_source : source of tasmin data
tasmax_source : source of tasmax data
pr_source : source of pr data
kwargs : arguments for `Camels` base class
"""
assert swe_source in self.SWE_SRC, f'source must be one of {self.SWE_SRC}'
assert discharge_source in self.Q_SRC, f'source must be one of {self.Q_SRC}'
assert tasmin_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
assert tasmax_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
assert pr_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
self.sources = {
'swe': swe_source,
'discharge': discharge_source,
'tasmin': tasmin_source,
'tasmax': tasmax_source,
'pr': pr_source
}
super().__init__(**kwargs)
self.ds_dir = path
fpath = os.path.join(self.ds_dir, 'hysets_dyn.nc')
if not os.path.exists(fpath):
self._maybe_to_netcdf('hysets_dyn')
def _maybe_to_netcdf(self, fname:str):
# todo saving as one file takes very long time
oneD_vars = []
twoD_vars = []
for src in self.Q_SRC:
xds = xr.open_dataset(os.path.join(self.ds_dir, f'HYSETS_2020_{src}.nc'))
for var in xds.variables:
print(f'getting {var} from source {src} ')
if len(xds[var].data.shape) > 1:
xar = xds[var]
xar.name = f"{xar.name}_{src}"
twoD_vars.append(xar)
else:
xar = xds[var]
xar.name = f"{xar.name}_{src}"
oneD_vars.append(xar)
oneD_xds = xr.merge(oneD_vars)
twoD_xds = xr.merge(twoD_vars)
oneD_xds.to_netcdf(os.path.join(self.ds_dir, "hysets_static.nc"))
twoD_xds.to_netcdf(os.path.join(self.ds_dir, "hysets_dyn.nc"))
return
@property
def ds_dir(self):
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
sanity_check('HYSETS', x)
self._ds_dir = x
@property
def static_features(self):
df = self.read_static_data()
return df.columns.to_list()
def stations(self) -> list:
return self.read_static_data().index.to_list()
@property
def start(self):
return "19500101"
@property
def end(self):
return "20181231"
def fetch_stations_attributes(self,
stations: list,
dynamic_features: Union[str, list, None] = 'all',
static_features: Union[str, list, None] = None,
st = None,
en = None,
as_dataframe: bool = False,
**kwargs):
stations = check_attributes(stations, self.stations())
stations = [int(stn) for stn in stations]
if dynamic_features is not None:
dyn = self._fetch_dynamic_features(stations=stations,
dynamic_features=dynamic_features,
as_dataframe=as_dataframe,
**kwargs
)
if static_features is not None: # we want both static and dynamic
to_return = {}
static = self._fetch_static_features(station=stations,
static_features=static_features,
**kwargs
)
to_return['static'] = static
to_return['dynamic'] = dyn
else:
to_return = dyn
elif static_features is not None:
# we want only static
to_return = self._fetch_static_features(
station=stations,
static_features=static_features,
**kwargs
)
else:
raise ValueError
return to_return
def fetch_dynamic_features(self,
station,
dynamic_features='all',
st=None,
en=None,
as_dataframe=False):
"""Fetches dynamic attributes of one station."""
station = [int(station)]
return self._fetch_dynamic_features(stations=station,
dynamic_features=dynamic_features,
st=st,
en=en,
as_dataframe=as_dataframe)
def _fetch_dynamic_features(self,
stations:list,
dynamic_features='all',
st=None,
en=None,
as_dataframe=False,
as_ts=False
):
"""Fetches dynamic attributes of station."""
st, en = self._check_length(st, en)
attrs = check_attributes(dynamic_features, self.dynamic_features)
stations = np.subtract(stations, 1).tolist()
# maybe we don't need to read all variables
sources = {k:v for k,v in self.sources.items() if k in attrs}
# original .nc file contains datasets with dynamic and static features as data_vars
# however, for uniformity of this API and easy usage, we want a Dataset to have
# station names/gauge_ids as data_vars and each data_var has
# dimension (time, dynamic_variables)
# Therefore, first read all data for each station from .nc file
# then rearrange it.
# todo, this operation is slower because of `to_dataframe`
# also doing this removes all the metadata
x = {}
f = os.path.join(self.ds_dir, "hysets_dyn.nc")
xds = xr.open_dataset(f)
for stn in stations:
xds1 = xds[[f'{k}_{v}' for k, v in sources.items()]].sel(watershed=stn, time=slice(st, en))
xds1 = xds1.rename_vars({f'{k}_{v}': k for k, v in sources.items()})
x[stn] = xds1.to_dataframe(['time'])
xds = xr.Dataset(x)
xds = xds.rename_dims({'dim_1': 'dynamic_features'})
xds = xds.rename_vars({'dim_1': 'dynamic_features'})
if as_dataframe:
return xds.to_dataframe(['time', 'dynamic_features'])
return xds
def _fetch_static_features(self,
station,
static_features:Union[str, list]='all',
st=None,
en=None,
as_ts=False):
df = self.read_static_data()
static_features = check_attributes(static_features, self.static_features)
if isinstance(station, str):
station = [station]
elif isinstance(station, int):
station = [str(station)]
elif isinstance(station, list):
station = [str(stn) for stn in station]
else:
raise ValueError
return self.to_ts(df.loc[station][static_features], st=st, en=en, as_ts=as_ts)
def fetch_static_features(self,
station,
features='all',
st=None,
en=None,
as_ts=False
)->pd.DataFrame:
return self._fetch_static_features(station, features, st, en, as_ts)
def read_static_data(self):
fname = os.path.join(self.ds_dir, 'HYSETS_watershed_properties.txt')
static_df = pd.read_csv(fname, index_col='Watershed_ID', sep=';')
static_df.index = static_df.index.astype(str)
return static_df
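# Illustrative usage (the path below is a placeholder for wherever the manually
# downloaded HYSETS files were unpacked; the station id is hypothetical):
#   hysets = HYSETS(path='/path/to/HYSETS', swe_source='SNODAS_SWE', discharge_source='ERA5')
#   df = hysets.fetch_dynamic_features('1', dynamic_features=['discharge', 'pr'], as_dataframe=True)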
class CAMELS_US(Camels):
"""
Downloads and processes CAMELS dataset of 671 catchments named as CAMELS
from https://ral.ucar.edu/solutions/products/camels
https://doi.org/10.5194/hess-19-209-2015
"""
DATASETS = ['CAMELS_US']
url = "https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_timeseries_v1p2_metForcing_obsFlow.zip"
catchment_attr_url = "https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/camels_attributes_v2.0.zip"
folders = {'basin_mean_daymet': f'basin_mean_forcing{SEP}daymet',
'basin_mean_maurer': f'basin_mean_forcing{SEP}maurer',
'basin_mean_nldas': f'basin_mean_forcing{SEP}nldas',
'basin_mean_v1p15_daymet': f'basin_mean_forcing{SEP}v1p15{SEP}daymet',
'basin_mean_v1p15_nldas': f'basin_mean_forcing{SEP}v1p15{SEP}nldas',
'elev_bands': f'elev{SEP}daymet',
'hru': f'hru_forcing{SEP}daymet'}
dynamic_features = ['dayl(s)', 'prcp(mm/day)', 'srad(W/m2)',
'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)', 'Flow']
def __init__(self, data_source='basin_mean_daymet'):
assert data_source in self.folders, f'allowed data sources are {self.folders.keys()}'
self.data_source = data_source
super().__init__("CAMELS_US")
if os.path.exists(self.ds_dir):
print(f"dataset is already downloaded at {self.ds_dir}")
else:
download(self.url, os.path.join(self.camels_dir, f'CAMELS_US{SEP}CAMELS_US.zip'))
download(self.catchment_attr_url, os.path.join(self.camels_dir, f"CAMELS_US{SEP}catchment_attrs.zip"))
self._unzip()
self.attr_dir = os.path.join(self.ds_dir, f'catchment_attrs{SEP}camels_attributes_v2.0')
self.dataset_dir = os.path.join(self.ds_dir, f'CAMELS_US{SEP}basin_dataset_public_v1p2')
self._maybe_to_netcdf('camels_us_dyn')
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return os.path.join(self.camels_dir, self.name)
@property
def start(self):
return "19800101"
@property
def end(self):
return "20141231"
@property
def static_features(self):
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, 'catchment_attrs', 'camels_attributes_v2.0')}/*.txt")
cols = []
for f in files:
_df = pd.read_csv(f, sep=';', index_col='gauge_id', nrows=1)
cols += list(_df.columns)
else:
df = pd.read_csv(static_fpath, index_col='gauge_id', nrows=1)
cols = list(df.columns)
return cols
def stations(self) -> list:
stns = []
for _dir in os.listdir(os.path.join(self.dataset_dir, 'usgs_streamflow')):
cat = os.path.join(self.dataset_dir, f'usgs_streamflow{SEP}{_dir}')
stns += [fname.split('_')[0] for fname in os.listdir(cat)]
# remove stations for which static values are not available
for stn in ['06775500', '06846500', '09535100']:
stns.remove(stn)
return stns
def _read_dynamic_from_csv(self,
stations,
dynamic_features:Union[str, list]='all',
st=None,
en=None,
):
dyn = {}
for station in stations:
# attributes = check_attributes(dynamic_features, self.dynamic_features)
assert isinstance(station, str)
df = None
df1 = None
dir_name = self.folders[self.data_source]
for cat in os.listdir(os.path.join(self.dataset_dir, dir_name)):
cat_dirs = os.listdir(os.path.join(self.dataset_dir, f'{dir_name}{SEP}{cat}'))
stn_file = f'{station}_lump_cida_forcing_leap.txt'
if stn_file in cat_dirs:
df = pd.read_csv(os.path.join(self.dataset_dir,
f'{dir_name}{SEP}{cat}{SEP}{stn_file}'),
sep="\s+|;|:",
skiprows=4,
engine='python',
names=['Year', 'Mnth', 'Day', 'Hr', 'dayl(s)', 'prcp(mm/day)', 'srad(W/m2)',
'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)'],
)
df.index = pd.to_datetime(df['Year'].map(str) + '-' + df['Mnth'].map(str) + '-' + df['Day'].map(str))
flow_dir = os.path.join(self.dataset_dir, 'usgs_streamflow')
for cat in os.listdir(flow_dir):
cat_dirs = os.listdir(os.path.join(flow_dir, cat))
stn_file = f'{station}_streamflow_qc.txt'
if stn_file in cat_dirs:
fpath = os.path.join(flow_dir, f'{cat}{SEP}{stn_file}')
df1 = pd.read_csv(fpath, sep="\s+|;|:'",
names=['station', 'Year', 'Month', 'Day', 'Flow', 'Flag'],
engine='python')
df1.index = pd.to_datetime(
df1['Year'].map(str) + '-' + df1['Month'].map(str) + '-' + df1['Day'].map(str))
out_df = pd.concat([df[['dayl(s)', 'prcp(mm/day)', 'srad(W/m2)', 'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)']],
df1['Flow']],
axis=1)
dyn[station] = out_df
return dyn
def fetch_static_features(self, station, features):
attributes = check_attributes(features, self.static_features)
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, 'catchment_attrs', 'camels_attributes_v2.0')}/*.txt")
static_df = pd.DataFrame()
for f in files:
# index should be read as string
idx = pd.read_csv(f, sep=';', usecols=['gauge_id'], dtype=str)
_df = pd.read_csv(f, sep=';', index_col='gauge_id')
_df.index = idx['gauge_id']
static_df = pd.concat([static_df, _df], axis=1)
static_df.to_csv(static_fpath, index_label='gauge_id')
        else:  # index should be read as string because it has leading zeros
idx = pd.read_csv(static_fpath, usecols=['gauge_id'], dtype=str)
static_df = pd.read_csv(static_fpath, index_col='gauge_id')
static_df.index = idx['gauge_id']
static_df.index = static_df.index.astype(str)
df = static_df.loc[station][attributes]
if isinstance(df, pd.Series):
df = pd.DataFrame(df).transpose()
return df
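# --- Illustrative usage sketch (not part of the original module) ---
# Shows one way the CAMELS_US reader above could be exercised. Nothing is
# hard-coded: station ids and attribute names come from the class's own
# stations()/static_features helpers. Running it assumes the dataset is
# available on disk (the constructor downloads it when missing).
def _example_camels_us_usage():
    ds = CAMELS_US(data_source='basin_mean_daymet')
    stations = ds.stations()  # gauge ids as strings
    # static (catchment attribute) table for the first station
    return ds.fetch_static_features(stations[0], ds.static_features)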
class CAMELS_BR(Camels):
"""
Downloads and processes CAMELS dataset of Brazil
"""
url = "https://zenodo.org/record/3964745#.YA6rUxZS-Uk"
folders = {'streamflow_m3s': '02_CAMELS_BR_streamflow_m3s',
'streamflow_mm': '03_CAMELS_BR_streamflow_mm_selected_catchments',
'simulated_streamflow_m3s': '04_CAMELS_BR_streamflow_simulated',
'precipitation_cpc': '07_CAMELS_BR_precipitation_cpc',
'precipitation_mswep': '06_CAMELS_BR_precipitation_mswep',
'precipitation_chirps': '05_CAMELS_BR_precipitation_chirps',
'evapotransp_gleam': '08_CAMELS_BR_evapotransp_gleam',
'evapotransp_mgb': '09_CAMELS_BR_evapotransp_mgb',
'potential_evapotransp_gleam': '10_CAMELS_BR_potential_evapotransp_gleam',
'temperature_min': '11_CAMELS_BR_temperature_min_cpc',
'temperature_mean': '12_CAMELS_BR_temperature_mean_cpc',
'temperature_max': '13_CAMELS_BR_temperature_max_cpc'
}
def __init__(self):
super().__init__("CAMELS-BR")
self._download()
self._maybe_to_netcdf('camels_dyn_br')
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return os.path.join(self.camels_dir, self.name)
@property
def _all_dirs(self):
"""All the folders in the dataset_directory"""
return [f for f in os.listdir(self.ds_dir) if os.path.isdir(os.path.join(self.ds_dir, f))]
@property
def static_dir(self):
path = None
for _dir in self._all_dirs:
if "attributes" in _dir:
                # supposing that 'attributes' exists in only one file/folder in self.ds_dir
path = os.path.join(self.ds_dir, f'{_dir}{SEP}{_dir}')
return path
@property
def static_files(self):
all_files = None
if self.static_dir is not None:
all_files = glob.glob(f"{self.static_dir}/*.txt")
return all_files
@property
def dynamic_features(self) -> list:
return list(CAMELS_BR.folders.keys())
@property
def static_attribute_categories(self):
static_attrs = []
for f in self.static_files:
ff = str(os.path.basename(f).split('.txt')[0])
static_attrs.append('_'.join(ff.split('_')[2:]))
return static_attrs
@property
def static_features(self):
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, '01_CAMELS_BR_attributes','01_CAMELS_BR_attributes')}/*.txt")
cols = []
for f in files:
_df = pd.read_csv(f, sep=' ', index_col='gauge_id', nrows=1)
cols += list(_df.columns)
else:
df = pd.read_csv(static_fpath, index_col='gauge_id', nrows=1)
cols = list(df.columns)
return cols
@property
def start(self):
return "19800101"
@property
def end(self):
return "20181231"
def all_stations(self, attribute) -> list:
"""Tells all station ids for which a data of a specific attribute is available."""
all_files = []
for _attr, _dir in self.folders.items():
if attribute in _attr:
all_files = os.listdir(os.path.join(self.ds_dir, f'{_dir}{SEP}{_dir}'))
stations = []
for f in all_files:
stations.append(str(f.split('_')[0]))
return stations
def stations(self, to_exclude=None)->list:
"""Returns a list of station ids which are common among all dynamic attributes.
>>>dataset = CAMELS_BR()
>>>stations = dataset.stations()
"""
if to_exclude is not None:
if not isinstance(to_exclude, list):
assert isinstance(to_exclude, str)
to_exclude = [to_exclude]
else:
to_exclude = []
stations = {}
for dyn_attr in self.dynamic_features:
if dyn_attr not in to_exclude:
stations[dyn_attr] = self.all_stations(dyn_attr)
stns = list(set.intersection(*map(set, list(stations.values()))))
return stns
def _read_dynamic_from_csv(self,
stations,
attributes:Union[str, list]='all',
st=None,
en=None,
):
"""
returns the dynamic/time series attribute/attributes for one station id.
```python
>>>dataset = CAMELS_BR()
>>>pcp = dataset.fetch_dynamic_features('10500000', 'precipitation_cpc')
...# fetch all time series data associated with a station.
>>>x = dataset.fetch_dynamic_features('51560000', dataset.dynamic_features)
```
"""
attributes = check_attributes(attributes, self.dynamic_features)
dyn = {}
for stn_id in stations:
# making one separate dataframe for one station
data = pd.DataFrame()
for attr, _dir in self.folders.items():
if attr in attributes:
path = os.path.join(self.ds_dir, f'{_dir}{SEP}{_dir}')
# supposing that the filename starts with stn_id and has .txt extension.
fname = [f for f in os.listdir(path) if f.startswith(str(stn_id)) and f.endswith('.txt')]
fname = fname[0]
if os.path.exists(os.path.join(path, fname)):
df = pd.read_csv(os.path.join(path, fname), sep=' ')
df.index = pd.to_datetime(df[['year', 'month', 'day']])
df.index.freq = pd.infer_freq(df.index)
df = df[st:en]
# only read one column which matches the attr
# todo, qual_flag maybe important
[df.pop(item) for item in df.columns if item != attr]
data = pd.concat([data, df], axis=1)
else:
raise FileNotFoundError(f"file {fname} not found at {path}")
dyn[stn_id] = data
return dyn
def fetch_static_features(self,
station,
features=None
) -> pd.DataFrame:
"""
        Arguments:
            station int/str/list:
                id (or list of ids) of the station(s) whose static attributes are to be fetched
            features str/list:
                name of attribute(s) to fetch. Default is None, which returns all
                static attributes for the given station(s).
        Example:
        -------
        ```python
        >>> dataset = CAMELS_BR()
        >>> df = dataset.fetch_static_features(11500000, 'climate')
        ```
```
"""
if isinstance(station, int):
station = [str(station)]
elif isinstance(station, list):
station = [str(stn) for stn in station]
elif isinstance(station, str):
station = [station]
else:
raise ValueError
attributes = check_attributes(features, self.static_features)
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, '01_CAMELS_BR_attributes','01_CAMELS_BR_attributes')}/*.txt")
static_df = pd.DataFrame()
for f in files:
_df = pd.read_csv(f, sep=' ', index_col='gauge_id')
static_df = pd.concat([static_df, _df], axis=1)
static_df.to_csv(static_fpath, index_label='gauge_id')
else:
static_df = pd.read_csv(static_fpath, index_col='gauge_id')
static_df.index = static_df.index.astype(str)
return pd.DataFrame(static_df.loc[station][attributes])
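# --- Illustrative usage sketch (assumption, not in the original code) ---
# Fetches the static attributes of a few CAMELS-BR stations through the public
# helpers defined above; the constructor is expected to have downloaded the
# Zenodo archive already.
def _example_camels_br_usage():
    ds = CAMELS_BR()
    stns = ds.stations()  # ids common to all dynamic attributes
    return ds.fetch_static_features(stns[:3], ds.static_features)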
class CAMELS_GB(Camels):
"""
This dataset must be manually downloaded by the user.
The path of the downloaded folder must be provided while initiating this class.
"""
dynamic_features = ["precipitation", "pet", "temperature", "discharge_spec",
"discharge_vol", "peti",
"humidity", "shortwave_rad", "longwave_rad", "windspeed"]
def __init__(self, path=None):
super().__init__(name="CAMELS-GB")
self.ds_dir = path
self._maybe_to_netcdf('camels_gb_dyn')
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
sanity_check('CAMELS-GB', x)
self._ds_dir = x
@property
def static_attribute_categories(self) -> list:
attributes = []
path = os.path.join(self.ds_dir, 'data')
for f in os.listdir(path):
if os.path.isfile(os.path.join(path, f)) and f.endswith('csv'):
attributes.append(f.split('_')[2])
return attributes
@property
def start(self):
return "19701001"
@property
def end(self):
return "20150930"
@property
def static_features(self):
files = glob.glob(f"{os.path.join(self.ds_dir, 'data')}/*.csv")
cols = []
for f in files:
if 'static_features.csv' not in f:
df = | pd.read_csv(f, nrows=1, index_col='gauge_id') | pandas.read_csv |
import collections
import io
import json
import math
import zipfile
import logging
from urllib.error import URLError
from urllib.request import urlopen
import pandas as pd
from matplotlib import pyplot as plt
# Getting data
def set_source(filename):
"""
Sets source global variable to the path of .zip file.
:param filename: path to the downloaded .zip file
:return: None
You can provide relative path to file
>>> set_source('facebook-YourName.zip')
Absolute path (works only on Windows)
>>> set_source('C:/Users/Admin/Downloads/facebook-YourName.zip')
"""
filename = f'file:///{filename}' if filename[1] == ':' \
else (f'file:./{filename}' if filename.endswith('.zip') else f'file:./{filename}.zip')
try:
global source
source = zipfile.ZipFile(io.BytesIO(urlopen(filename).read()))
except URLError:
logging.error('File not found, try again.')
def get_data(conversation=None, chars=False, user=False):
"""
Reads data from messages.json or messages_chars.json
and finds key based on the beginning of the string.
:param conversation: beginning of the conversation id
or None for overall statistics (default None)
:param chars: True for counting chars in messages_chars.json,
False for counting messages in messages.json (default False)
:param user: True for user name instead of conversation id,
False otherwise (default False)
:return: dictionary containing the data and if applicable
a key pointing to a specific conversation, otherwise None
"""
try:
data = json.loads(open('messages_chars.json' if chars else 'messages.json', 'r', encoding='utf-8').read())
if user:
data = pd.DataFrame(data).fillna(0).astype('int')
for key in data.index:
if key.lower().startswith(conversation.lower()):
return data, key
else:
logging.error('Conversation not found.')
return None, None
if conversation is not None:
for key in data.keys():
if key.lower().startswith(conversation.lower()):
return data, key
else:
logging.error('Conversation not found.')
return None, None
else:
return data, None
except FileNotFoundError:
logging.error('Characters not counted.' if chars else 'Messages not counted.')
# Counting messages and characters
def count_messages():
"""
Counts messages and saves output to messages.json.
:return: None
"""
namelist = source.namelist()
total, senders = {}, {x.split('/')[2] for x in namelist
if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}
for sender in senders:
messages, i = collections.Counter(), 0
while True:
try:
i += 1
messages += collections.Counter(pd.DataFrame(json.loads(
source.open('messages/inbox/' + sender + '/message_' + str(i) + '.json').read())[
'messages']).iloc[:, 0])
except KeyError:
break
total[sender] = {k.encode('iso-8859-1').decode('utf-8'): v for k, v in messages.items()}
total[sender]['total'] = sum(messages.values())
with open('messages.json', 'w', encoding='utf-8') as output:
json.dump(total, output, ensure_ascii=False)
def count_characters():
"""
Counts characters from messages and saves output to messages_chars.json.
:return: None
"""
namelist = source.namelist()
total, senders = {}, {x.split('/')[2] for x in namelist
if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}
for sender in senders:
counted_all, i = collections.Counter(), 0
while True:
try:
i += 1
frame = pd.DataFrame(json.loads(
source.open('messages/inbox/' + sender + '/message_' + str(i) + '.json').read())['messages'])
frame['counted'] = frame.apply(
lambda row: collections.Counter(str(row['content']).encode('iso-8859-1').decode('utf-8')), axis=1)
counted_all += sum(frame['counted'], collections.Counter())
except KeyError:
break
total[sender] = dict(counted_all)
with open('messages_chars.json', 'w', encoding='utf-8') as output:
json.dump(total, output, ensure_ascii=False)
def count(chars=False):
"""
Counts messages or characters from messages
and saves output to the file.
:param chars: True for counting characters,
False for counting messages (default False)
:return: None
"""
if chars:
count_characters()
else:
count_messages()
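# --- Example workflow (sketch) ---
# How the helpers above and the statistics helpers below are meant to be
# chained. The zip filename follows the docstring example and 'John' is a
# placeholder conversation prefix; everything is wrapped in a function so that
# nothing runs on import.
def _example_workflow():
    set_source('facebook-YourName.zip')   # path to the downloaded archive
    count()                               # writes messages.json
    data, key = get_data('John')          # first conversation whose id starts with 'John'
    if key is not None:
        statistics(data, key)             # per-conversation message counts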
# Statistics
def statistics(data_source, conversation=None, chars=False):
"""
Prints statistics of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:param conversation: conversation id or None for overall statistics
(default None)
:param chars: True for character statistics instead of messages,
False otherwise (default False)
:return: None
"""
if conversation is None:
if chars:
characters_statistics(data_source)
else:
messages_statistics(data_source)
else:
if chars:
raise NotImplementedError()
else:
print(conversation)
conversation_statistics(data_source, conversation)
def messages_statistics(data_source):
"""
Prints messages overall statistics of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:return: None
"""
data_source = pd.DataFrame(data_source).fillna(0).astype('int')
pd.set_option('display.max_rows', None)
total_values = data_source.loc['total'].sort_values(ascending=False)
print(total_values)
print(total_values.describe())
total_values = total_values.sort_values()
plt.rcdefaults()
plt.barh(total_values.index.astype(str).str[:10][-20:], total_values.iloc[-20:])
plt.show()
def conversation_statistics(data_source, conversation):
"""
Prints messages statistics for specific conversation of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:param conversation: conversation id, or key from get_data() function
:return: None
"""
data_source = pd.DataFrame(data_source)
data_source = data_source.loc[:, conversation]
data_source = data_source[data_source > 0].sort_values(ascending=False).astype('int')
pd.set_option('display.max_rows', None)
print(data_source)
def characters_statistics(data_source):
"""
Prints characters statistics of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:return: None
"""
data_source = | pd.DataFrame(data_source) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 23:34:57 2019
@author: reynaldo.espana.rey
Web scraping algorithm to build a data set for a text generator
source: https://towardsdatascience.com/how-to-web-scrape-with-python-in-4-minutes-bc49186a8460
"""
# =============================================================================
# Libraries
# =============================================================================
import numpy as np
import pandas as pd
import requests
import re
import time
import os
from bs4 import BeautifulSoup
import string
# =============================================================================
# Functions
# =============================================================================
# request page and make it BeautifulSoup
def get_page(url, verbose=0):
# get page
response = requests.get(url)
if verbose:
        print('Successful:', response.status_code == 200)
    if response.status_code == 200:
# BeautifulSoup data structure
soup = BeautifulSoup(response.text, 'html.parser')
return soup
return str(response)
# function to retrieve links from the Inspector Gadget plugin
def get_href(url, attr):
# get page
soup = get_page(url)
# get data links
data = soup.select(attr)
links = np.unique([x['href'] for x in data])
return links
def get_text(url, attr):
# get page
soup = get_page(url)
# get data links
data = soup.select(attr)
return data
# valid file name
def valid_name(value):
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    value = re.sub(r'[-\s]+', '-', value)
return value
# function to remove characters
def remove_chars(doc, chars_2remove=None):
if chars_2remove is None:
        # list of non UTF-8 characters to be removed from the doc
chars_2remove = ['\x85', '\x91', '\x92', '\x93', '\x94', '\x96',
'\x97', '\xa0']
    # as a regex expression
chars_2remove = '[' + ''.join(chars_2remove) + ']'
# erase
doc = re.sub(chars_2remove, ' ', doc)
doc = re.sub(' +', ' ', doc).strip()
return doc
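# --- Quick sketch of the two text helpers above (illustrative only) ---
# The sample string is made up; it simply contains a couple of the non UTF-8
# characters that remove_chars strips by default.
def _example_text_helpers():
    sample = 'Canción de otoño\x85 en primavera\xa0'
    slug = valid_name(sample)      # e.g. a lowercase, dash-separated filename
    clean = remove_chars(sample)   # control characters replaced by spaces
    return slug, clean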
# =============================================================================
# Spanish poems
# =============================================================================
#### Spanish poems web page source
# root source
url_root = 'https://www.poemas-del-alma.com/'
## Path to use
## Retrieve poems and save them as .txt
path_poems = '../data/DB/spanish poems/'
# save list of poems links
path_poems_links = '../data/DB/poems_list.csv'
# =============================================================================
# Poems
# =============================================================================
##### POETS #####
# poems by author in alphabetical order
alphabet = [x for x in string.ascii_uppercase]
# get list of poets
poets = pd.DataFrame()
for letter in alphabet:
print(letter)
links = get_href(url_root + letter + '.html', attr='#content li a')
authors = pd.DataFrame({'author': [x.split('/')[-1].split('.')[0] for x in links],
'link': links})
poets = poets.append(authors)
time.sleep(.5)
poets = poets.reset_index(drop=True)
print('Poets found:', len(poets))
##### POEMS #####
# go throgh all the poems in poets
# run only for poems not already in folder
poems = pd.read_csv(path_poems_links)
# filter poets to scrap
poets['in_disk'] = poets['author'].isin(poems['author'])
# report which poets have already been scraped
print('Files already on disk:', poets.groupby(['in_disk']).size())
# loop to remaining poets
poets_2scrap = poets[poets['in_disk']==False]
# shuffle, else all errors will be first
poets_2scrap = poets_2scrap.sample(frac=1).reset_index(drop=True)
# loop for each poet link
for index, row in poets_2scrap.iterrows():
if (index % 25 == 0):
print('\n\n- Progress %:', index/len(poets_2scrap), '- Total poems:', len(poems))
time.sleep(5)
try:
# get page with poems links
links = get_href(row['link'], attr='#block-poems a')
time.sleep(.5)
links = pd.DataFrame({'poem': links})
# save and append
links['author'] = row['author']
links['author_link'] = row['link']
poems = poems.append(links, sort=False)
except:
print("An exception occurred:", row['link'])
time.sleep(30)
print('Poems found:', len(poems))
poems.to_csv(path_poems_links, index=False)
# =============================================================================
# COURPUS
# =============================================================================
### Create poem corpus and save it as .txt
# list of poems to search
poems = | pd.read_csv(path_poems_links) | pandas.read_csv |
#!/usr/bin/python
# coding=utf-8
# Extract keywords from text using the TF-IDF method
# http://scikit-learn.org/stable/modules/feature_extraction.html#tfidf-term-weighting
import sys,codecs
import pandas as pd
import numpy as np
import jieba.posseg
import jieba.analyse
# from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
"""
TF-IDF weighting:
    1. CountVectorizer builds the term-frequency matrix
    2. TfidfTransformer computes the tf-idf weights
    3. Keywords of each text
    4. The corresponding tf-idf matrix
"""
# Data preprocessing: tokenization, stop-word removal, POS filtering
def dataPrepos(text, stopkey,pos):
l = []
# pos = ['n','v','vn']
    #'nz',  # noun
    #'v',
    # 'vd',
    #'vn',  # verb
    #'l',
    #'a',  # adjective
    # 'd'  # adverb
    #]  # POS tags to keep
    seg = jieba.posseg.cut(text)  # tokenize with POS tags
for i in seg:
        if i.word not in stopkey and i.flag in pos:  # drop stop words + filter by POS
        # if i.word not in stopkey:  # drop stop words only
l.append(i.word)
return l
def preprocess_for_corpus(text, stopkey,pos):
l = []
seg = jieba.posseg.cut(text)
for i in seg:
        if i.word not in stopkey and i.flag in pos:  # drop stop words + filter by POS
l.append(i.word)
return ' '.join(l)
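# --- Illustrative call of the preprocessing helpers above (assumption) ---
# The stop-word list and POS whitelist are placeholders; the sample sentence
# means "natural language processing is an important direction of AI".
def _example_prepos():
    stopkey = ['的', '了']          # placeholder stop words
    pos = ['n', 'v', 'vn']          # keep nouns and verbs, as in the commented-out list
    text = '自然语言处理是人工智能的重要方向'
    return preprocess_for_corpus(text, stopkey, pos)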
# Use tf-idf to extract the top-K keywords of each text
def getKeywords_tfidf(data,stopkey,topK,pos):
idList, titleList, abstractList = data['id'], data['title'], data['abstract']
    corpus = []  # gather all documents into one list, one document per element
for index in range(len(idList)):
        text = '%s。%s' % (titleList[index], abstractList[index])  # concatenate title and abstract
        text = dataPrepos(text, stopkey, pos)  # preprocess the text
        text = " ".join(text)  # join into a space-separated string
corpus.append(text)
    # 1. Build the term-frequency matrix from the texts
vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(corpus)  # term-frequency matrix; X[i][j] is the frequency of word j in text i
    # 2. Compute the tf-idf weight of every word
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(X)
    # 3. Get the vocabulary of the bag-of-words model
word = vectorizer.get_feature_names()
    # 4. Get the tf-idf matrix; weight[i][j] is the tf-idf weight of word j in text i
weight = tfidf.toarray()
    # 5. Print the word weights
ids, titles, keys = [], [], []
for i in range(len(weight)):
print(u"-------这里输出第", i+1 , u"篇文本的词语tf-idf------")
ids.append(idList[i])
titles.append(titleList[i])
        df_word, df_weight = [], []  # word list and corresponding weight list for the current text
for j in range(len(word)):
print( word[j], weight[i][j])
df_word.append(word[j])
df_weight.append(weight[i][j])
df_word = pd.DataFrame(df_word, columns=['word'])
df_weight = pd.DataFrame(df_weight, columns=['weight'])
word_weight = | pd.concat([df_word, df_weight], axis=1) | pandas.concat |
# coding=utf-8
import math
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# ### Settings
tf.logging.set_verbosity(tf.logging.ERROR)  # logging level
pd.options.display.max_rows = 10  # maximum number of rows to display
pd.options.display.float_format = '{:.1f}'.format
pd.set_option('display.max_columns', None)  # maximum number of columns to display; None shows all columns
pd.set_option('display.width', 200)  # display width in characters
pd.set_option('max_colwidth', 100)  # column width, default is 50
pd.set_option('expand_frame_repr', False)  # whether wide frames may wrap onto multiple lines
california_housing_dataframe = pd.read_csv("Zcalifornia_housing_train.csv", sep=",")  # load the dataset
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index))  # shuffle the data
california_housing_dataframe["median_house_value"] /= 1000.0  # scale median_house_value to thousands
print("california_housing_dataframe: ", california_housing_dataframe)
# ### Examine the data
print("california_housing_dataframe description: ", california_housing_dataframe.describe())  # summary statistics of each column
# ### Build the first model
# Step 1: define the input feature and configure the feature columns
my_feature = california_housing_dataframe[["total_rooms"]]  # extract the total_rooms data from california_housing_dataframe
feature_columns = [tf.feature_column.numeric_column("total_rooms")]  # define the feature column with numeric_column, treating the data as numeric
# Step 2: define the target
targets = california_housing_dataframe["median_house_value"]
# Step 3: configure the LinearRegressor
my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.00005)  # train the model with gradient descent
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)  # apply gradient clipping to the optimizer
linear_regressor = tf.estimator.LinearRegressor(
    feature_columns=feature_columns,
    optimizer=my_optimizer)  # configure the linear_regressor
# Step 4: define the input function
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):  # define the input function
"""Trains a linear regression model of one feature.
Args:
features: pandas DataFrame of features
targets: pandas DataFrame of targets
batch_size: Size of batches to be passed to the model
shuffle: True or False. Whether to shuffle the data.
num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
Returns:
Tuple of (features, labels) for next data batch
"""
    features = {key: np.array(value) for key, value in dict(features).items()}  # convert the pandas feature data into a dict of NumPy arrays
    ds = Dataset.from_tensor_slices((features, targets))  # build a Dataset object from the data
    ds = ds.batch(batch_size).repeat(num_epochs)  # split the data into batches, repeated for the given number of epochs
    if shuffle:  # if shuffle is True, the data is shuffled so it reaches the model in random order during training
        ds = ds.shuffle(buffer_size=10000)  # buffer_size sets the size of the dataset from which shuffle samples randomly
    features, labels = ds.make_one_shot_iterator().get_next()  # build an iterator over the dataset and return the next batch to the LinearRegressor
return features, labels
# Step 5: train the model
_ = linear_regressor.train(
    input_fn=lambda: my_input_fn(my_feature, targets),
    steps=100)  # call train() on linear_regressor to train the model
# Step 6: evaluate the model
prediction_input_fn = lambda: my_input_fn(my_feature, targets, num_epochs=1, shuffle=False)
predictions = linear_regressor.predict(input_fn=prediction_input_fn)
predictions = np.array([item['predictions'][0] for item in predictions])
mean_squared_error = metrics.mean_squared_error(predictions, targets)  # mean squared error (MSE)
root_mean_squared_error = math.sqrt(mean_squared_error)  # root mean squared error (RMSE)
print("Mean Squared Error (on training data): %0.3f" % mean_squared_error)
print("Root Mean Squared Error (on training data): %0.3f" % root_mean_squared_error)
min_house_value = california_housing_dataframe["median_house_value"].min()
max_house_value = california_housing_dataframe["median_house_value"].max()
min_max_difference = max_house_value - min_house_value  # compare the RMSE against the spread (max - min) of the target
print("Min. Median House Value: %0.3f" % min_house_value)
print("Max. Median House Value: %0.3f" % max_house_value)
print("Difference between Min. and Max.: %0.3f" % min_max_difference)
print("Root Mean Squared Error: %0.3f" % root_mean_squared_error)
# check how well predictions line up with targets, using summary statistics
calibration_data = pd.DataFrame()
calibration_data["predictions"] = | pd.Series(predictions) | pandas.Series |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import unittest
import pytest
from numpy.testing import assert_array_equal
import numpy as np
from pandas.util.testing import assert_frame_equal
import pandas as pd
import pyarrow as pa
from pyarrow.compat import guid
from pyarrow.feather import (read_feather, write_feather,
FeatherReader)
from pyarrow.lib import FeatherWriter
def random_path():
return 'feather_{}'.format(guid())
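# --- Minimal roundtrip sketch (illustrative, not one of the test cases below) ---
# Writes a tiny frame to a temporary feather file and reads it back, using only
# the public helpers imported at the top of this module.
def _example_feather_roundtrip():
    df = pd.DataFrame({'a': [1.0, 2.0, 3.0]})
    path = random_path()
    try:
        write_feather(df, path)
        return read_feather(path)
    finally:
        if os.path.exists(path):
            os.remove(path)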
class TestFeatherReader(unittest.TestCase):
def setUp(self):
self.test_files = []
def tearDown(self):
for path in self.test_files:
try:
os.remove(path)
except os.error:
pass
def test_file_not_exist(self):
with self.assertRaises(pa.ArrowIOError):
FeatherReader('test_invalid_file')
def _get_null_counts(self, path, columns=None):
reader = FeatherReader(path)
counts = []
for i in range(reader.num_columns):
col = reader.get_column(i)
if columns is None or col.name in columns:
counts.append(col.null_count)
return counts
def _check_pandas_roundtrip(self, df, expected=None, path=None,
columns=None, null_counts=None,
nthreads=1):
if path is None:
path = random_path()
self.test_files.append(path)
write_feather(df, path)
if not os.path.exists(path):
raise Exception('file not written')
result = read_feather(path, columns, nthreads=nthreads)
if expected is None:
expected = df
assert_frame_equal(result, expected)
if null_counts is None:
null_counts = np.zeros(len(expected.columns))
np.testing.assert_array_equal(self._get_null_counts(path, columns),
null_counts)
def _assert_error_on_write(self, df, exc, path=None):
# check that we are raising the exception
# on writing
if path is None:
path = random_path()
self.test_files.append(path)
def f():
write_feather(df, path)
self.assertRaises(exc, f)
def test_num_rows_attr(self):
df = pd.DataFrame({'foo': [1, 2, 3, 4, 5]})
path = random_path()
self.test_files.append(path)
write_feather(df, path)
reader = FeatherReader(path)
assert reader.num_rows == len(df)
df = pd.DataFrame({})
path = random_path()
self.test_files.append(path)
write_feather(df, path)
reader = FeatherReader(path)
assert reader.num_rows == 0
def test_float_no_nulls(self):
data = {}
numpy_dtypes = ['f4', 'f8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randn(num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_float_nulls(self):
num_values = 100
path = random_path()
self.test_files.append(path)
writer = FeatherWriter()
writer.open(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = ['f4', 'f8']
expected_cols = []
null_counts = []
for name in dtypes:
values = np.random.randn(num_values).astype(name)
writer.write_array(name, values, null_mask)
values[null_mask] = np.nan
expected_cols.append(values)
null_counts.append(null_mask.sum())
writer.close()
ex_frame = pd.DataFrame(dict(zip(dtypes, expected_cols)),
columns=dtypes)
result = read_feather(path)
assert_frame_equal(result, ex_frame)
assert_array_equal(self._get_null_counts(path), null_counts)
def test_integer_no_nulls(self):
data = {}
numpy_dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_platform_numpy_integers(self):
data = {}
numpy_dtypes = ['longlong']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
path = random_path()
self.test_files.append(path)
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
writer = FeatherWriter()
writer.open(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
writer.write_array(name, values, null_mask)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
writer.close()
result = read_feather(path)
assert_frame_equal(result, ex_frame)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
self._check_pandas_roundtrip(df)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
path = random_path()
self.test_files.append(path)
num_values = 100
np.random.seed(0)
writer = FeatherWriter()
writer.open(path)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
writer.write_array('bools', values, mask)
expected = values.astype(object)
expected[mask] = None
writer.close()
ex_frame = pd.DataFrame({'bools': expected})
result = read_feather(path)
assert_frame_equal(result, ex_frame)
def test_buffer_bounds_error(self):
# ARROW-1676
path = random_path()
self.test_files.append(path)
for i in range(16, 256):
values = pa.array([None] + list(range(i)), type=pa.float64())
writer = FeatherWriter()
writer.open(path)
writer.write_array('arr', values)
writer.close()
result = read_feather(path)
expected = pd.DataFrame({'arr': values.to_pandas()})
assert_frame_equal(result, expected)
self._check_pandas_roundtrip(expected, null_counts=[1])
def test_boolean_object_nulls(self):
repeats = 100
arr = np.array([False, None, True] * repeats, dtype=object)
df = pd.DataFrame({'bools': arr})
self._check_pandas_roundtrip(df, null_counts=[1 * repeats])
def test_delete_partial_file_on_error(self):
if sys.platform == 'win32':
pytest.skip('Windows hangs on to file handle for some reason')
# strings will fail
df = pd.DataFrame(
{
'numbers': range(5),
'strings': [b'foo', None, u'bar', 'qux', np.nan]},
columns=['numbers', 'strings'])
path = random_path()
try:
write_feather(df, path)
        except Exception:
            pass
assert not os.path.exists(path)
def test_strings(self):
repeats = 1000
        # we have mixed bytes, unicode, strings
values = [b'foo', None, u'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
self._assert_error_on_write(df, ValueError)
# embedded nulls are ok
values = ['foo', None, 'bar', 'qux', None]
df = pd.DataFrame({'strings': values * repeats})
expected = pd.DataFrame({'strings': values * repeats})
self._check_pandas_roundtrip(df, expected, null_counts=[2 * repeats])
values = ['foo', None, 'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
expected = | pd.DataFrame({'strings': values * repeats}) | pandas.DataFrame |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
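# --- Small sketch (illustrative, not part of the test suite) ---
# assert_range_equal checks values, freq and tz together, e.g. for two
# identically-built ranges.
def _example_assert_range_equal():
    left = date_range('1/1/2000', periods=5, freq='D')
    right = date_range('1/1/2000', periods=5, freq='D')
    assert_range_equal(left, right)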
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertTrue(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
        fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter',
                  'is_month_start', 'is_month_end', 'is_quarter_start',
                  'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
        # Index is moved back a day with the timezone conversion from UTC to Pacific
        expected_index = (pd.period_range(start=start, end=end, freq='D') - 1)
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEqual(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEqual(result.name, df.index[2])
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.day, 1)
self.assertEqual(stamp.hour, h)
if unit != 'D':
self.assertEqual(stamp.minute, 1)
self.assertEqual(stamp.second, s)
self.assertEqual(stamp.microsecond, us)
else:
self.assertEqual(stamp.minute, 0)
self.assertEqual(stamp.second, 0)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assertIs(result, NaT)
result = Timestamp(None)
self.assertIs(result, NaT)
result = Timestamp(iNaT)
self.assertIs(result, NaT)
result = Timestamp(NaT)
self.assertIs(result, NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
        stamp = long(1337299200000000000)
import pytest
import pandas as pd
import numpy as np
from pandas import testing as pdt
from pandas import Timestamp
from datetime import datetime
from pyam import utils, META_IDX
TEST_VARS = ["foo", "foo|bar", "foo|bar|baz"]
TEST_CONCAT_SERIES = pd.Series(["foo", "bar", "baz"], index=["f", "b", "z"])
def test_pattern_match_none():
data = pd.Series(["foo", "bar"])
values = ["baz"]
obs = utils.pattern_match(data, values)
assert (obs == [False, False]).all()
def test_pattern_match_nan():
data = pd.Series(["foo", np.nan])
values = ["baz"]
obs = utils.pattern_match(data, values, has_nan=True)
assert (obs == [False, False]).all()
def test_pattern_match_one():
data = pd.Series(["foo", "bar"])
values = ["foo"]
obs = utils.pattern_match(data, values)
assert (obs == [True, False]).all()
def test_pattern_match_str_regex():
data = pd.Series(["foo", "foo2", "bar"])
values = ["foo"]
obs = utils.pattern_match(data, values)
assert (obs == [True, False, False]).all()
def test_pattern_match_ast_regex():
data = pd.Series(["foo", "foo2", "bar"])
values = ["foo*"]
obs = utils.pattern_match(data, values)
assert (obs == [True, True, False]).all()
def test_pattern_match_ast2_regex():
data = pd.Series(["foo|bar", "foo", "bar"])
values = ["*o*b*"]
obs = utils.pattern_match(data, values)
assert (obs == [True, False, False]).all()
def test_pattern_match_plus():
data = pd.Series(["foo", "foo+", "+bar", "b+az"])
values = ["*+*"]
obs = utils.pattern_match(data, values)
assert (obs == [False, True, True, True]).all()
def test_pattern_match_dot():
data = pd.Series(["foo", "fo."])
values = ["fo."]
obs = utils.pattern_match(data, values)
assert (obs == [False, True]).all()
def test_pattern_match_brackets():
data = pd.Series(["foo (bar)", "foo bar"])
values = ["foo (bar)"]
obs = utils.pattern_match(data, values)
assert (obs == [True, False]).all()
def test_pattern_match_dollar():
data = pd.Series(["foo$bar", "foo"])
values = ["foo$bar"]
obs = utils.pattern_match(data, values)
assert (obs == [True, False]).all()
def test_pattern_regexp():
data = pd.Series(["foo", "foa", "foo$"])
values = ["fo.$"]
obs = utils.pattern_match(data, values, regexp=True)
assert (obs == [True, True, False]).all()
def test_find_depth_as_list():
obs = utils.find_depth(TEST_VARS)
assert obs == [0, 1, 2]
def test_find_depth_as_str():
assert utils.find_depth("foo|bar|baz") == 2
def test_find_depth_with_str():
    data = pd.Series(["foo", "foo|bar|baz", "bar|baz", "bar|baz|foo"])  # api: pandas.Series
from flask import *
import pandas as pd
import os
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay
from keras.models import load_model
from sklearn.preprocessing import MinMaxScaler
from flask_ngrok import run_with_ngrok
import numpy as np
app = Flask(__name__)
run_with_ngrok(app)
basedir = os.path.abspath(os.path.dirname(__file__))
DIR = 'static/data/'
bitcoin_time_series = pd.read_csv(DIR + "cmc_plus_gold_fixed.csv", parse_dates = ['date'])
gtrend_time_series = pd.read_csv(DIR + "daily_gtrend_data_cmc.csv", parse_dates = ['date'])
dataset = bitcoin_time_series.copy()
dataset['gtrend'] = gtrend_time_series['bitcoin']
train_dates = dataset['date']
del gtrend_time_series
dataset = dataset.drop('date', axis = 1)
dataset = dataset.drop('index', axis = 1)
scaler = MinMaxScaler().fit(dataset)
dataset_scaled = scaler.transform(dataset)
@app.route('/')
def index():
loss_adam = pd.read_csv('static/data/loss_ogru/loss_history.csv')
loss_adam_h = pd.read_csv('static/data/loss_ogru/loss_history_H.csv')
loss_adam_hg = pd.read_csv('static/data/loss_ogru/loss_history_HG.csv')
    loss_adam_htrend = pd.read_csv('static/data/loss_ogru/loss_history_HTrend.csv')  # api: pandas.read_csv
# AUTOGENERATED! DO NOT EDIT! File to edit: 10_FE.ipynb (unless otherwise specified).
__all__ = ['FE']
# Cell
from pyDOE import lhs
import numpy as np
from scipy.stats.distributions import norm
from scipy.stats import uniform
import yaml
from qd.cae.dyna import KeyFile
import os
import pandas as pd
from diversipy.hycusampling import maximin_reconstruction as maxmin
from pathlib import PurePath
class FE():
"""
    This class contains a set of methods that read the .yaml file and replace the values of the
    input parameters with newly generated sample data sets. New key files are then generated for simulation.
-----------
INPUTS
-----------
    settings : Input file for FE simulations to get the user input
"""
def __init__(self, settings):
self.settings = settings
self.folders_count=0
self._read_user_input()
def _read_user_input(self):
""" gets the user input details from the settings.yaml file.
Returns
-------
fin_dir : Final path of the created directory
self.Run : Number of runs
self.para_list : A .yaml file containing the parameters/ features/ variables for sampling with appropriate
values as subkeys in the same file.
        self.key : .key file containing the initial simulation details.
"""
""" gets the user input details from the settings.yaml file.
Returns
-------
fin_dir : Final path of the created directory
self.Run : Number of runs
self.para_list : A .yaml file containing the parameters/ features/ variables for sampling with appropriate
values as subkeys in the same file.
self.key : .key file containg the initial simulation details.
"""
with open(self.settings,'r') as file:
inp = yaml.load(file, Loader=yaml.FullLoader)
inp_vals=[*inp.values()]
inp_keys=[*inp.keys()]
req=['baseline_directory','simulations']
for names in req:
if names not in inp_keys:
raise Exception(names +" not in dynakit_FE.yaml file")
if inp[names] == None:
raise Exception(names +" value not in dynakit_FE.yaml file")
if isinstance(inp['simulations'], int) == True:
self.Run=inp['simulations']
self.int='yes'
self.Flag=1
elif isinstance(inp['simulations'], str) == True:
self.DOE=pd.read_csv(inp['simulations'])
self.int='no'
self.Run=len(self.DOE)
self.Flag=1
else:
            print('Enter either an integer or a .csv input')
self.cwd=os.getcwd()
base_dir=PurePath(inp['baseline_directory'])
self.basepath=os.path.abspath(base_dir)
self.fin_dir=os.path.dirname(self.basepath)
self.basename=base_dir.name
self.dyna_dir = os.path.join(self.fin_dir,'.dynakit')
self.para_list='FE_parameters.yaml'
self.key=inp['main_key']
self.fol_name=self.basename.split('_')[0]
if os.path.exists(self.dyna_dir):
if [name for name in os.listdir(self.dyna_dir) if name.endswith(".csv")] == []:
os.rmdir(self.dyna_dir)
try:
os.mkdir(self.dyna_dir)
except OSError as err:
print('Adding new samples to the existing directory')
self.Flag=0
return self.fin_dir , self.Run , self.key , self.para_list
def read_parameters(self):
""" converts the .yaml file to a dictionary
Parameters
----------
self.para_list : the config.yaml file with the user inputs
Returns
-------
z : the .yaml file in dictionary format
"""
os.chdir(self.fin_dir)
with open(self.para_list,'r') as file:
parameter_list = yaml.load(file, Loader=yaml.FullLoader)
dynParams = {k: v for k, v in parameter_list['parameters'].items() if parameter_list['parameters'][k]['type'] == 'dynaParameter'}
self.dynaParameters = pd.DataFrame.from_dict(dynParams)
onparams = {k: v for k, v in dynParams.items() if dynParams[k]['status'] == True }
        self.new_par = pd.DataFrame.from_dict(onparams)  # api: pandas.DataFrame.from_dict
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python [conda env:PROJ_irox_oer] *
# language: python
# name: conda-env-PROJ_irox_oer-py
# ---
# # Parses Job Data
# ---
#
# Applies job analysis scripts to job directories and compiles the results.
#
# This script when rerunning all jobs took 82.928 min on Wed Feb 3 16:27:22 PST 2021
# ### Import Modules
# +
import os
print(os.getcwd())
import sys
import time; ti = time.time()
import pickle
from pathlib import Path
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
pd.set_option("display.max_columns", None)
pd.options.display.max_colwidth = 100
# #########################################################
from misc_modules.pandas_methods import reorder_df_columns
from vasp.vasp_methods import read_incar, get_irr_kpts_from_outcar
# #########################################################
from methods import (
get_df_jobs,
get_df_jobs_data,
get_df_jobs_paths,
get_df_jobs_data_clusters,
)
from methods import get_df_jobs_data
from local_methods import (
parse_job_err,
parse_finished_file,
parse_job_state,
is_job_submitted,
get_isif_from_incar,
get_number_of_ionic_steps,
analyze_oszicar,
read_data_pickle,
get_final_atoms,
get_init_atoms,
get_magmoms_from_job,
get_ads_from_path,
)
from local_methods import is_job_started
from local_methods import get_forces_info
# -
from methods import isnotebook
isnotebook_i = isnotebook()
if isnotebook_i:
from tqdm.notebook import tqdm
verbose = True
else:
from tqdm import tqdm
verbose = False
# ### Script Inputs
# Rerun job parsing on all existing jobs, needed if job parsing methods are updated
rerun_all_jobs = False
# rerun_all_jobs = True
# +
compenv = os.environ.get("COMPENV", None)
if compenv != "wsl":
rerun_all_jobs = True
if rerun_all_jobs:
print("rerun_all_jobs=True")
# print("Remember to turn off this flag under normal operation")
PROJ_irox_oer_gdrive = os.environ["PROJ_irox_oer_gdrive"]
# -
# ### Read Data
# +
# #########################################################
df_jobs_paths = get_df_jobs_paths()
# #########################################################
df_jobs = get_df_jobs(exclude_wsl_paths=True)
# #########################################################
df_jobs_data_clusters = get_df_jobs_data_clusters()
# #########################################################
df_jobs_data_old = get_df_jobs_data(exclude_wsl_paths=True, drop_cols=False)
# #########################################################
# Checking if in local env
if compenv == "wsl":
df_jobs_i = df_jobs
else:
df_jobs_i = df_jobs[df_jobs.compenv == compenv]
# -
# ### Getting job state loop
# +
data_dict_list = []
for job_id_i, row_i in df_jobs_i.iterrows():
data_dict_i = dict()
# #####################################################
compenv_i = row_i.compenv
# #####################################################
# #####################################################
job_id = row_i.job_id
att_num = row_i.att_num
# #####################################################
# #####################################################
df_jobs_paths_i = df_jobs_paths[
df_jobs_paths.compenv == compenv_i]
row_jobs_paths_i = df_jobs_paths_i.loc[job_id_i]
# #####################################################
gdrive_path = row_jobs_paths_i.gdrive_path
path_job_root_w_att_rev = row_jobs_paths_i.path_job_root_w_att_rev
# #####################################################
data_dict_i["job_id"] = job_id
data_dict_i["compenv"] = compenv_i
data_dict_i["att_num"] = att_num
if compenv == "wsl":
path_full_i = os.path.join(
PROJ_irox_oer_gdrive,
gdrive_path)
else:
path_full_i = os.path.join(
os.environ["PROJ_irox_oer"],
path_job_root_w_att_rev)
# #################################################
job_state_i = parse_job_state(path_full_i)
data_dict_i.update(job_state_i)
data_dict_list.append(data_dict_i)
# #################################################
df_jobs_state = pd.DataFrame(data_dict_list)
# -
def read_dir_data_dict(path_i):
"""
"""
# ########################################################
import os
import json
from pathlib import Path
data_path = os.path.join(
path_i, "data_dict.json")
my_file = Path(data_path)
if my_file.is_file():
with open(data_path, "r") as fle:
data_dict_i = json.load(fle)
else:
data_dict_i = dict()
# ########################################################
return(data_dict_i)
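# Hypothetical usage sketch (the path is made up): read_dir_data_dict returns the parsed contents
# of <path>/data_dict.json, or an empty dict when that file does not exist.
#
#   data_dict_i = read_dir_data_dict("/path/to/job_dir")   # -> {} if data_dict.json is absent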
# ### Main Loop
print("Starting the main loop on parse_job_data.py")
# +
# # TEMP
# print(222 * "TEMP | ")
# df_jobs_i = df_jobs_i.loc[[
# # "seratado_15",
# # "gegupagu_35",
# "pulefevo_10",
# ]]
# +
rows_from_clusters = []
rows_from_prev_df = []
data_dict_list = []
for job_id_i, row_i in df_jobs_i.iterrows():
# print(job_id_i)
# #####################################################
data_dict_i = dict()
# #####################################################
bulk_id = row_i.bulk_id
slab_id = row_i.slab_id
job_id = row_i.job_id
facet = row_i.facet
ads = row_i.ads
compenv_i = row_i.compenv
active_site_i = row_i.active_site
att_num = row_i.att_num
rev_num = row_i.rev_num
# #####################################################
# #####################################################
row_jobs_paths_i = df_jobs_paths.loc[job_id_i]
# #####################################################
path_job_root_w_att_rev = row_jobs_paths_i.path_job_root_w_att_rev
gdrive_path_i = row_jobs_paths_i.gdrive_path
# #####################################################
# #####################################################
df_jobs_data_clusters_i = df_jobs_data_clusters[
df_jobs_data_clusters.compenv == compenv_i]
# #####################################################
# #####################################################
# gdrive_path_i = df_jobs_paths.loc[job_id_i].gdrive_path
incar_path = os.path.join(
os.environ["PROJ_irox_oer_gdrive"],
gdrive_path_i,
"INCAR")
# #####################################################
# #####################################################
# #####################################################
# Deciding to run job or grabbing it from elsewhere
# #####################################################
# #####################################################
# #####################################################
run_job_i = True
# job_grabbed_from_clusters = False
job_grabbed_from_prev_df = False
if rerun_all_jobs:
run_job_i = True
else:
if job_id_i in df_jobs_data_clusters_i.index:
run_job_i = False
job_grabbed_from_clusters = True
# #############################################
row_cluster_i = df_jobs_data_clusters_i.loc[job_id_i]
# #############################################
incar_params_i = row_cluster_i.incar_params
completed_i = row_cluster_i.completed
# #############################################
finished_path = os.path.join(
os.environ["PROJ_irox_oer_gdrive"],
gdrive_path_i, ".FINISHED")
job_finished = False
my_file = Path(finished_path)
if my_file.is_file():
job_finished = True
# print("Finished is there")
# If these conditions just rerun job
if not completed_i and job_finished:
run_job_i = True
job_grabbed_from_clusters = False
elif incar_params_i is None:
run_job_i = True
job_grabbed_from_clusters = False
if not run_job_i and job_grabbed_from_clusters:
if verbose:
print(job_id_i, "Grabbing from df_jobs_data_clusters")
rows_from_clusters.append(row_cluster_i)
# if not job_grabbed_from_clusters and job_id_i in df_jobs_data_old.index:
elif job_id_i in df_jobs_data_old.index:
run_job_i = False
# job_grabbed_from_clusters = True
row_from_prev_df = df_jobs_data_old.loc[job_id_i]
# #############################################
# If the prev INCAR params is None but the incar file is there then rerun
incar_params_i = row_from_prev_df.incar_params
# incar_file_and_df_dont_match = False
data_in_df_and_dir_dont_match = False
if incar_params_i is None:
my_file = Path(incar_path)
if my_file.is_file():
data_in_df_and_dir_dont_match = True
# incar_file_and_df_dont_match = True
run_job_i = True
# #############################################
# If row is shown as not completed but the .FINISHED.new file is there then rerun
completed_i = row_from_prev_df.completed
finished_file_path_i = os.path.join(
os.environ["PROJ_irox_oer_gdrive"],
gdrive_path_i,
".FINISHED.new")
my_file = Path(finished_file_path_i)
# print("WHAT SDIFJDSIF SDFJ")
# print("completed_i:", completed_i)
# print("my_file.is_file():", my_file.is_file())
# if completed_i is False and my_file.is_file():
if not completed_i and my_file.is_file():
# print("ISJHFISDIJFIJSDI")
data_in_df_and_dir_dont_match = True
run_job_i = True
# #############################################
# if not incar_file_and_df_dont_match:
if not data_in_df_and_dir_dont_match:
if verbose:
print(job_id_i, "Grabbing from prev df_jobs_data")
rows_from_prev_df.append(row_from_prev_df)
else:
if verbose:
print(job_id_i, "Failed to grab job data from elsewhere")
# #####################################################
# #####################################################
# #####################################################
# Deciding to run job or grabbing it from elsewhere
# #####################################################
# #####################################################
# #####################################################
if compenv == "wsl":
path_full_i = os.path.join(
PROJ_irox_oer_gdrive,
gdrive_path_i)
else:
path_full_i = os.path.join(
os.environ["PROJ_irox_oer"],
path_job_root_w_att_rev)
path_exists = False
my_file = Path(path_full_i)
if my_file.is_dir():
path_exists = True
if run_job_i and path_exists:
print(path_full_i)
if verbose:
print("running job")
# print("isjfdsi 000000 - - - - ")
# #################################################
job_err_out_i = parse_job_err(path_full_i, compenv=compenv_i)
# print("isjfdsi 11")
finished_i = parse_finished_file(path_full_i)
job_state_i = parse_job_state(path_full_i)
job_submitted_i = is_job_submitted(path_full_i)
job_started_i = is_job_started(path_full_i)
# print("isjfdsi 222")
isif_i = get_isif_from_incar(path_full_i)
num_steps = get_number_of_ionic_steps(path_full_i)
oszicar_anal = analyze_oszicar(path_full_i)
incar_params = read_incar(path_full_i, verbose=verbose)
irr_kpts = get_irr_kpts_from_outcar(path_full_i)
pickle_data = read_data_pickle(path_full_i)
# print("isjfdsi 333")
init_atoms = get_init_atoms(path_full_i)
final_atoms = get_final_atoms(path_full_i)
magmoms_i = get_magmoms_from_job(path_full_i)
data_dict_out_i = read_dir_data_dict(path_full_i)
forces_dict_out_i = get_forces_info(path_full_i)
# #################################################
# #################################################
data_dict_i.update(job_err_out_i)
data_dict_i.update(finished_i)
data_dict_i.update(job_state_i)
data_dict_i.update(job_submitted_i)
data_dict_i.update(job_started_i)
data_dict_i.update(isif_i)
data_dict_i.update(num_steps)
data_dict_i.update(oszicar_anal)
data_dict_i.update(pickle_data)
data_dict_i.update(data_dict_out_i)
data_dict_i.update(forces_dict_out_i)
# #################################################
data_dict_i["facet"] = facet
data_dict_i["bulk_id"] = bulk_id
data_dict_i["slab_id"] = slab_id
data_dict_i["ads"] = ads
data_dict_i["job_id"] = job_id
data_dict_i["compenv"] = compenv_i
data_dict_i["active_site"] = active_site_i
data_dict_i["att_num"] = att_num
data_dict_i["rev_num"] = rev_num
data_dict_i["incar_params"] = incar_params
data_dict_i["irr_kpts"] = irr_kpts
data_dict_i["init_atoms"] = init_atoms
data_dict_i["final_atoms"] = final_atoms
data_dict_i["magmoms"] = magmoms_i
# #################################################
data_dict_list.append(data_dict_i)
# #################################################
elif run_job_i and not path_exists and compenv == "wsl":
print("A job needed to be processed but couldn't be found locally, or wasn't processed on the cluster")
print(job_id_i, "|", gdrive_path_i)
# else:
# print("Uhhh something didn't go through properly, check out")
# #########################################################
df_jobs_data = pd.DataFrame(data_dict_list)  # api: pandas.DataFrame
#!/usr/bin/env python
""" MultiQC module to parse output from scChIPseq pipeline """
from __future__ import print_function
from collections import OrderedDict
import logging
import os
import re
import pandas as pd
import subprocess
import pyBigWig as pyBW
from multiqc import config
from multiqc.plots import bargraph
from multiqc.plots import linegraph
from multiqc.modules.base_module import BaseMultiqcModule
from itertools import chain
from multiqc.plots import linegraph
import math
# Initialise the logger
log = logging.getLogger(__name__)
# Initialise your class and so on
class MultiqcModule(BaseMultiqcModule):
def __init__(self):
# Initialise the parent object
super(MultiqcModule, self).__init__(name='scChIPseq', anchor='scChIPseq',
href="https://gitlab.curie.fr/data-analysis/ChIP-seq_single-cell_LBC",
info="is a DNA alignment pipeline dedicated to single-cell ChIP-seq experiments")
# Find and load any scChIPseq reports
self.scChIPseq_data = dict()
for f in self.find_log_files('scChIPseq/all_logs'):
log.info('Found the all_logs!')
parsed_data = self.parse_scChIPseq_report(f['f'])
if parsed_data is not None:
s_name = f['s_name']
if s_name == '':
s_name = self.clean_s_name(os.path.basename(f['root']), os.path.dirname(f['root']))
if s_name in self.scChIPseq_data:
log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
self.add_data_source(f, section='SummaryLog')
self.scChIPseq_data[s_name] = parsed_data
# Read in flagged_count
self.scChIPseq_flagged_count = dict()
for f in self.find_log_files('scChIPseq/flagged_count'):
log.info('Found the flagged_count !')
colnames = ['count', 'barcode']
if not f['root']:
log.info("is empty")
count = pd.read_csv("./" + f['fn'], delim_whitespace=True, names=colnames)
else:
log.info("is not empty")
count = pd.read_csv(f['root'] +"/" + f['fn'], delim_whitespace=True, names=colnames)
s_name = f['s_name']
if count is not None:
if s_name == '':
s_name = self.clean_s_name(os.path.basename(f['root']), os.path.dirname(f['root']))
if s_name in self.scChIPseq_flagged_count:
log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
self.scChIPseq_flagged_count[s_name] = count
#Read in flagged_PCR_count
self.scChIPseq_flagged_PCR_count = dict()
for f in self.find_log_files('scChIPseq/flagged_PCR_count'):
log.info('Found the scChIPseq_flagged_PCR_count !')
colnames = ['count', 'barcode']
if not f['root']:
log.info("is empty")
count = pd.read_csv("./" + f['fn'], delim_whitespace=True, names=colnames)
else:
log.info("is not empty")
count = pd.read_csv(f['root'] + "/" + f['fn'], delim_whitespace=True, names=colnames)
s_name = f['s_name']
if count is not None:
if s_name == '':
s_name = self.clean_s_name(os.path.basename(f['root']), os.path.dirname(f['root']))
if s_name in self.scChIPseq_flagged_PCR_count:
log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
self.scChIPseq_flagged_PCR_count[s_name] = count
#Read in flagged_PCR_RT_rmDup_count
self.scChIPseq_flagged_PCR_RT_count = dict()
for f in self.find_log_files('scChIPseq/flagged_PCR_RT_count'):
log.info('Found the scChIPseq_flagged_PCR_RT_count !')
colnames = ['count', 'barcode']
if not f['root']:
log.info("is empty")
count = pd.read_csv("./" + f['fn'], delim_whitespace=True, names=colnames)
else:
log.info("is not empty")
count = pd.read_csv(f['root'] + "/" + f['fn'], delim_whitespace=True, names=colnames)
s_name = f['s_name']
if count is not None:
if s_name == '':
s_name = self.clean_s_name(os.path.basename(f['root']), os.path.dirname(f['root']))
if s_name in self.scChIPseq_flagged_PCR_RT_count:
log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
self.scChIPseq_flagged_PCR_RT_count[s_name] = count
# Read in flagged_PCR_RT_rmDup_count
self.scChIPseq_flagged_PCR_RT_rmDup_count = dict()
for f in self.find_log_files('scChIPseq/flagged_PCR_RT_rmDup_count'):
log.info('FOUND THE scChIPseq_flagged_PCR_RT_rmDup_count !')
colnames = ['count', 'barcode']
if not f['root']:
log.info("is empty")
count = pd.read_csv("./" + f['fn'], delim_whitespace=True, names=colnames)
else:
log.info("is not empty")
count = pd.read_csv(f['root'] + "/" + f['fn'], delim_whitespace=True, names=colnames)
s_name = f['s_name']
if count is not None:
if s_name == '':
s_name = self.clean_s_name(os.path.basename(f['root']), os.path.dirname(f['root']))
if s_name in self.scChIPseq_flagged_PCR_RT_rmDup_count:
log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
self.scChIPseq_flagged_PCR_RT_rmDup_count[s_name] = count
# Read in count_matrix
self.scChIPseq_count_matrix = dict()
for f in self.find_log_files('scChIPseq/count_matrix'):
log.info('FOUND THE scChIPseq_count_matrix !')
if not f['root']:
log.info("is empty")
count = pd.read_csv("./" + f['fn'], delim_whitespace=True)
else:
log.info("is not empty")
count = pd.read_csv(f['root'] + "/" + f['fn'], delim_whitespace=True)
s_name = f['s_name']
if count is not None:
if s_name == '':
s_name = self.clean_s_name(os.path.basename(f['root']), os.path.dirname(f['root']))
if s_name in self.scChIPseq_count_matrix:
log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
self.scChIPseq_count_matrix[s_name] = count
# Filter to strip out ignored sample names
self.scChIPseq_data = self.ignore_samples(self.scChIPseq_data)
#self.scChIPseq_flagged_count = self.ignore_samples(self.scChIPseq_flagged_count)
#self.scChIPseq_flagged_PCR_count = self.ignore_samples(self.scChIPseq_flagged_PCR_count)
#self.scChIPseq_flagged_PCR_RT_count = self.ignore_samples(self.scChIPseq_flagged_PCR_RT_count)
# if len(self.scChIPseq_data) == 0 or len(self.scChIPseq_flagged_count) == 0 or len(self.scChIPseq_flagged_PCR_count) == 0 or len(self.scChIPseq_flagged_PCR_RT_count) == 0:
# raise UserWarning
if len(self.scChIPseq_data) > 0:
log.info("Found {} reports".format(len(self.scChIPseq_data)))
if len(self.scChIPseq_flagged_count) > 0:
log.info("Found {} reports".format(len(self.scChIPseq_flagged_count)))
if len(self.scChIPseq_flagged_PCR_count) > 0:
log.info("Found {} reports".format(len(self.scChIPseq_flagged_PCR_count)))
if len(self.scChIPseq_flagged_PCR_RT_count) > 0:
log.info("Found {} reports".format(len(self.scChIPseq_flagged_PCR_RT_count)))
if len(self.scChIPseq_flagged_PCR_RT_rmDup_count) > 0:
log.info("Found {} reports".format(len(self.scChIPseq_flagged_PCR_RT_rmDup_count)))
if len(self.scChIPseq_count_matrix) > 0:
log.info("Found {} reports".format(len(self.scChIPseq_flagged_PCR_RT_rmDup_count)))
if len(self.scChIPseq_data) > 0:
# Write parsed report data to a file
self.write_data_file(self.scChIPseq_data, 'multiqc_scChIPseq')
# Basic Stats Table
self.scChIPseq_stats_table()
# Barcode matching bar plot
self.add_section(
name='Barcode Matching',
anchor='scChIPseq_barcode',
plot=self.scChIPseq_barcode_chart()
)
# Alignment bar plot
self.add_section (
name = 'Alignment Scores',
anchor = 'scChIPseq_alignments',
plot = self.scChIPseq_alignment_chart()
)
if len(self.scChIPseq_flagged_count) > 0:
self.scChIPseq_flagged_coverage_chart()
if len(self.scChIPseq_flagged_PCR_count) > 0:
self.scChIPseq_flagged_PCR_coverage_chart()
if len(self.scChIPseq_flagged_PCR_RT_count) > 0:
self.scChIPseq_flagged_PCR_RT_coverage_chart()
if len(self.scChIPseq_flagged_PCR_RT_rmDup_count) > 0:
self.scChIPseq_flagged_PCR_RT_rmDup_coverage_chart()
if len(self.scChIPseq_count_matrix) > 0:
self.scChIPseq_count_matrix_region_coverage_chart()
self.scChIPseq_count_matrix_cell_coverage_chart()
def parse_scChIPseq_report (self, raw_data):
""" Parse the combined scChIPseq log file. """
regexes = {
'total_reads': r"Number of input reads \|\s+(\d+)",
'avg_input_read_length': r"Average input read length \|\s+([\d\.]+)",
'uniquely_mapped': r"Uniquely mapped reads number \|\s+(\d+)",
'uniquely_mapped_percent': r"Uniquely mapped reads % \|\s+([\d\.]+)",
'avg_mapped_read_length': r"Average mapped length \|\s+([\d\.]+)",
'num_splices': r"Number of splices: Total \|\s+(\d+)",
'num_annotated_splices': r"Number of splices: Annotated \(sjdb\) \|\s+(\d+)",
'num_GTAG_splices': r"Number of splices: GT/AG \|\s+(\d+)",
'num_GCAG_splices': r"Number of splices: GC/AG \|\s+(\d+)",
'num_ATAC_splices': r"Number of splices: AT/AC \|\s+(\d+)",
'num_noncanonical_splices': r"Number of splices: Non-canonical \|\s+(\d+)",
'mismatch_rate': r"Mismatch rate per base, % \|\s+([\d\.]+)",
'deletion_rate': r"Deletion rate per base \|\s+([\d\.]+)",
'deletion_length': r"Deletion average length \|\s+([\d\.]+)",
'insertion_rate': r"Insertion rate per base \|\s+([\d\.]+)",
'insertion_length': r"Insertion average length \|\s+([\d\.]+)",
'multimapped': r"Number of reads mapped to multiple loci \|\s+(\d+)",
'multimapped_percent': r"% of reads mapped to multiple loci \|\s+([\d\.]+)",
'multimapped_toomany': r"Number of reads mapped to too many loci \|\s+(\d+)",
'multimapped_toomany_percent': r"% of reads mapped to too many loci \|\s+([\d\.]+)",
'unmapped_mismatches_percent': r"% of reads unmapped: too many mismatches \|\s+([\d\.]+)",
'unmapped_tooshort_percent': r"% of reads unmapped: too short \|\s+([\d\.]+)",
'unmapped_other_percent': r"% of reads unmapped: other \|\s+([\d\.]+)",
'match_index_1': r"## Number of matched indexes 1:\s+([\d\.]+)",
'match_index_2': r"## Number of matched indexes 2:\s+([\d\.]+)",
'match_index_1_2': r"## Number of matched indexes 1 and 2:\s+([\d\.]+)",
'match_index_3': r"## Number of matched indexes 3:\s+([\d\.]+)",
'match_barcode': r"## Number of matched barcodes:\s+([\d\.]+)",
'uniquely_mapped_and_barcoded': r"## Number of reads mapped and barcoded:\s+([\d\.]+)",
'pcr_duplicates': r"## Number of pcr duplicates:\s+([\d\.]+)",
'rt_duplicates': r"## Number of rt duplicates:\s+([\d\.]+)",
'R1_mapped_R2_unmapped': r"## Number of R1 mapped but R2 unmapped:\s+([\d\.]+)",
'reads_after_pcr_rt_rm': r"## Number of reads after PCR and RT removal \(not R1 unmapped R2\):\s+([\d\.]+)",
'R2_unmapped_duplicates': r"## Number of duplicates:\s+([\d\.]+)",
'unique_reads': r"## Number of reads after duplicates removal:\s+([\d\.]+)"
}
parsed_data = {}
for k, r in regexes.items():
r_search = re.search(r, raw_data, re.MULTILINE)
if r_search:
parsed_data[k] = float(r_search.group(1))
# Figure out the numbers for unmapped as for some reason only the percentages are given
try:
total_mapped = parsed_data['uniquely_mapped'] + parsed_data['multimapped'] + parsed_data['multimapped_toomany']
unmapped_count = parsed_data['total_reads'] - total_mapped
total_unmapped_percent = parsed_data['unmapped_mismatches_percent'] + parsed_data['unmapped_tooshort_percent'] + parsed_data['unmapped_other_percent']
parsed_data['uniquely_mapped_unbarcoded'] = int(round(parsed_data['uniquely_mapped']-parsed_data['uniquely_mapped_and_barcoded']))
parsed_data['multimapped'] = int(round(parsed_data['multimapped'] + parsed_data['multimapped_toomany']))
parsed_data['unmapped'] = unmapped_count
#Data for the barcode matching graph
parsed_data['reads_after_pcr_rt_rm']=parsed_data['reads_after_pcr_rt_rm'] - parsed_data['R1_mapped_R2_unmapped']
parsed_data['index_1_2_not_3'] = int(round(parsed_data['match_index_1_2'] - parsed_data['match_barcode']))
parsed_data['index_1_not_2_not_3'] = int(round(parsed_data['match_index_1'] - parsed_data['index_1_2_not_3'] - parsed_data['match_barcode']))
parsed_data['index_2_not_1_3'] = int(round(parsed_data['match_index_2'] - parsed_data['match_index_1_2']))
parsed_data['index_3_not_1_2'] = int(round(parsed_data['match_index_3'] - parsed_data['match_barcode']))
parsed_data['no_index_found'] = int(round(parsed_data['total_reads'] - parsed_data['match_barcode'] - parsed_data['index_1_2_not_3'] - parsed_data['index_1_not_2_not_3'] - parsed_data['index_2_not_1_3'] - parsed_data['index_3_not_1_2']))
parsed_data['uniquely_mapped_and_barcoded_percent'] = 100*parsed_data['uniquely_mapped_and_barcoded'] / parsed_data['total_reads']
parsed_data['unique_reads_percent'] = 100 * parsed_data['unique_reads'] / \
parsed_data['total_reads']
log.info(parsed_data['uniquely_mapped_and_barcoded_percent'])
try:
parsed_data['unmapped_mismatches'] = int(round(unmapped_count * (parsed_data['unmapped_mismatches_percent'] / total_unmapped_percent), 0))
parsed_data['unmapped_tooshort'] = int(round(unmapped_count * (parsed_data['unmapped_tooshort_percent'] / total_unmapped_percent), 0))
parsed_data['unmapped_other'] = int(round(unmapped_count * (parsed_data['unmapped_other_percent'] / total_unmapped_percent), 0))
except ZeroDivisionError:
parsed_data['unmapped_mismatches'] = 0
parsed_data['unmapped_tooshort'] = 0
parsed_data['unmapped_other'] = 0
except KeyError:
pass
if len(parsed_data) == 0: return None
return parsed_data
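    # For orientation only -- the returned dict maps the regex keys above to floats plus the derived
    # fields, e.g. (illustrative numbers, not real data):
    #   {'total_reads': 1000000.0, 'uniquely_mapped': 800000.0, 'match_barcode': 750000.0,
    #    'unmapped': 150000.0, 'uniquely_mapped_and_barcoded_percent': 72.0, ...}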
def scChIPseq_stats_table(self):
""" Take the parsed stats from the STAR report and add them to the
basic stats table at the top of the report """
headers = OrderedDict()
headers['uniquely_mapped_percent'] = {
'title': '% Aligned',
'description': '% Uniquely mapped reads',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'YlGn'
}
headers['uniquely_mapped_and_barcoded_percent'] = {
'title': '% Aligned and Barcoded',
'description': '% Aligned and Barcoded reads',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'YlGn'
}
headers['unique_reads_percent'] = {
'title': '% Unique Reads',
'description': '% Unique Reads',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'YlGn'
}
self.general_stats_addcols(self.scChIPseq_data, headers)
def scChIPseq_alignment_chart (self):
""" Make the plot showing alignment rates """
# Specify the order of the different possible categories
keys = OrderedDict()
keys['unique_reads'] = {'color': '#00bf00', 'name': 'Deduplicated reads'}
keys['R2_unmapped_duplicates'] = {'color': '#00e887', 'name': '\"Window\" duplicates'}
keys['rt_duplicates'] = {'color': '#0c7bd1', 'name': 'RT duplicates'}
keys['pcr_duplicates'] = {'color': '#4914e8', 'name': 'PCR duplicates'}
keys['uniquely_mapped_unbarcoded'] = {'color': '#b5d30c', 'name': 'Uniquely mapped, not barcoded'}
keys['multimapped'] = {'color': '#edb900', 'name': 'Mapped to multiple loci'}
keys['unmapped'] = {'color': '#ff2c20', 'name': 'Unmapped'}
# Config for the plot
pconfig = {
'id': 'scChIPseq_alignment_plot',
'title': 'scChIPseq: Alignment Scores',
'ylab': '# Reads',
'cpswitch_counts_label': 'Number of Reads'
}
return bargraph.plot(self.scChIPseq_data, keys, pconfig)
def scChIPseq_barcode_chart (self):
""" Make the plot showing alignment rates """
# Specify the order of the different possible categories
keys = OrderedDict()
keys['match_barcode'] = {'color': '#00bf00', 'name': 'Barcoded'}
keys['index_1_2_not_3'] = { 'color': '#b5d30c', 'name': 'Index 1 and 2 found, not 3'}
keys['index_1_not_2_not_3'] = { 'color': '#edb900', 'name': 'Index 1 found, not 2 and 3' }
keys['index_2_not_1_3'] = { 'color': '#8922ff', 'name': 'Index 2 found, not 1 and 3' }
keys['index_3_not_1_2'] = { 'color': '#fb21ff', 'name': 'Index 3 found, not 1 and 2' }
keys['no_index_found'] = { 'color': '#ff2c20', 'name': 'No Index Found ~ genomic DNA' }
# Config for the plot
pconfig = {
'id': 'scChIPseq_barcode_plot',
'title': 'scChIPseq: Barcode Mapping',
'ylab': '# Reads',
'cpswitch_counts_label': 'Number of Reads'
}
return bargraph.plot(self.scChIPseq_data, keys, pconfig)
def scChIPseq_flagged_coverage_chart (self):
""" Make the plot showing alignment rates """
for keys in self.scChIPseq_flagged_count.keys():
flagged = pd.Series(self.scChIPseq_flagged_count[keys]['count']).value_counts()
flagged = pd.DataFrame(data=[flagged.values.tolist(), flagged.keys().to_list()])
flagged=flagged.transpose()
flagged.columns = ['Barcodes_Number', 'Reads_per_barcode']
flagged = flagged[flagged.Reads_per_barcode >= 500]
max_bins=math.ceil(flagged['Reads_per_barcode'].quantile(0.95))
step= math.ceil((max_bins-500)/40)
bins = list(range(500,max_bins,step))
flagged_dict = dict()
for index, row in flagged.iterrows():
for i in bins:
if row['Reads_per_barcode'] >= i and row['Reads_per_barcode'] < (i + step):
if i not in flagged_dict:
flagged_dict[i] = int(row['Barcodes_Number'])
else:
flagged_dict[i] = flagged_dict[i] + int(row['Barcodes_Number'])
if row['Reads_per_barcode'] >= (max_bins + step):
if (max_bins + step) not in flagged_dict:
flagged_dict[max_bins + step] = int(row['Barcodes_Number'])
else:
flagged_dict[max_bins + step] = flagged_dict[max_bins + step] + int(row['Barcodes_Number'])
data = dict()
data[keys] = flagged_dict
data_color = dict()
data_color[keys] = "#15a594"
log.info(data)
#log.info(dict(list(data.items())[0:2]))
# Config for the plot
pconfig = {
'id': 'scChIPseq_flagged_coverage_plot',
'title': "Read distribution across barcodes before duplicate removals",
'ylab': '# Barcodes',
'xlab': '# Reads per barcode',
'cpswitch_counts_label': 'Number of Reads',
'colors': data_color,
'smooth_points': 100, # Supply a number to limit number of points / smooth data
'smooth_points_sumcounts': True,
}
desc = "**Number of barcodes with more than 500 reads: **" + str(sum(flagged[flagged['Reads_per_barcode']>=500].Barcodes_Number)) +"<br>"+ "**Number of barcodes with more than 1000 reads: **" + str(sum(flagged[flagged['Reads_per_barcode']>=1000].Barcodes_Number)) + "<br>"+ "**Number of barcodes with more than 1500 reads: **" + str(sum(flagged[flagged['Reads_per_barcode']>=1500].Barcodes_Number))
self.add_section(
name='Read distribution across barcodes before duplicate removal',
anchor='scChIPseq_coverage_flagged',
description=desc,
plot=linegraph.plot(data, pconfig)
)
def scChIPseq_flagged_PCR_coverage_chart (self):
""" Make the plot showing alignment rates """
for keys in self.scChIPseq_flagged_PCR_count.keys():
            flagged_PCR = pd.Series(self.scChIPseq_flagged_PCR_count[keys]['count'])  # api: pandas.Series
#!/usr/bin/env python
# -*- coding: <encoding name> -*-
"""
CSData.py: Collection of tools for cgn-data-21-1
Capstone Project: Product Clustering
Functions:
KMeans_clustering(dict_begin,
images,
PATH,
DATA,
CLUSTER=4)
return: - -> saves data: ct.write_dict('img_hist_lst_'+str(CLUSTER),img_hist_lst,DATA)
# fake gaussian distribution
do_boost(p,boost):
return: P
gen_feature_vec(dict_begin,
img_hist_lst,
booster=BOOSTER,
ext='EXT',
scale=SCALE,
chop=CHOP,
stop=35000)
return: data, stats
dist_func_vec1(A, B)
return: np.sqrt(sum((A - B)**2))
dist_func_vec2(A, B)
return: np.sqrt(np.sum(np.square(A - B)))
dist_func_vec3(A1, B1, A2, B2, A3, B3)
return: np.sqrt(np.sum(np.square(A1 - B1)) +
np.sum(np.square(A2 - B2)) +
np.sum(np.square(A3 - B3)))
f_score_i(cl_real_i, cl_pred_i)
return: 2*len(s_intsec) / (len(s_pred)+len(s_real))
recall_i(cl_real_i, cl_pred_i)
return: (len(s_real) - len(s_diff_r_p)) / len(s_real)
precision_i(cl_real_i, cl_pred_i)
return: (len(s_pred) - len(s_diff_p_r)) / len(s_pred)
data_sort(all_data = None,
use = 16,
index = 0,
colorspace = 'lab')
return: pid_lst_all, clr_lst_all, dst_lst_all
# function to set up and prep the pre-clustered data
# the pre-clustered data was feature engineered to
# provide vectors in RGB, HSV and LAB colorspace
data_prep(all_data = None,
use = 16,
index = 0,
colorspace = 'lab')
return: pid_lst_all, clr_lst_all
calc_dist(pid_lst_ndx = None,
pid_lbl_grp = None,
pid_lst_all = None,
clr_lst_all = None)
return: dst_pid, plt_dat
calc_scoring(dst_pid = None,
dict_begin = None,
verbose = False,
show = 5,
knn_stop = 10,
normalize = True,
threshold = 0.3,
norm_value = 100)
return: goodness, f1scoreA, f1scoreB, recall, precision
# (16, 2, 'lab', 20, 50, 0.5)
calc_np_scoring(pid_lst = None,
dst_lst = None,
dict_begin = None,
verbose = False,
show = 10,
stop = 100,
knn_stop = 20,
normalize = True,
threshold = 0.5,
norm_value = 50)
return: result
"""
__author__ = "<NAME>"
__license__ = "GPL"
__version__ = "0.1"
__status__ = "Development"
# import modules
import os
from time import time
import numpy as np
import pandas as pd
import cv2
from sklearn.cluster import KMeans
from tqdm import tqdm
import CSTools as ct
BOOSTER = 5
SCALE = 1.0
CHOP = (0,-1)
def KMeans_clustering(dict_begin,
images,
PATH,
DATA,
CLUSTER=4):
"""
KMeans_clustering: function to cluster images by color
with a given k
input:
dict_begin: project data as dict()
images : list() of images to cluster
PATH : project path
DATA : project data path
CLUSTER : # of k-Means cluster to obtain
output:
none - file will be saved
ct.write_dict('img_hist_lst_'+str(CLUSTER),img_hist_lst,DATA)
"""
start = time()
run = time()
c = 0
i = 0
img_hist_lst = dict()
stop = 35000
for img, pid, lbl, target in zip(dict_begin['df_train_csv'].image.values,
dict_begin['df_train_csv'].posting_id.values,
dict_begin['df_train_csv'].label_group.values,
dict_begin['df_train_csv'].target.values):
file = PATH+'/'+img
if file in images:
img = os.path.basename(file)
# load the image and convert it from BGR to RGB so that
# we can dispaly it with matplotlib
#width, height = 224, 224
width, height = 175, 175
dsize = (width, height)
image = cv2.resize(cv2.imread(file),dsize)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# reshape the image to be a list of pixels
image = image.reshape((image.shape[0] * image.shape[1], 3))
# cluster the pixel intensities
clt = KMeans(n_clusters=CLUSTER, random_state=42, verbose=0);
clt.fit(image);
hist = ct.centroid_histogram(clt)
hist_img = list()
for percent, color in zip(hist, clt.cluster_centers_):
hist_img.append([percent, color])
img_hist_lst[img] = [hist_img,hist, clt.cluster_centers_, [pid, lbl, target]]
i += 1
c += 1
if not i % 50:
print('.',end='')
if i > 999:
i = 0
run = float((time()-run)/60)
print(f'\ncount: {c} of {len(images)} rtime: {run:.2f}',end='')
runs = float((time()-start)/60)
left = float((run*33)-(run*(c/1000)))
print(f' ttime: {runs:.2f} tleft: {left:.2f}',end='')
run = time()
ct.write_dict('img_hist_lst_temp',img_hist_lst,DATA,silent=True)
stop -= 1
if stop < 1:
break
ct.write_dict('img_hist_lst_'+str(CLUSTER),img_hist_lst,DATA)
start = float((time()-start)/60)
print(f'total time(m): {start:.2f}')
start = float(start/60)
print(f'total time(h): {start:.2f}')
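# Hedged usage sketch -- the paths and cluster count below are placeholders, not values from the
# original project. Results are persisted via ct.write_dict('img_hist_lst_<CLUSTER>', ...) rather
# than returned.
#
#   KMeans_clustering(dict_begin, images, PATH="/project/images", DATA="/project/data", CLUSTER=4)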
# fake gaussian distribution
def do_boost(p,boost):
"""
do_boost: function for a fake gaussian distribution
to create a smeared (bigger) footprint
input:
p : value to boost
boost: # to chose width of boost
output:
P: list() with boosted p values
"""
P = list()
if int(boost) == 1:
P.append(p)
elif int(boost) == 2:
P.append(p*0.95)
P.append(p*0.95)
elif int(boost) == 3:
P.append(p*0.33)
P.append(p*0.99)
P.append(p*0.33)
elif int(boost) == 4:
P.append(p*0.4)
P.append(p*0.95)
P.append(p*0.95)
P.append(p*0.4)
elif int(boost) == 5:
P.append(p*0.3)
P.append(p*0.72)
P.append(p*0.99)
P.append(p*0.72)
P.append(p*0.3)
elif int(boost) == 6:
P.append(p*0.1)
P.append(p*0.25)
P.append(p*0.76)
P.append(p*0.99)
P.append(p*0.76)
P.append(p*0.25)
P.append(p*0.1)
elif int(boost) == 7:
P.append(p*0.1)
P.append(p*0.25)
P.append(p*0.50)
P.append(p*0.76)
P.append(p*0.99)
P.append(p*0.76)
P.append(p*0.50)
P.append(p*0.25)
P.append(p*0.1)
else:
P.append(p*0.1)
P.append(p*0.2)
P.append(p*0.3)
P.append(p*0.5)
P.append(p*0.8)
P.append(p*0.99)
P.append(p*0.8)
P.append(p*0.5)
P.append(p*0.3)
P.append(p*0.2)
P.append(p*0.1)
return P
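# Worked examples of the boost levels above (values follow directly from the code):
#   do_boost(1.0, 1) -> [1.0]
#   do_boost(1.0, 3) -> [0.33, 0.99, 0.33]
#   do_boost(2.0, 5) -> [0.6, 1.44, 1.98, 1.44, 0.6]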
def gen_feature_vec(dict_begin,
img_hist_lst,
booster=BOOSTER,
ext='EXT',
scale=SCALE,
chop=CHOP,
stop=35000):
"""
    gen_feature_vec: generate colour-histogram feature vectors (RGB/HSV/LAB)
                     from the per-image k-Means clusters
input:
dict_begin : project data as dict()
img_hist_lst: k-Means cluster per image
booster : # of boost to use - see do_boost()
ext : not used anymore
scale : scale of p-value
chop : can bu used to cut list() entries
stop : # for main loop to stop
output:
data : dict() with generated features
stats: statistics for RGB, HSV and LAB range
"""
print('img_hist_lst', len(img_hist_lst))
print('posting_id ', len(dict_begin['df_train_csv'].posting_id.values))
print('BOOSTER ', booster)
print('EXT ', ext)
print('SCALE ', scale)
print('CHOP ', chop)
print('stop ', stop)
data = dict()
data['hsv'] = pd.DataFrame()
    data['rgb'] = pd.DataFrame()  # api: pandas.DataFrame
from . import Kmeans
from . import GM
from . import radarPlot
from . import heatmap
import matplotlib.pyplot as plt
import numpy as np
import pandas as pa
from scipy.sparse import csr_matrix, isspmatrix
from scipy.sparse import csgraph
from sklearn.preprocessing import normalize
from sklearn.metrics import pairwise_distances
import mpl_toolkits.mplot3d.axes3d as p3
import pylab as p
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
import sys
import os
import time
from matplotlib.lines import Line2D
from matplotlib.pyplot import cm
from collections import Counter
from gprofiler import gprofiler
import copy
import operator
import scipy
import seaborn as sns
import random
from gensim.models import HdpModel,LdaModel
from sklearn import cluster
from sklearn.neighbors import kneighbors_graph
from sklearn import metrics
class MICTI:
def __init__(self,data,geneNames,cellNames,k=None,cluster_label=None,cluster_assignment=None, th=0,seed=None, ensembel=False, organisum="hsapiens"):
self.data=data
self.k=k
self.th=th
self.geneNames=geneNames
self.cellNames=cellNames
self.seed=seed
self.ensembl=ensembel
self.organsm=organisum
self.cluster_assignment=cluster_assignment
self.cluster_label=cluster_label
self.color=cluster_assignment
self.color_dict={}
self.data_ICF=self.ICF(self.data)
self.initialize_colors()
def get_cluster_assignment(self):
return self.cluster_assignment
def initialize_colors(self):
colors=['#ffe119','#0082c8','#f58231','#911eb4','#46f0f0','#f032e6','#d2f53c','#fabebe','#008080','#e6beff',
'#aa6e28','#fffac8','#800000','#aaffc3','#808000','#ffd8b1','#000080','#808080','#FFFFFF','#000000'][:self.k]
cell_type=pa.Series([self.cluster_label[j] for j in self.cluster_assignment])
cell_type=cell_type.sort_values()
lut2=dict(zip(cell_type.sort_values().unique(), colors))
lut2=dict(sorted(lut2.items()))
col_colors= cell_type.map(lut2)
        col_colors.index = pa.Series(self.cellNames)  # api: pandas.Series
# coding: utf-8
import json
import numpy as np
import pandas as pd
import glob
import seaborn as sns
import os, json
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # for 3D plots
from sklearn.neighbors import KNeighborsClassifier
#Enter the file path where all the JSON files are located
path_to_json = '/kate_data'
game_files = [pos_json for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]
#Enter Players to Study
players_to_study = [8,9]
#Assumption : By mentioning 1,2,3, etc. as players, we assume that the corresponding json files are 1.json, 2.json, 3.json, etc..
def norm(vect):
sum = 0
for el in vect:
sum += el**2
return np.sqrt(sum)
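# e.g. norm([3, 4]) == 5.0 -- plain Euclidean norm of a coordinate vector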
# Let's create a pandas DataFrame with position, time, rotation and BPM for each frame of the game, and a second DataFrame with the balloon-gathering data
def create_df_hand(game_file):
with open(game_file) as json_file:
data = json.load(json_file)
df_game = pd.DataFrame(data['datasList'][0]['listLevelDatas'][0]['userDatas'])
for i in range(1,len(data['datasList'][0]['listLevelDatas'])):
df_game = pd.concat([df_game, pd.DataFrame(data['datasList'][0]['listLevelDatas'][i]['userDatas'])])
#getting rid of the timeStamp's zero
df_game = df_game[df_game['timeStamp']>0]
#reset index after having got rid of the timeStamp zeros
df_game = df_game.reset_index(drop = True)
#let's create three new columns, each one with one coordinate for df_game:
    # If they later turn out to be useless, we can delete these lines to get rid of them
position = df_game['headPos'].apply(pd.Series)
df_game = pd.concat([df_game, position], axis=1)
#Drops the duplicated rows in the Hololens DataSet
indexes_to_drop = []
for index, row in df_game.iterrows():
if index != 0:
if df_game.loc[index, 'timeStamp'] == df_game.loc[index-1, 'timeStamp']:
indexes_to_drop.append(index)
#print('length indexes_to_drop:', len(indexes_to_drop))
df_game.drop(df_game.index[indexes_to_drop], inplace=True)
df_game = df_game.reset_index(drop = True)
    # Fixes the bug in the timeOfDestroy and timeOfSpawn that came with the Hololens data (values were resetting)
for index, row in df_game.iterrows():
if index != 0:
if df_game.loc[index, 'timeStamp'] < df_game.loc[index-1, 'timeStamp']:
for idx in range(index,len(df_game)):
df_game.at[idx, 'timeStamp'] = df_game.at[idx, 'timeStamp'] +df_game.at[index-1, 'timeStamp']
    # Here we create a column with a 5-element tuple: (x, y, z, t, rotation) for each dataframe
df_game['head_positions'] = df_game[['x', 'y', 'z', 'timeStamp', 'headRotationY']].apply(lambda x: tuple(x), axis=1)
return df_game
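# Hypothetical usage (the file name follows the assumption stated above that player 8 maps to 8.json):
#   df_hand = create_df_hand(os.path.join(path_to_json, "8.json"))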
def create_df_balloon(game_file):
with open(game_file) as json_file:
data = json.load(json_file)
        df_balloon = pd.DataFrame(data['datasList'][0]['listLevelDatas'][0]['listBalloonDatas'])  # api: pandas.DataFrame
import numpy as np
import json
import os
from random import shuffle
import pandas as pd
from tqdm import tqdm
from utils import *
import nltk
import argparse
parser = argparse.ArgumentParser(description='PreprocessWikiTables', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--tables_path', default='tables_redi2_1')
parser.add_argument('--collection_path', default='wikiTables/features2.csv')
parser.add_argument('--output_folder', default='wikiTables')
parser.add_argument('--output_file', default='data_fields_with_struct_values2.json')
args = parser.parse_args()
data_folder = args.tables_path
data_csv=pd.read_csv(args.collection_path)
tokenizer=nltk.WordPunctTokenizer()
stop_words=nltk.corpus.stopwords.words('english')
def prepare_table(test_table):
attributes = test_table['title']
pgTitle = test_table['pgTitle']
secondTitle = test_table['secondTitle']
caption = test_table['caption']
data = test_table['data']
pgTitle_feat = preprocess_seq(pgTitle, tokenizer,stop_words)
secondTitle_feat = preprocess_seq(secondTitle, tokenizer,stop_words)
caption_feat = preprocess_seq(caption, tokenizer,stop_words)
#pgTitle_feat=' '.join(pgTitle_feat)
#secondTitle_feat = ' '.join(secondTitle_feat)
#caption_feat = ' '.join(caption_feat)
data_csv = pd.DataFrame(data, columns=attributes)
attributes = list(data_csv)
inter_att = ' '.join(attributes)
all_att_tokens = preprocess_seq(inter_att, tokenizer,stop_words)
if len(all_att_tokens) == 0:
data_csv = data_csv.transpose()
# vec_att = np.array(attributes).reshape(-1, 1)
data_csv_array = np.array(data_csv)
# data_csv_array = np.concatenate([vec_att, data_csv_array], axis=1)
if data_csv_array.size > 0:
attributes = data_csv_array[0, :]
inter_att = ' '.join(attributes)
all_att_tokens = preprocess_seq(inter_att, tokenizer, stop_words)
        data_csv = pd.DataFrame(data_csv_array, columns=attributes)  # api: pandas.DataFrame
from copy import deepcopy
from functools import reduce
import operator
import numpy as np
import pandas as pd
from pandas.api.types import is_string_dtype
import pandapower as pp
import pandapower.topology as top
from pandapower.grid_equivalents.auxiliary import drop_internal_branch_elements, ensure_origin_id
from pandapower.grid_equivalents.get_equivalent import get_equivalent, \
merge_internal_net_and_equivalent_external_net
try:
import pandaplan.core.pplog as logging
except ImportError:
import logging
try:
from misc.groups import Group
group_imported = True
except ImportError:
group_imported = False
try:
from simbench import voltlvl_idx
simbench_imported = True
except ImportError:
simbench_imported = False
logger = logging.getLogger(__name__)
def getFromDict(dict_, keys):
""" Get value from nested dict """
return reduce(operator.getitem, keys, dict_)
def setInDict(dict_, keys, value):
""" Set value to nested dict """
getFromDict(dict_, keys[:-1])[keys[-1]] = value
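# Minimal illustration of the two helpers above (toy dict, not project data):
#   d = {"a": {"b": 1}}
#   getFromDict(d, ["a", "b"])     # -> 1
#   setInDict(d, ["a", "b"], 2)    # d becomes {"a": {"b": 2}}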
def appendSetInDict(dict_, keys, set_):
""" Use case specific: append existing value of type set in nested dict """
getFromDict(dict_, keys[:-1])[keys[-1]] |= set_
def setSetInDict(dict_, keys, set_):
""" Use case specific: set new or append existing value of type set in nested dict """
if isinstance(getFromDict(dict_, keys[:-1]), dict):
if keys[-1] in getFromDict(dict_, keys[:-1]).keys():
if isinstance(getFromDict(dict_, keys), set):
appendSetInDict(dict_, keys, set_)
else:
raise ValueError("The set in the nested dict cannot be appended since it actually "
"is not a set but a " + str(type(getFromDict(dict_, keys))))
else:
setInDict(dict_, keys, set_)
else:
raise ValueError("This function expects a dict for 'getFromDict(dict_, " + str(keys[:-1]) +
")', not a" + str(type(getFromDict(dict_, keys[:-1]))))
def append_set_to_dict(dict_, set_, keys):
""" Appends a nested dict by the values of a set, independant if the keys already exist or not.
"""
keys = pp.ensure_iterability(keys)
# ensure that the dict way to the last key exist
for pos, _ in enumerate(keys[:-1]):
if isinstance(getFromDict(dict_, keys[:pos]), dict):
if keys[pos] not in getFromDict(dict_, keys[:pos]).keys():
setInDict(dict_, keys[:pos + 1], dict())
else:
raise ValueError("This function expects a dict for 'getFromDict(dict_, " +
str(keys[:pos]) + ")', not a" + str(type(getFromDict(
dict_, keys[:pos]))))
# set the value
setSetInDict(dict_, keys, set_)
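# Example starting from an empty dict (follows from the code above) -- missing nesting levels are
# created on the fly before the set is stored or appended:
#   d = {}
#   append_set_to_dict(d, {1, 2}, ["a", "b"])   # d becomes {"a": {"b": {1, 2}}}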
def eq_name(eq_type, other_zone=None, zone=None, number=None):
number_str = "" if number is None else " %i" % number
st = "%s%s equivalent" % (eq_type, number_str)
if other_zone is not None:
st += " of zone "
if isinstance(other_zone, str):
st += "'%s'" % other_zone
else:
st += str(other_zone)
if zone is not None:
st += " at zone "
if isinstance(zone, str):
st += "'%s'" % zone
else:
st += str(zone)
return st
def set_bus_zone_by_boundary_branches(net, all_boundary_branches):
"""
Set integer values (0, 1, 2, ...) to net.bus.zone with regard to the given boundary branches in
'all_boundary_branches'.
INPUT:
**net** - pandapowerNet
**all_boundary_branches** (dict) - defines which element indices are boundary branches.
The dict keys must be pandapower elements, e.g. "line" or "trafo"
"""
include = dict.fromkeys(["line", "dcline", "trafo", "trafo3w", "impedance"])
for elm in include.keys():
if elm in all_boundary_branches.keys():
include[elm] = net[elm].index.difference(all_boundary_branches[elm])
else:
include[elm] = True
mg = top.create_nxgraph(net, include_lines=include["line"], include_impedances=include["impedance"],
include_dclines=include["dcline"], include_trafos=include["trafo"],
include_trafo3ws=include["trafo3w"])
cc = top.connected_components(mg)
ccl = [list(c) for c in cc]
areas = []
while len(ccl):
# check intersections of the first area with all other unchecked areas (remains in ccl) and
# then add first area unionized with all intersectioned other areas to "areas"
areas += [ccl.pop(0)]
n_last_area = -1
while n_last_area != len(areas[-1]):
            # check as long as len(areas[-1]) does not change anymore - needed because there can be
# intersections of remaining areas with the buses added to areas[-1]
# within the last while loop iteration via union
n_last_area = len(areas[-1])
for i, c in enumerate(ccl):
                if np.intersect1d(c, areas[-1]).size:
areas[-1] = np.union1d(areas[-1], ccl.pop(i))
for i, area in enumerate(areas):
net.bus.zone.loc[area] = i
def get_branch_power(net, bus, power_type, branches_dict=None):
"""
Sums power of branches connected to 'bus'. The power is summed negative (= how much power flows
into the bus).
INPUT:
**net** - pandapower net
**bus** (int) - index of the bus whose connected branches power flows are summed
**power_type** (str) - should be "p_mw" or "q_mvar"
OPTIONAL:
**branches_dict** (dict, None) - if given, only branches within 'branches_dict' are
considered for summing the power. An exemplary input is {"line": {0, 1, 2}, "trafo": {1}}.
"""
connected_branches = pp.get_connected_elements_dict(
net, [bus], connected_buses=False, connected_bus_elements=False,
connected_branch_elements=True, connected_other_elements=False)
power = 0
bus_types = ["from_bus", "to_bus", "hv_bus", "lv_bus", "mv_bus"]
for elm, idxs in connected_branches.items():
if branches_dict is not None:
if elm in branches_dict.keys():
idxs = set(branches_dict[elm]).intersection(set(idxs))
else:
continue
for idx in idxs:
for bus_type in bus_types:
if bus_type in net[elm].columns and net[elm][bus_type].at[idx] == bus:
col = power_type[0] + "_" + bus_type.split("_")[0] + power_type[1:]
power -= net["res_" + elm][col].at[idx]
break
return power
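# Hedged usage sketch reusing the exemplary branches_dict from the docstring (the bus index is made up):
#   p_into_bus = get_branch_power(net, bus=3, power_type="p_mw",
#                                 branches_dict={"line": {0, 1, 2}, "trafo": {1}})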
def _create_eq_elms(net, buses, elm, branches=None, idx_start=None, sign=1,
name=None, zone=None, other_zone=None, **kwargs):
"""
Internal function of create_eq_loads() or create_eq_gens()
"""
name = name if name is not None else f"equivalent {elm}"
# --- check existing results and return if not available
cols = {"load": ["p_mw", "q_mvar"], "gen": ["p_mw", "vm_pu"]}[elm]
if len(buses - set(net.res_bus.index)) or net.res_bus.loc[
buses, cols].isnull().any().any():
logger.warning(f"No {elm}s could be added to 'net_ib_eq_load' since bus results " +
"are missing.")
        return pd.Index([])
# --- run functionality
if branches is not None:
branches_has_buses_keys = not len(set(branches.keys()).symmetric_difference(set(buses)))
names = pp.ensure_iterability(name, len(buses))
new_idx = []
for no, (bus, name) in enumerate(zip(buses, names)):
bra = branches if branches is None or not branches_has_buses_keys else branches[bus]
idx = idx_start + no if idx_start is not None else None
p = sign * get_branch_power(net, bus, "p_mw", bra)
if elm == "load":
q = sign * get_branch_power(net, bus, "q_mvar", bra)
new = pp.create_load(net, bus, p, q, name=name, index=idx, **kwargs)
elif elm == "gen":
vm = net.res_bus.vm_pu.at[bus]
new = pp.create_gen(net, bus, p, vm, name=name, index=idx, **kwargs)
else:
raise NotImplementedError(f"elm={elm} is not implemented.")
if "origin_id" in net[elm].columns:
net[elm].origin_id.loc[new] = eq_name(elm, other_zone, zone, number=no)
new_idx.append(new)
    return pd.Index(new_idx)  # api: pandas.Index
import logging
import multiprocessing as mp
from multiprocessing.pool import Pool
import pandas as pd
from .. import util
_logger = logging.getLogger(__name__)
_rec_context = None
class MPRecContext:
def __init__(self, algo):
self.algo = algo
def __enter__(self):
global _rec_context
_logger.debug('installing context for %s', self.algo)
_rec_context = self
return self
def __exit__(self, *args, **kwargs):
global _rec_context
_logger.debug('uninstalling context for %s', self.algo)
_rec_context = None
def _predict_user(algo, user, udf):
watch = util.Stopwatch()
res = algo.predict_for_user(user, udf['item'])
res = pd.DataFrame({'user': user, 'item': res.index, 'prediction': res.values})
_logger.debug('%s produced %d/%d predictions for %s in %s',
algo, res.prediction.notna().sum(), len(udf), user, watch)
return res
def _predict_worker(job):
user, udf = job
res = _predict_user(_rec_context.algo, user, udf)
return res.to_msgpack()
def predict(algo, pairs, nprocs=None):
"""
Generate predictions for user-item pairs. The provided algorithm should be a
:py:class:`algorithms.Predictor` or a function of two arguments: the user ID and
a list of item IDs. It should return a dictionary or a :py:class:`pandas.Series`
mapping item IDs to predictions.
Args:
algo(lenskit.algorithms.Predictor):
A rating predictor function or algorithm.
pairs(pandas.DataFrame):
A data frame of (``user``, ``item``) pairs to predict for. If this frame also
contains a ``rating`` column, it will be included in the result.
nprocs(int):
The number of processes to use for parallel batch prediction.
Returns:
pandas.DataFrame:
a frame with columns ``user``, ``item``, and ``prediction`` containing
the prediction results. If ``pairs`` contains a `rating` column, this
result will also contain a `rating` column.
"""
if nprocs and nprocs > 1 and mp.get_start_method() == 'fork':
_logger.info('starting predict process with %d workers', nprocs)
with MPRecContext(algo), Pool(nprocs) as pool:
results = pool.map(_predict_worker, pairs.groupby('user'))
results = [pd.read_msgpack(r) for r in results]
_logger.info('finished predictions')
else:
results = []
for user, udf in pairs.groupby('user'):
res = _predict_user(algo, user, udf)
results.append(res)
        results = pd.concat(results)  # api: pandas.concat
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
def read_file(filename):
labels = ["futures", "title", "wait", "exec", "duration", "us_future", "queue", "numa_sensitive", "num_threads", "info_string", "libcds"]
data = pd.read_csv(filename, sep=',', header=None)
data.columns = labels
return data
def get_files_data(threads):
filenames = []
for t in threads:
filenames.append( "thread_" + str(t) + ".txt" )
rawdata = []
for f in filenames:
rawdata.append(read_file(f))
    data = pd.concat(rawdata)  # api: pandas.concat
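    # Hypothetical call (thread counts are placeholders): get_files_data([1, 2, 4, 8]) reads
    # thread_1.txt, thread_2.txt, ... and concatenates them into a single DataFrame.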