prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
import pandas as pd
import collections
from decimal import Decimal
class Trades:
"""
A class to represent all the trades and possibly the ledger status.
Attributes
----------
:param _trades: pandas dataframe with the trades information
:para _ledgers: (optional) pandas dataframe with the ledger information
Methods
-------
__iter__()
Loops through the trades dataframe
balance_check()
Checks the coherence in the ledger file
readKrakeCSV()
Reads file and transform it into a pandas dataframe object
"""
TIME_COL = "time"
TXID_COL = "txid"
PAIR_COL = "pair"
TYPE_COL = "type"
PRICE_COL = "price"
COST_COL = "cost"
FEE_COL = "fee"
VOL_COL = "vol"
ASSET_COL = "asset"
AMOUNT_COL = "amount"
BALANCE_COL ="balance"
LEDGER_COL = "ledgers"
def __init__(self, trades_file, ledger_file = None):
"""
Construction of the trades (and ledger) objects
:param trades_file: (str) file location
:param ledger_file: (str) file location
"""
self._trades = Trades.readKrakenCSV(trades_file)
self._ledger = Trades.readKrakenCSV(ledger_file) if ledger_file else None
# TODO check balance check
# TODO create trades check : price*vol = cost
def __iter__(self):
"""
Iterator that loops on the trades
"""
return self._trades.iterrows()
def balance_check(self):
""" Balance check based on the ledger information
:returns : True if coherent
False if non coherent
:raises ValueError: if attempting to read a non existing ledger
# TODO loop could be optimized
"""
if self._ledger is None: raise ValueError("Theres is no ledger loaded.")
ledger_status = collections.defaultdict(lambda: Decimal("0"))
ledger_calculus = collections.defaultdict(lambda: Decimal("0"))
for _, row in self._ledger.iterrows():
if type(row[Trades.TXID_COL]) == str and row[Trades.TXID_COL]:
ledger_calculus[Trades.ASSET_COL] += row[Trades.AMOUNT_COL] - row[Trades.FEE_COL]
ledger_status[Trades.ASSET_COL] = row[Trades.BALANCE_COL]
for asset in ledger_status:
if ledger_status[asset] != ledger_calculus[asset]: return False
return True
@staticmethod
def readKrakenCSV(file):
"""
Static method to read and convert trades into a pandas dataframe
:param file: (str) file location
:return : pandas.DataFrame (trades or ledger)
"""
df = | pd.read_csv(file) | pandas.read_csv |
#Rule 22 - Latitude and Longitude values must have a precision of 6 digits after the decimal point. They should not contain special characters, including spaces
def latitide_longitude(fle, fleName, target):
import re
import os
import sys
import json
import openpyxl
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
configFile = 'https://s3.us-east.cloud-object-storage.appdomain.cloud/sharad-saurav-bucket/Configuration.xlsx'
rule="Latitude_Longitude"
regex = re.compile('[@!#$%^&*()<>?/\|}{~:]')
config=pd.read_excel(configFile)
newdf=config[config['RULE']==rule]
to_check=''
for index,row in newdf.iterrows():
to_check=row['TO_CHECK']
to_check=json.loads(to_check)
files_to_apply=to_check['files_to_apply']
columns_to_apply=to_check['columns_to_apply']
if(files_to_apply=='ALL' or fleName in files_to_apply):
data=[]
df = pd.read_excel(fle)
df.index = range(2,df.shape[0]+2)
for index,row in df.iterrows():
latitude=str(row['LATITUDE'])
longitude=str(row['LONGITUDE'])
if(len(latitude)==3):
entry=[index,fleName,'This row does not have latitude value']
print('The row '+str(index)+' in the file Location_v1.40.xlsx does not have latitude value')
data.append(entry)
elif(len(longitude)==3):
entry=[index,fleName,'This row does not have longitude value']
print('The row '+str(index)+' in the file Location_v1.40.xlsx does not have longitude value')
data.append(entry)
elif(len(latitude.split('.')[1])<6):
entry=[index,fleName,'Latitude value '+latitude+' has less than 6 digits after decimal point']
print('The row '+str(index)+' in the file Location_v1.40.xlsx has less or more than 6 digits after decimal point in latitude')
data.append(entry)
elif(len(longitude.split('.')[1])<6):
entry=[index,fleName,'Longitude value '+longitude+' has less than 6 digits after decimal point']
print('The row '+str(index)+' in the file Location_v1.40.xlsx has less or more than 6 digits after decimal point in longitude')
data.append(entry)
elif(len(latitude.split('.')[1])>6):
entry=[index,fleName,'Latitude value '+latitude+' has more than 6 digits after decimal point']
print('The row '+str(index)+' in the file Location_v1.40.xlsx has less or more than 6 digits after decimal point in latitude')
data.append(entry)
elif(len(longitude.split('.')[1])>6):
entry=[index,fleName,'Longitude value '+longitude+' has more than 6 digits after decimal point']
print('The row '+str(index)+' in the file Location_v1.40.xlsx has less or more than 6 digits after decimal point in longitude')
data.append(entry)
elif((regex.search(latitude)!=None)):
entry=[index,fleName,'Latitude value '+latitude+' has special characters']
print('The row '+str(index)+' in the file Location_v1.40.xlsx has special characters in latitude')
data.append(entry)
elif((regex.search(longitude)!= None)):
entry=[index,fleName,'Longitude value '+longitude+' has special characters']
print('The row '+str(index)+' in the file Location_v1.40.xlsx has special characters in longitude')
data.append(entry)
df1 = pd.DataFrame(data, columns = ['ROW_NO', 'FILE_NAME', 'COMMENTS'])
if( | ExcelFile(target) | pandas.ExcelFile |
import collections
import logging
import os
import pprint
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import pytest
import core.artificial_signal_generators as cartif
import core.signal_processing as csigna
import helpers.git as git
import helpers.printing as hprint
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class Test__compute_lagged_cumsum(hut.TestCase):
def test1(self) -> None:
input_df = self._get_df()
output_df = csigna._compute_lagged_cumsum(input_df, 3)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test2(self) -> None:
input_df = self._get_df()
input_df.columns = ["x", "y1", "y2"]
output_df = csigna._compute_lagged_cumsum(input_df, 3, ["y1", "y2"])
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test_lag_1(self) -> None:
input_df = self._get_df()
input_df.columns = ["x", "y1", "y2"]
output_df = csigna._compute_lagged_cumsum(input_df, 1, ["y1", "y2"])
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
@staticmethod
def _get_df() -> pd.DataFrame:
df = pd.DataFrame([list(range(10))] * 3).T
df[1] = df[0] + 1
df[2] = df[0] + 2
df.index = pd.date_range(start="2010-01-01", periods=10)
df.rename(columns=lambda x: f"col_{x}", inplace=True)
return df
class Test_correlate_with_lagged_cumsum(hut.TestCase):
def test1(self) -> None:
input_df = self._get_arma_df()
output_df = csigna.correlate_with_lagged_cumsum(
input_df, 3, y_vars=["y1", "y2"]
)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
def test2(self) -> None:
input_df = self._get_arma_df()
output_df = csigna.correlate_with_lagged_cumsum(
input_df, 3, y_vars=["y1"], x_vars=["x"]
)
self.check_string(
f"{hprint.frame('input')}\n"
f"{hut.convert_df_to_string(input_df, index=True)}\n"
f"{hprint.frame('output')}\n"
f"{hut.convert_df_to_string(output_df, index=True)}"
)
@staticmethod
def _get_arma_df(seed: int = 0) -> pd.DataFrame:
arma_process = cartif.ArmaProcess([], [])
date_range = {"start": "2010-01-01", "periods": 40, "freq": "M"}
srs1 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed
).rename("x")
srs2 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed + 1
).rename("y1")
srs3 = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed + 2
).rename("y2")
return pd.concat([srs1, srs2, srs3], axis=1)
class Test_accumulate(hut.TestCase):
def test1(self) -> None:
srs = pd.Series(
range(0, 20), index=pd.date_range("2010-01-01", periods=20)
)
actual = csigna.accumulate(srs, num_steps=1)
expected = srs.astype(float)
pd.testing.assert_series_equal(actual, expected)
def test2(self) -> None:
idx = pd.date_range("2010-01-01", periods=10)
srs = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], index=idx)
actual = csigna.accumulate(srs, num_steps=2)
expected = pd.Series([np.nan, 1, 3, 5, 7, 9, 11, 13, 15, 17], index=idx)
pd.testing.assert_series_equal(actual, expected)
def test3(self) -> None:
idx = pd.date_range("2010-01-01", periods=10)
srs = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], index=idx)
actual = csigna.accumulate(srs, num_steps=3)
expected = pd.Series(
[np.nan, np.nan, 3, 6, 9, 12, 15, 18, 21, 24], index=idx
)
pd.testing.assert_series_equal(actual, expected)
def test4(self) -> None:
srs = pd.Series(
np.random.randn(100), index=pd.date_range("2010-01-01", periods=100)
)
output = pd.concat([srs, csigna.accumulate(srs, num_steps=5)], axis=1)
output.columns = ["series", "series_accumulated"]
self.check_string(hut.convert_df_to_string(output, index=True))
def test_long_step1(self) -> None:
idx = pd.date_range("2010-01-01", periods=3)
srs = pd.Series([1, 2, 3], index=idx)
actual = csigna.accumulate(srs, num_steps=5)
expected = pd.Series([np.nan, np.nan, np.nan], index=idx)
pd.testing.assert_series_equal(actual, expected)
def test_nans1(self) -> None:
idx = pd.date_range("2010-01-01", periods=10)
srs = pd.Series([0, 1, np.nan, 2, 3, 4, np.nan, 5, 6, 7], index=idx)
actual = csigna.accumulate(srs, num_steps=3)
expected = pd.Series(
[
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
9,
np.nan,
np.nan,
np.nan,
18,
],
index=idx,
)
pd.testing.assert_series_equal(actual, expected)
def test_nans2(self) -> None:
idx = pd.date_range("2010-01-01", periods=6)
srs = pd.Series([np.nan, np.nan, np.nan, 2, 3, 4], index=idx)
actual = csigna.accumulate(srs, num_steps=3)
expected = pd.Series(
[np.nan, np.nan, np.nan, np.nan, np.nan, 9], index=idx
)
pd.testing.assert_series_equal(actual, expected)
def test_nans3(self) -> None:
idx = pd.date_range("2010-01-01", periods=6)
srs = pd.Series([np.nan, np.nan, np.nan, 2, 3, 4], index=idx)
actual = csigna.accumulate(srs, num_steps=2)
expected = pd.Series([np.nan, np.nan, np.nan, np.nan, 5, 7], index=idx)
pd.testing.assert_series_equal(actual, expected)
class Test_get_symmetric_equisized_bins(hut.TestCase):
def test_zero_in_bin_interior_false(self) -> None:
input_ = pd.Series([-1, 3])
expected = np.array([-3, -2, -1, 0, 1, 2, 3])
actual = csigna.get_symmetric_equisized_bins(input_, 1)
np.testing.assert_array_equal(actual, expected)
def test_zero_in_bin_interior_true(self) -> None:
input_ = pd.Series([-1, 3])
expected = np.array([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5])
actual = csigna.get_symmetric_equisized_bins(input_, 1, True)
np.testing.assert_array_equal(actual, expected)
def test_infs(self) -> None:
data = pd.Series([-1, np.inf, -np.inf, 3])
expected = np.array([-4, -2, 0, 2, 4])
actual = csigna.get_symmetric_equisized_bins(data, 2)
np.testing.assert_array_equal(actual, expected)
class Test_compute_rolling_zscore1(hut.TestCase):
def test_default_values1(self) -> None:
"""
Test with default parameters on a heaviside series.
"""
heaviside = cartif.get_heaviside(-10, 252, 1, 1).rename("input")
actual = csigna.compute_rolling_zscore(heaviside, tau=40).rename("output")
output_df = pd.concat([heaviside, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_default_values2(self) -> None:
"""
Test for tau with default parameters on a heaviside series.
"""
heaviside = cartif.get_heaviside(-10, 252, 1, 1).rename("input")
actual = csigna.compute_rolling_zscore(heaviside, tau=20).rename("output")
output_df = pd.concat([heaviside, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_arma_clean1(self) -> None:
"""
Test on a clean arma series.
"""
series = self._get_arma_series(seed=1)
actual = csigna.compute_rolling_zscore(series, tau=20).rename("output")
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_arma_nan1(self) -> None:
"""
Test on an arma series with leading NaNs.
"""
series = self._get_arma_series(seed=1)
series[:5] = np.nan
actual = csigna.compute_rolling_zscore(series, tau=20).rename("output")
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_arma_nan2(self) -> None:
"""
Test on an arma series with interspersed NaNs.
"""
series = self._get_arma_series(seed=1)
series[5:10] = np.nan
actual = csigna.compute_rolling_zscore(series, tau=20).rename("output")
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_arma_zero1(self) -> None:
"""
Test on an arma series with leading zeros.
"""
series = self._get_arma_series(seed=1)
series[:5] = 0
actual = csigna.compute_rolling_zscore(series, tau=20).rename("output")
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_arma_zero2(self) -> None:
"""
Test on an arma series with interspersed zeros.
"""
series = self._get_arma_series(seed=1)
series[5:10] = 0
actual = csigna.compute_rolling_zscore(series, tau=20).rename("output")
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_arma_atol1(self) -> None:
"""
Test on an arma series with all-zeros period and `atol>0`.
"""
series = self._get_arma_series(seed=1)
series[10:25] = 0
actual = csigna.compute_rolling_zscore(series, tau=2, atol=0.01).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_arma_inf1(self) -> None:
"""
Test on an arma series with leading infs.
"""
series = self._get_arma_series(seed=1)
series[:5] = np.inf
actual = csigna.compute_rolling_zscore(series, tau=20).rename("output")
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_arma_inf2(self) -> None:
"""
Test on an arma series with interspersed infs.
"""
series = self._get_arma_series(seed=1)
series[5:10] = np.inf
actual = csigna.compute_rolling_zscore(series, tau=20).rename("output")
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay1_arma_clean1(self) -> None:
"""
Test on a clean arma series when `delay=1`.
"""
series = self._get_arma_series(seed=1)
actual = csigna.compute_rolling_zscore(series, tau=20, delay=1).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay1_arma_nan1(self) -> None:
"""
Test on an arma series with leading NaNs when `delay=1`.
"""
series = self._get_arma_series(seed=1)
series[:5] = np.nan
actual = csigna.compute_rolling_zscore(series, tau=20, delay=1).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay1_arma_nan2(self) -> None:
"""
Test on an arma series with interspersed NaNs when `delay=1`.
"""
series = self._get_arma_series(seed=1)
series[5:10] = np.nan
actual = csigna.compute_rolling_zscore(series, tau=20, delay=1).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay1_arma_zero1(self) -> None:
"""
Test on an arma series with leading zeros when `delay=1`.
"""
series = self._get_arma_series(seed=1)
series[:5] = 0
actual = csigna.compute_rolling_zscore(series, tau=20, delay=1).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay1_arma_zero2(self) -> None:
"""
Test on an arma series with interspersed zeros when `delay=1`.
"""
series = self._get_arma_series(seed=1)
series[5:10] = 0
actual = csigna.compute_rolling_zscore(series, tau=20, delay=1).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay1_arma_atol1(self) -> None:
"""
Test on an arma series with all-zeros period, `delay=1` and `atol>0`.
"""
series = self._get_arma_series(seed=1)
series[10:25] = 0
actual = csigna.compute_rolling_zscore(
series, tau=2, delay=1, atol=0.01
).rename("output")
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay1_arma_inf1(self) -> None:
"""
Test on an arma series with leading infs when `delay=1`.
"""
series = self._get_arma_series(seed=1)
series[:5] = np.inf
actual = csigna.compute_rolling_zscore(series, tau=20, delay=1).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay1_arma_inf2(self) -> None:
"""
Test on an arma series with interspersed infs when `delay=1`.
"""
series = self._get_arma_series(seed=1)
series[5:10] = np.inf
actual = csigna.compute_rolling_zscore(series, tau=20, delay=1).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay2_arma_clean1(self) -> None:
"""
Test on a clean arma series when `delay=2`.
"""
series = self._get_arma_series(seed=1)
actual = csigna.compute_rolling_zscore(series, tau=20, delay=2).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay2_arma_nan1(self) -> None:
"""
Test on an arma series with leading NaNs when `delay=2`.
"""
series = self._get_arma_series(seed=1)
series[:5] = np.nan
actual = csigna.compute_rolling_zscore(series, tau=20, delay=2).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay2_arma_nan2(self) -> None:
"""
Test on an arma series with interspersed NaNs when `delay=2`.
"""
series = self._get_arma_series(seed=1)
series[5:10] = np.nan
actual = csigna.compute_rolling_zscore(series, tau=20, delay=2).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay2_arma_zero1(self) -> None:
"""
Test on an arma series with leading zeros when `delay=2`.
"""
series = self._get_arma_series(seed=1)
series[:5] = 0
actual = csigna.compute_rolling_zscore(series, tau=20, delay=2).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay2_arma_zero2(self) -> None:
"""
Test on an arma series with interspersed zeros when `delay=2`.
"""
series = self._get_arma_series(seed=1)
series[5:10] = 0
actual = csigna.compute_rolling_zscore(series, tau=20, delay=2).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay2_arma_atol1(self) -> None:
"""
Test on an arma series with all-zeros period, `delay=2` and `atol>0`.
"""
series = self._get_arma_series(seed=1)
series[10:25] = 0
actual = csigna.compute_rolling_zscore(
series, tau=2, delay=2, atol=0.01
).rename("output")
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay2_arma_inf1(self) -> None:
"""
Test on an arma series with leading infs when `delay=2`.
"""
series = self._get_arma_series(seed=1)
series[:5] = np.inf
actual = csigna.compute_rolling_zscore(series, tau=20, delay=2).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
def test_delay2_arma_inf2(self) -> None:
"""
Test on an arma series with interspersed infs when `delay=2`.
"""
series = self._get_arma_series(seed=1)
series[5:10] = np.inf
actual = csigna.compute_rolling_zscore(series, tau=20, delay=2).rename(
"output"
)
output_df = pd.concat([series, actual], axis=1)
output_df_string = hut.convert_df_to_string(output_df, index=True)
self.check_string(output_df_string)
@staticmethod
def _get_arma_series(seed: int) -> pd.Series:
arma_process = cartif.ArmaProcess([1], [1])
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed
).rename("input")
return series
class Test_process_outliers1(hut.TestCase):
def test_winsorize1(self) -> None:
srs = self._get_data1()
mode = "winsorize"
lower_quantile = 0.01
# Check.
self._helper(srs, mode, lower_quantile)
def test_set_to_nan1(self) -> None:
srs = self._get_data1()
mode = "set_to_nan"
lower_quantile = 0.01
# Check.
self._helper(srs, mode, lower_quantile)
def test_set_to_zero1(self) -> None:
srs = self._get_data1()
mode = "set_to_zero"
lower_quantile = 0.01
# Check.
self._helper(srs, mode, lower_quantile)
def test_winsorize2(self) -> None:
srs = self._get_data2()
mode = "winsorize"
lower_quantile = 0.2
# Check.
self._helper(srs, mode, lower_quantile, num_df_rows=len(srs))
def test_set_to_nan2(self) -> None:
srs = self._get_data2()
mode = "set_to_nan"
lower_quantile = 0.2
# Check.
self._helper(srs, mode, lower_quantile, num_df_rows=len(srs))
def test_set_to_zero2(self) -> None:
srs = self._get_data2()
mode = "set_to_zero"
lower_quantile = 0.2
upper_quantile = 0.5
# Check.
self._helper(
srs,
mode,
lower_quantile,
num_df_rows=len(srs),
upper_quantile=upper_quantile,
)
def _helper(
self,
srs: pd.Series,
mode: str,
lower_quantile: float,
num_df_rows: int = 10,
window: int = 100,
min_periods: Optional[int] = 2,
**kwargs: Any,
) -> None:
info: collections.OrderedDict = collections.OrderedDict()
srs_out = csigna.process_outliers(
srs,
mode,
lower_quantile,
window=window,
min_periods=min_periods,
info=info,
**kwargs,
)
txt = []
txt.append("# info")
txt.append(pprint.pformat(info))
txt.append("# srs_out")
txt.append(str(srs_out.head(num_df_rows)))
self.check_string("\n".join(txt))
@staticmethod
def _get_data1() -> pd.Series:
np.random.seed(100)
n = 100000
data = np.random.normal(loc=0.0, scale=1.0, size=n)
return | pd.Series(data) | pandas.Series |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pathway_file = '../../drp-data/pathways/9606.enrichr_pathway.edge'
pathway = pd.read_csv(pathway_file, sep='\t', header=None)
pathway['norm_score'] = pathway[2]/pathway[2].max()
print("pathways:", pathway[0].nunique())
print("pathway genes:", pathway[1].nunique())
gsc_filtered = '../../KnowEng_GSC/GSC_10mod/drawr_filtered/DraWR_GSC_Enrichr_STRINGExp.xlsx'
ppi_file = '../../drp-data/pathways/9606.STRING_experimental.edge'
ppi = pd.read_csv(ppi_file, sep='\t', header=None)
print("PPI original edges:", len(ppi))
ppi['norm_score'] = ppi[2]/ppi[2].max()
ppi = ppi.loc[ppi['norm_score'] > 0.5]
print("PPI filtered edges:", len(ppi))
nodes = list(set(ppi[0]).union(set(ppi[1])))
print("PPI nodes:", len(nodes) )
folder = 'CX_ens10'
mean_attribution_file = 'results/CX_ens10/all_attributions.csv'
feature_attr = pd.read_csv(mean_attribution_file, index_col=0)
top_genes_file = 'results/CX_ens10/top_genes_mean_aggregation_info.xlsx'
writer_a = | pd.ExcelWriter('results/%s/one_hop_cyto.xlsx'%folder, engine='xlsxwriter') | pandas.ExcelWriter |
from os.path import join, isfile
from flask import Flask, render_template, make_response, jsonify, request, redirect, abort
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
from flask_mail import Mail
from flask_uploads import UploadSet, IMAGES, configure_uploads
from flask_pagedown import PageDown
from flask_migrate import Migrate
from flask_httpauth import HTTPBasicAuth
from flask import jsonify
import markdown
import bleach
import pandas as pd
from sqlalchemy import and_, or_, not_
from sqlalchemy import text
import time
from datetime import datetime, date, timedelta
app = Flask(__name__, instance_relative_config=True)
if isfile(join('instance', 'flask_full.cfg')):
app.config.from_pyfile('flask_full.cfg')
else:
app.config.from_pyfile('flask.cfg')
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
mail = Mail(app)
pagedown = PageDown(app)
migrate = Migrate(app, db)
auth = HTTPBasicAuth()
auth_token = HTTPBasicAuth()
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "users.login"
# Configure the image uploading via Flask-Uploads
images = UploadSet('images', IMAGES)
configure_uploads(app, images)
from project import db
from project.models import *
from project.utilities import get_age_string, get_duration_string, get_payout_string, markdown_to_safe_html
from project.utilities import resized_image_url_from_url, get_sparkline_data_from_content, get_voters_list_from_content
from project.utilities import tlog
import json
from contextlib import suppress
from steem.blockchain import Blockchain
from steem import Steem
steem = Steem(nodes=app.config['STEEM_NODES'])
def create_video_summary_fields(df, filter_data={}):
# temporary fix dlive thumbnails to https to prevent SSL warning
# df = df[~(df['video_thumbnail_image_url']==None)]
df['video_thumbnail_image_url'] = df['video_thumbnail_image_url'].apply(lambda x: x.replace('http://', 'https://') if x != None else '')
df = df[~pd.isnull(df['created'])]
df = df[~(df['video_id']=='c')] # remove erroneous records todo - remove once db refreshed
df = df[~(df['video_id']=='user')] # remove erroneous records todo - remove once db refreshed
df['duration_string'] = df['video_duration_seconds'].apply(get_duration_string)
df['age_string'] = df['created'].apply(get_age_string)
df['video_post_delay_days'] = df['video_post_publish_delay_seconds'] // (3600 * 24)
df['payout_string'] = (df['pending_payout_value'] + df['total_payout_value']).apply(lambda x: get_payout_string(x))
df['title'] = df['title'].apply(lambda x: markdown_to_safe_html(x))
df['title_truncated'] = df['title'].apply(lambda x: x[:80])
# experimental resizing through free image proxy/cache
df['video_thumbnail_image_url'] = df['video_thumbnail_image_url'].apply(lambda x: resized_image_url_from_url(x))
return df[['author', 'permlink', 'category', 'title', 'title_truncated', 'created', 'age_string', 'payout_string',
'duration_string', 'video_type', 'video_id', 'video_thumbnail_image_url', 'video_post_delay_days',
'trending_score', 'hot_score', 'votes_sparkline_data']]
# appends the query dict (from json filter data) to existing query
def apply_filter_to_query(original_query, filter_data):
new_query = original_query
if filter_data.get('filter_age_selection', 'all') == 'hour':
new_query = new_query.filter(Post.created > (datetime.now() - timedelta(hours=1)))
elif filter_data.get('filter_age_selection', 'all') == 'today':
new_query = new_query.filter(Post.created > (datetime.now() - timedelta(hours=24)))
elif filter_data.get('filter_age_selection', 'all') == 'week':
new_query = new_query.filter(Post.created > (datetime.now() - timedelta(days=7)))
elif filter_data.get('filter_age_selection', 'all') == 'month':
new_query = new_query.filter(Post.created > (datetime.now() - timedelta(days=30)))
# video type inclusions filter (requires posts to be of type in filter)
inclusions_list = filter_data.get('filter_included_types', [])
type_filter_list = []
for video_type in inclusions_list:
type_filter_list.append(Post.video_type == video_type)
if type_filter_list:
new_query = new_query.filter(or_(*type_filter_list))
if filter_data.get('filter_duration_selection', 'all') == 'short':
new_query = new_query.filter(Post.video_duration_seconds <= 240)
elif filter_data.get('filter_duration_selection', 'all') == 'long':
new_query = new_query.filter(Post.video_duration_seconds > 1200)
if filter_data.get('filter_exclude_old_video', 'false') == 'true':
new_query = new_query.filter(Post.video_post_publish_delay_seconds < (7*24*3600))
if filter_data.get('filter_exclude_nsfw', 'false') == 'true':
new_query = new_query.filter(not_(Post.is_nsfw))
# author exclusions filter
inclusions_list = filter_data.get('filter_excluded_authors', [])
author_filter_list = []
for account in inclusions_list:
author_filter_list.append(Post.author == account)
if author_filter_list:
new_query = new_query.filter(not_(or_(*author_filter_list)))
# author inclusions filter
inclusions_list = filter_data.get('filter_included_authors', [])
author_filter_list = []
for account in inclusions_list:
author_filter_list.append(Post.author == account)
if author_filter_list:
new_query = new_query.filter(or_(*author_filter_list))
# voter exclusions filter (removes posts voted by any of first five accounts in filter)
exclusions_list = filter_data.get('filter_excluded_voters', [])[:5]
voter_filter_list = []
for account in exclusions_list:
voter_filter_list.append(Post.voters_list_ts_vector.match(account, postgresql_regconfig='english'))
if voter_filter_list:
new_query = new_query.filter(not_(or_(*voter_filter_list)))
# voter inclusions filter (requires posts voted by any of first five accounts in filter)
inclusions_list = filter_data.get('filter_included_voters', [])[:5]
voter_filter_list = []
for account in inclusions_list:
voter_filter_list.append(Post.voters_list_ts_vector.match(account, postgresql_regconfig='english'))
if voter_filter_list:
new_query = new_query.filter(or_(*voter_filter_list))
# tag exclusions filter (removes posts with any of first tags in filter)
exclusions_list = filter_data.get('filter_excluded_tags', [])[:10]
tags_filter_list = []
for account in exclusions_list:
tags_filter_list.append(Post.tags_ts_vector.match(account, postgresql_regconfig='english'))
if tags_filter_list:
new_query = new_query.filter(not_(or_(*tags_filter_list)))
# tag inclusions filter (requires posts with any of first tags in filter)
inclusions_list = filter_data.get('filter_included_tags', [])[:10]
tags_filter_list = []
for account in inclusions_list:
tags_filter_list.append(Post.tags_ts_vector.match(account, postgresql_regconfig='english'))
if tags_filter_list:
new_query = new_query.filter(or_(*tags_filter_list))
return new_query
def apply_sort_to_query(original_query, filter_data):
new_query = original_query
sort_order = Post.pending_payout_value.desc()
if filter_data.get('filter_sort_selection', 'all') == 'date':
sort_order = Post.created.desc()
elif filter_data.get('filter_sort_selection', 'all') == 'payout':
sort_order = Post.pending_payout_value.desc()
elif filter_data.get('filter_sort_selection', 'all') == 'trending':
sort_order = Post.trending_score.desc()
elif filter_data.get('filter_sort_selection', 'all') == 'hot':
sort_order = Post.hot_score.desc()
return new_query.order_by(sort_order)
# PUBLIC PAGES ########################################################
@app.route('/f/api/trending-videos/<limit>', methods=['GET', 'POST'])
@app.route('/f/api/trending-videos', methods=['GET', 'POST'])
def trending_videos(limit="30"):
limit = int(limit)
filter_data = {}
query = db.session.query(Post)
if request.method == 'POST':
data = request.data
filter_data = json.loads(data)
query = apply_filter_to_query(query, filter_data)
# get more records than needed as post query filters may remove some
query = query.order_by(Post.trending_score.desc()).limit(int(limit*1.2))
df = pd.read_sql(query.statement, db.session.bind)
df = create_video_summary_fields(df, filter_data)
df = df.head(limit)
return df.to_json(orient='records')
@app.route('/f/api/hot-videos/<limit>', methods=['GET', 'POST'])
@app.route('/f/api/hot-videos', methods=['GET', 'POST'])
def hot_videos(limit="30"):
limit = int(limit)
filter_data = {}
query = db.session.query(Post)
if request.method == 'POST':
data = request.data
filter_data = json.loads(data)
query = apply_filter_to_query(query, filter_data)
# get more records than needed as post query filters may remove some
query = query.order_by(Post.hot_score.desc()).limit(int(limit*1.2))
df = pd.read_sql(query.statement, db.session.bind)
df = create_video_summary_fields(df, filter_data)
df = df.head(limit)
return df.to_json(orient='records')
@app.route('/f/api/new-videos/<limit>', methods=['GET', 'POST'])
@app.route('/f/api/new-videos', methods=['GET', 'POST'])
def new_videos(limit="30"):
limit = int(limit)
filter_data = {}
try:
query = db.session.query(Post)
if request.method == 'POST':
data = request.data
filter_data = json.loads(data)
query = apply_filter_to_query(query, filter_data)
# get more records than needed as post query filters may remove some
query = query.order_by(Post.created.desc()).limit(int(limit*1.2))
df = pd.read_sql(query.statement, db.session.bind)
df = create_video_summary_fields(df, filter_data)
df = df.head(limit)
return df.to_json(orient='records')
except Exception as e:
return str(e)
@app.route('/f/api/account-videos/<author>/<limit>', methods=['GET', 'POST'])
@app.route('/f/api/account-videos/<author>', methods=['GET', 'POST'])
def account_videos(author, limit="30"):
limit = int(limit)
filter_data = {}
try:
author_filter = (Post.author == author)
query = db.session.query(Post).filter(author_filter)
if request.method == 'POST':
data = request.data
filter_data = json.loads(data)
query = apply_filter_to_query(query, filter_data)
# get more records than needed as post query filters may remove some
query = query.order_by(Post.created.desc()).limit(int(limit*1.2))
df = pd.read_sql(query.statement, db.session.bind)
df = create_video_summary_fields(df, filter_data)
df = df.head(limit)
return df.to_json(orient='records')
except Exception as e:
return str(e)
@app.route('/f/api/related-videos/<author>/<limit>', methods=['GET', 'POST'])
@app.route('/f/api/related-videos/<author>', methods=['GET', 'POST'])
def related_videos(author, limit="30"):
limit = int(limit)
filter_data = {}
try:
# get author videos
author_filter = (Post.author == author)
query = db.session.query(Post).filter(Post.created > (datetime.now() - timedelta(days=7))).filter(author_filter)
if request.method == 'POST':
data = request.data
filter_data = json.loads(data)
query = apply_filter_to_query(query, filter_data)
# get more records than needed as post query filters may remove some
query = query.order_by(Post.created.desc()).limit(int(limit*1.2))
author_df = pd.read_sql(query.statement, db.session.bind)
# get author voted videos
query = db.session.query(Post).filter(Post.created > (datetime.now() - timedelta(days=7))).filter(Post.voters_list_ts_vector.match(author, postgresql_regconfig='english'))
if request.method == 'POST':
data = request.data
filter_data = json.loads(data)
query = apply_filter_to_query(query, filter_data)
# get more records than needed as post query filters may remove some
query = query.order_by(Post.created.desc()).limit(int(limit*1.2))
author_voted_df = | pd.read_sql(query.statement, db.session.bind) | pandas.read_sql |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
class TestSeriesCombine:
def test_combine_scalar(self):
# GH 21248
# Note - combine() with another Series is tested elsewhere because
# it is used when testing operators
s = pd.Series([i * 10 for i in range(5)])
result = s.combine(3, lambda x, y: x + y)
expected = pd.Series([i * 10 + 3 for i in range(5)])
tm.assert_series_equal(result, expected)
result = s.combine(22, lambda x, y: min(x, y))
expected = pd.Series([min(i * 10, 22) for i in range(5)])
tm.assert_series_equal(result, expected)
def test_update(self):
s = Series([1.5, np.nan, 3.0, 4.0, np.nan])
s2 = Series([np.nan, 3.5, np.nan, 5.0])
s.update(s2)
expected = Series([1.5, 3.5, 3.0, 5.0, np.nan])
tm.assert_series_equal(s, expected)
# GH 3217
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
df["c"].update(Series(["foo"], index=[0]))
expected = DataFrame(
[[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"other, dtype, expected",
[
# other is int
([61, 63], "int32", pd.Series([10, 61, 12], dtype="int32")),
([61, 63], "int64", pd.Series([10, 61, 12])),
([61, 63], float, pd.Series([10.0, 61.0, 12.0])),
([61, 63], object, pd.Series([10, 61, 12], dtype=object)),
# other is float, but can be cast to int
([61.0, 63.0], "int32", pd.Series([10, 61, 12], dtype="int32")),
([61.0, 63.0], "int64", pd.Series([10, 61, 12])),
([61.0, 63.0], float, pd.Series([10.0, 61.0, 12.0])),
([61.0, 63.0], object, pd.Series([10, 61.0, 12], dtype=object)),
# others is float, cannot be cast to int
([61.1, 63.1], "int32", pd.Series([10.0, 61.1, 12.0])),
([61.1, 63.1], "int64", pd.Series([10.0, 61.1, 12.0])),
([61.1, 63.1], float, pd.Series([10.0, 61.1, 12.0])),
([61.1, 63.1], object, pd.Series([10, 61.1, 12], dtype=object)),
# other is object, cannot be cast
([(61,), (63,)], "int32", pd.Series([10, (61,), 12])),
([(61,), (63,)], "int64", | pd.Series([10, (61,), 12]) | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------
# Penji OpDev Fall 2019
# GS Run Wrapper
# Author: <NAME>
# Updated:
# ------------------------
# General
import os
import argparse
import pandas as pd
# For Google Sheets
import pygsheets
# Local
import core.utils as utils
from core import logger
import core.gs_api_utils as gs_api_
from core.config import cfg
from core.gs_parent import GoogleSheetsParent
class GoogleSheetsPullFromArchive(GoogleSheetsParent):
def __init__(self, args):
GoogleSheetsParent.__init__(self, args)
# Ensure all files are created and saved properly
self._setup_all()
self.df_ct_prof = self._load('ct_prof')
self.df_ct_courses = self._load('ct_courses')
self.df_arch_prof = self._load('archive_prof')
self.df_arch_courses = self._load('archive_courses')
def run(self, *args, **kwargs):
# Pull data for prof file
self.df_ct_prof = self.df_ct_prof.apply(self.pull_arch_prof, axis=1)
# Pull data for courses file
#self.df_ct_courses = self.df_ct_courses.apply(self.pull_arch_courses, axis=1)
print(self.df_ct_prof)
if self.args.save:
self._save(self.df_ct_prof, 'ct_prof')
self._save(self.df_ct_courses, 'ct_courses')
def pull_arch_prof(self, row):
try:
for ir in self.df_arch_prof.itertuples():
if ir[1] == row['Full Name'] and '@' in str(ir[5]):
print(ir[1], ir[5])
row['Email'] = ir[5]
row['Previous Response'] = ir[6]
row['Term Last Sent'] = ir[7]
break
except:
logger.warn(f'Empty Archive Professor CSV')
return row
def pull_arch_courses(self, row):
try:
for ir in self.df_arch_courses.itertuples():
if ir[1] == row['Course Code'] and not pd.isna(ir[4]):
print(ir[1], ir[4])
row['Archive Demand In'] = ir[4]
break
except:
logger.warn(f'Empty Archive Course CSV')
return row
class GoogleSheetsPrep(GoogleSheetsParent):
def __init__(self, args):
GoogleSheetsParent.__init__(self, args)
self.df_ct = self._load('ct')
self.df_ct_prof = self._load('ct_prof')
self.df_ct_courses = self._load('ct_courses')
self.df_arch_courses = self._load('archive_courses')
def run(self, *args, **kwargs):
""" Sets up Professor df for google sheets upload
Needs demand and ranking in order to deduct desired course
Professor Row Reference #s
"""
# Process Current term CSV: Demand, Ranking, Professor Row #
self.df_ct = self.df_ct.apply(self.process_cur_term_csv, axis=1)
# Process Professor CSV: Demand, Ranking, Professor Row #
self.df_ct_prof = self.df_ct_prof.apply(self.process_prof_courses, axis=1)
# Clear out those temporary values
self.df_ct = self.df_ct.apply(self.clear_temp_values, axis=1)
if self.args.save:
self._save(self.df_ct, 'ct')
self._save(self.df_ct_prof, 'ct_prof')
else:
print(self.df_ct)
print(self.df_ct_prof)
def clear_temp_values(self, row):
row['Demand'], row['Ranking'] = None, None
return row
def process_cur_term_csv(self, row):
# Term Sheet: Demand Column
demand = 3 # Default
try:
for ir in self.df_ct_courses.itertuples():
if ir[1] == row['Course Code'] and not pd.isna(ir[6]):
print(ir[1], ir[6])
demand = ir[6]
break
except:
logger.warn(f'Empty Archive Course CSV')
ranking = demand + (row['# Students'] / 100)
# Term Sheet: Professor Row Reference #
row_references = []
if isinstance(row['Professor'], str):
prof_names_in = row['Professor'].split(', ')
for ir in self.df_ct_prof.itertuples():
[row_references.append(ir[0]+2) for name in prof_names_in if ir[1] == name]
assert len(prof_names_in) == len(row_references), \
f'ERROR: prof names {prof_names_in} != {row_references} row references'
row['Demand'], row['Ranking'], row['Professor Row #'] = demand, ranking, row_references
return row
def process_prof_courses(self, row):
# Professor Sheet: All Courses
# Don't select a class if no email available
all_courses = [] # (None, None, 0) # Course Code, Course Row #, Ranking
best_course = (None, None, 0)
if '@' in str(row['Email']):
prof_name = row['<NAME>']
for ir in self.df_ct.itertuples():
if ir[15] and str(row.name+2) in str(ir[15])[1:-1].split(', '):
all_courses.append((ir[1], ir[0]+2, ir[11]))
if all_courses:
# Find their course with the highest ranking
for course in all_courses:
if course[2] > best_course[2]:
best_course = course
else:
all_courses = None
row['Desired Course Code'] = best_course[0]
row['Desired Course Row #'] = int(best_course[1]) if best_course[1] else best_course[1]
row['All Courses'] = all_courses
return row
class GoogleSheetsUpload(GoogleSheetsParent):
def __init__(self, args):
GoogleSheetsParent.__init__(self, args)
self.status_arr = ['No', 'Sent for different course', 'Match Error', 'Awaiting Response', 'Yes']
def run(self):
# Create a new sheet in folder
sh = self._connect_google_sheet()
# Make sure the sheets are setup properly
gs_df_arr = self._load_all()
self.num_wks = len(gs_df_arr)
# TODO: Professor row Reference #s add 2
# Find number of rows and columns for each
shapes = []
setup_formulas = [self.setup_term_formulas, self.setup_professor_formulas, self.setup_course_formulas,
None, None, None, None, None, self.setup_arch_course_formulas]
for idx in range(len(gs_df_arr)):
# load csv as pd df and upload it
gs_df = gs_df_arr[idx]
shapes.append(gs_df.shape)
# Create new sheets
if self.reset_gs:
wks = sh.add_worksheet(self.files[self.file_keys[idx]][1], rows=shapes[idx][0]+10, cols=shapes[idx][1], index=idx)
if idx == 0:
sh.del_worksheet(sh.worksheet_by_title('Sheet1'))
else:
wks = sh[idx]
# Upload the data
if self.args.data:
wks.set_dataframe(gs_df, (1, 1))
wks.replace('NaN', '')
# Add The Formulas
if self.args.formulas and setup_formulas[idx]:
term = self.pterm if idx in (3,4,5) else self.cterm
setup_formulas[idx](wks, term)
if self.args.format:
self.format_sheet(sh, shapes)
def format_sheet(self, sh, shapes):
# Format Tutor Columns
gs_api_.format_tutor_col(sh=sh, wks=sh[0], shape=shapes[0], col_idx=10) # Current Term
gs_api_.format_tutor_col(sh=sh, wks=sh[2], shape=shapes[2], col_idx=7) # Current Courses
gs_api_.format_tutor_col(sh=sh, wks=sh[3], shape=shapes[3], col_idx=10) # Prev Term
gs_api_.format_tutor_col(sh=sh, wks=sh[5], shape=shapes[5], col_idx=7) # Prev Courses
gs_api_.format_tutor_col(sh=sh, wks=sh[8], shape=shapes[8], col_idx=6) # Archive Courses
# Freeze first row of each wks
[gs_api_.freeze_row(sh=sh, wks=sh[i]) for i in range(self.num_wks)]
# Headers of editable columns: Add blue background
editable_col_cells = [sh[1].cell('G1'), sh[1].cell('H1'), sh[1].cell('I1'),
sh[1].cell('J1'), sh[1].cell('K1'), sh[1].cell('L1'), sh[2].cell('E1'),
sh[4].cell('G1'), sh[4].cell('H1'), sh[4].cell('I1'),
sh[4].cell('J1'), sh[4].cell('K1'), sh[4].cell('L1'), sh[5].cell('E1')]
for cell in editable_col_cells:
cell.color = (207/255, 226/255, 243/255, 1.0)
tutors_range = sh[6].get_values('A1', 'O1', returnas='range')
for cell in tutors_range[0]:
cell.color = (207/255, 226/255, 243/255, 1.0)
# All Headers: Set Bold
# Current Term
[cell.set_text_format('bold', True) for cell in sh[0].get_values('A1', 'Q1', returnas='range')[0]]
[cell.set_text_format('bold', True) for cell in sh[1].get_values('A1', 'P1', returnas='range')[0]]
[cell.set_text_format('bold', True) for cell in sh[2].get_values('A1', 'G1', returnas='range')[0]]
# Previous Term
[cell.set_text_format('bold', True) for cell in sh[3].get_values('A1', 'Q1', returnas='range')[0]]
[cell.set_text_format('bold', True) for cell in sh[4].get_values('A1', 'P1', returnas='range')[0]]
[cell.set_text_format('bold', True) for cell in sh[5].get_values('A1', 'G1', returnas='range')[0]]
# Tutors & Archive
[cell.set_text_format('bold', True) for cell in sh[6].get_values('A1', 'O1', returnas='range')[0]]
[cell.set_text_format('bold', True) for cell in sh[7].get_values('A1', 'G1', returnas='range')[0]]
[cell.set_text_format('bold', True) for cell in sh[8].get_values('A1', 'F1', returnas='range')[0]]
# Format Status Column
gs_api_.format_status_col(sh=sh, wks=sh[0], shape=shapes[0], col_idx=17, stat_arr=self.status_arr)
gs_api_.format_status_col(sh=sh, wks=sh[3], shape=shapes[3], col_idx=17, stat_arr=self.status_arr)
def setup_term_formulas(self, wks, term):
# Demand
wks.cell('B1').formula = 'ArrayFormula(IF(ROW(A:A)=1,"Demand", VLOOKUP(A1:A, ' + f"'Courses {term[2]}'" + '!$A:$D, 4, FALSE)))'
# Previous Response
wks.cell('H1').formula = 'ArrayFormula(IF(ROW(C:C)=1,"Previous Response",IF(ISBLANK(G1:G), "", ' \
'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + '!$A:$F, 6, False))))'
# # Tutors
wks.cell('J1').formula = f'ArrayFormula(IF(ROW(A:A)=1,"# Tutors", IF(ISBLANK(A1:A), "", COUNTIFS(' \
f'Tutors!E:E, "{self.school_config.NICE_NAME}", Tutors!L:L, "*"&C1:C&D1:D&"*", Tutors!I:I, "TRUE", Tutors!J:J,"YES"))))'
# Ranking
wks.cell('K1').formula = 'ArrayFormula(IF(ROW(A:A)=1,"Ranking", IF(ISBLANK(A1:A), "", B1:B+(I1:I/100))))'
# Course Status: color coded professor info
self.status_arr = stat = ['No', 'Sent for different course', 'Match Error', 'Awaiting Response', 'Yes']
wks.cell('Q1').formula = f'ArrayFormula(IF(ROW(A:A)=1,"Status", IF(ISBLANK(A1:A), "", ' \
f'IFERROR(IF((O1:O="[]") + (VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 14, False) = "") > 0, "{stat[2]}", ' \
f'IFERROR(IFS(VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 9, False)="No", "{stat[0]}",' \
f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 9, False)="Yes", "{stat[4]}", ' \
f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 8, False)="No", "{stat[0]}", ' \
f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 8, False)="Yes", "{stat[4]}", ' \
f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 6, False)="No", "{stat[0]}", ' \
f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 6, False)="Yes", "{stat[4]}" ), ' \
f'IF(NE(A1:A, VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 14, False)), "{stat[1]}", ' \
f'IF(VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 12, False)="Fall 19","{stat[3]}",)))),"{stat[2]}" ))))'
def setup_professor_formulas(self, wks, term):
# Previous Response
# To Send
wks.cell('M1').formula = 'ArrayFormula(IF(ROW(A:A)=1,"To Send",IF(ISBLANK(A:A),"", ' \
'IF(RegExMatch(E1:E,"@"), ' \
'IFERROR(' \
'IFS(L1:L="Fall 19", "No",F1:F="No", "No",H1:H="No", "No",I1:I="No", "No"),' \
' "Yes"), "No"))))'
def setup_course_formulas(self, wks, term):
# Demand out
wks.cell('D1').formula = 'ArrayFormula(IF(ROW(F:F)=1,"Demand Out", IFS(' \
'IF((F1:F), F1:F+E1:E, 3+E1:E)>5, 5, ' \
'IF((F1:F), F1:F+E1:E, 3+E1:E)<0, 0, ' \
'IF((F1:F), F1:F+E1:E, 3+E1:E)<5, IF((F1:F), F1:F+E1:E, 3+E1:E))))'
# Demand in
wks.cell('F1').formula = 'ArrayFormula(IF(ROW(E:E)=1,"Archive Demand In", ' \
'IFERROR(VLOOKUP(A1:A, '+"'Spring 19'"+'!$A:$B, 2, FALSE), )))'
# # Tutors
wks.cell('G1').formula = f'ArrayFormula(IF(ROW(A:A)=1,"# Tutors", IF(ISBLANK(A1:A), "", ' \
f'COUNTIFS(Tutors!E:E, "{self.school_config.NICE_NAME}", Tutors!L:L, "*"&SUBSTITUTE(A1:A," ","")&"*", ' \
f'Tutors!I:I, "TRUE", Tutors!J:J, "YES"))))'
def setup_arch_course_formulas(self, wks, term):
# # Tutors
wks.cell('F1').formula = f'ArrayFormula(IF(ROW(A:A)=1,"# Tutors", IF(ISBLANK(A1:A), "", ' \
f'COUNTIFS(Tutors!E:E, "{self.school_config.NICE_NAME}", Tutors!L:L, "*"&SUBSTITUTE(A1:A," ","")&"*", ' \
f'Tutors!I:I, "TRUE", Tutors!J:J, "YES"))))'
class GoogleSheetsUploadStudentOrgs(GoogleSheetsParent):
def __init__(self, args):
GoogleSheetsParent.__init__(self, args)
def run(self):
# Create a new sheet in folder
sh = self._connect_google_sheet(sheet_name_in=f'{self.school_config.NICE_NAME} Student Orgs')
gs_df = self._load(file_name_key='student_orgs')
shape = gs_df.shape
if self.reset_gs:
wks = sh.add_worksheet(self.files['student_orgs'][1], rows=shape[0] + 10, cols=shape[1], index=0)
sh.del_worksheet(sh.worksheet_by_title('Sheet1'))
else:
wks = sh[0]
# Upload the data
if self.args.data:
wks.set_dataframe(gs_df, (1, 1))
wks.replace('NaN', '')
if self.args.format:
#self.format_sheet(sh, shape)
[cell.set_text_format('bold', True) for cell in sh[0].get_values('A1', 'C1', returnas='range')[0]]
class GoogleSheetsDownload:
def __init__(self, args):
self.args = args
def run(self):
""" Pulls Data From GS Previous Term and saves to proper csv format
"""
config = cfg[self.args.school.upper()]
sheet_name = f'{config.NICE_NAME} Class List' # f'{config.NICE_NAME} Course List'
gc = pygsheets.authorize(service_file='core/credentials/penji_dev_key.json')
try:
sh = gc.open(sheet_name)
logger.info(f'Found {sheet_name} in google drive, downloading sheet')
except pygsheets.exceptions.SpreadsheetNotFound:
logger.error(f'Could not find {sheet_name} in google drive')
return
self.download_tutor_csv(sh)
df = self.download_prev_term_csv(sh)
df.rename(columns={'Class': 'Course Code'}, inplace=True)
print(df.head())
# prof_df = self.cu_prof_setup(df)
#
# course_df = self.cu_courses_setup(df)
#
# self.add_prev_term_to_archive()
#
# gs_utils.save(gs_df=prof_df, term=cfg.GENERAL.PREV_TERM, school=self.args.school,
# sheet_idx=2, test=self.args.test)
# gs_utils.save(gs_df=course_df, term=cfg.GENERAL.PREV_TERM, school=self.args.school,
# sheet_idx=3, test=self.args.test)
self.add_prev_term_to_archive(df)
def download_tutor_csv(self, sh):
wks = sh.worksheet_by_title("Tutors")
tutors_df = wks.get_as_df()
tutors_df = tutors_df[tutors_df['Date Added'] != '']
tutors_df = tutors_df.drop('', axis=1)
self._save(gs_df=tutors_df, file_name=f'data/{self.args.school}/archive/gs_archive_{self.args.school}_6.csv')
def download_prev_term_csv(self, sh):
wks = sh.worksheet_by_title("Spring '19")
df = wks.get_as_df()
df = df[df['Course'] != '']
df = df.drop('', axis=1)
gs_utils.save(gs_df=df, term=cfg.GENERAL.PREV_TERM, school=self.args.school,
sheet_idx=0, test=self.args.test)
return df
def add_prev_term_to_archive(self, term_df):
file_names = (f'data/{self.args.school}/archive/gs_archive_{self.args.school}_7.csv',
f'data/{self.args.school}/archive/gs_archive_{self.args.school}_8.csv')
arch_prof_df, arch_course_df = self.load_archive_files(file_names)
ca_data = {key: [] for key in cfg.GENERAL.WKS_COLUMNS['Course Archive']}
pr_data = {key: [] for key in cfg.GENERAL.WKS_COLUMNS['Professor Archive']}
for idx, col in term_df.iterrows():
if term_df['Course Code'][idx] not in ca_data['Course Code']:
ca_data['Course Code'].append(term_df['Course Code'][idx])
ca_data['Name'].append(term_df['Name'][idx])
ca_data['Title'].append(term_df['Title'][idx])
ca_data['Previous Demand'].append(term_df['Demand'][idx])
ca_data['Term Last Updated'].append('Spring 19')
if term_df['Professor'][idx] not in pr_data['Full Name']:
pr_data['Full Name'].append(term_df['Professor'][idx])
pr_data['First Name'].append(term_df['First Name'][idx])
pr_data['Last Name'].append(term_df['Last Name'][idx])
# Find Previous Response : Previous response, Pre-approval status, LHP
prev_response = (' ', term_df['Previous Response'][idx], term_df['Pre-approval status'][idx],
term_df['LHP Response (yes/no)'][idx])
cons_response = [resp for resp in prev_response if resp != '']
pr_data['Previous Response'].append(cons_response[-1])
pr_data['Term Last Sent'].append('Spring 19')
num_rows = len(ca_data['Course Code'])
gs_ca_data = {key: ([None] * num_rows if lst == [] else lst) for key, lst in ca_data.items()}
courses_df = | pd.DataFrame(data=gs_ca_data, columns=cfg.GENERAL.WKS_COLUMNS['Course Archive']) | pandas.DataFrame |
# encoding: utf-8
# (c) 2017-2020 Open Risk (https://www.openriskmanagement.com)
#
# TransitionMatrix is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of TransitionMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
""" This module provides the key transition matrix objects
* CreditCurve_ implements the functionality of a collection of credit (default curves)
* TransitionMatrix_ implements the functionality of single period transition matrix
* TransitionMatrixSet_ provides a container for a multiperiod transition matrix collection
* StateSpace holds information about the stochastic system state space
* EmpiricalTransitionMatrix implements the functionality of a continuously observed transition matrix
.. moduleauthor: Open Risk
"""
import json
import numpy as np
import os
import pandas as pd
import transitionMatrix as tm
from scipy.linalg import logm, expm
def matrix_exponent(generator, t=1.0):
""" Compute the exponent of a transition matrix generator
:param t: the timescale parameter
:type t: float
:Example:
A = G.exponent()
"""
exponent = tm.TransitionMatrix(expm(t * generator))
return exponent
class CreditCurve(np.matrix):
""" The _`CreditCurve` object implements a typical collection of `credit curves <https://www.openriskmanual.org/wiki/Credit_Curve>`_.
The class inherits from numpy matrices and implements additional properties specific to curves.
"""
def __new__(cls, values=None, json_file=None, csv_file=None):
""" Create a new credit curve set. Different options for initialization are:
* providing values as a list of list
* providing values as a numpy array (The rows are the different curves, the columns are different periods)
* loading from a csv file
* loading from a json file
Without data, a default identity matrix is generated with user specified dimension
:param values: initialization values
:param json_file: a json file containing transition matrix data
:param csv_file: a csv file containing transition matrix data
:type values: list of lists or numpy array
:returns: returns a CreditCurve object
:rtype: object
.. note:: The initialization in itself does not validate if the provided values form indeed a credit curve
:Example:
.. code-block:: python
A = tm.CreditCurve(values=[[0.1, 0.2, 0.3], [0.2, 0.6, 0.8], [0.01, 0.02, 0.06]])
"""
obj = None
if values is not None:
# Initialize with given values
obj = np.asarray(values).view(cls)
elif json_file is not None:
# Initialize from file in json format
q = pd.read_json(json_file)
obj = np.asarray(q.values).view(cls)
elif csv_file is not None:
# Initialize from file in csv format
q = pd.read_csv(csv_file, index_col=None)
obj = np.asarray(q.values).view(cls)
# validation flag is set to False at initialization
obj.validated = False
# temporary dimension assignment (must validated for squareness)
obj.dimension = obj.shape[0]
return obj
def to_json(self, file):
"""
Write credit curves to file in json format
:param file: json filename
"""
q = pd.DataFrame(self)
q.to_json(file, orient='values')
def to_csv(self, file):
"""
Write credit curves to file in csv format
:param file: csv filename
"""
q = pd.DataFrame(self)
q.to_csv(file, index=None)
def to_html(self, file=None):
html_table = pd.DataFrame(self).to_html()
if file is not None:
file = open(file, 'w')
file.write(html_table)
file.close()
return html_table
def validate(self, accuracy=1e-3):
""" Validate required properties of a credit curve set. The following are checked
1. check that all values are probabilities (between 0 and 1)
2. check that values are non-decreasing
:param accuracy: accuracy level to use for validation
:type accuracy: float
:returns: List of tuples with validation messages
"""
validation_messages = []
curve_set = self
curve_set_size = curve_set.shape[0]
curve_set_periods = curve_set.shape[1]
# checking that values of curve_set are within allowed range
for i in range(curve_set_size):
for j in range(curve_set_periods):
if curve_set[i, j] < 0:
validation_messages.append(("Negative Probabilities: ", (i, j, curve_set[i, j])))
if curve_set[i, j] > 1:
validation_messages.append(("Probabilities Larger than 1: ", (i, j, curve_set[i, j])))
# checking monotonicity
for i in range(curve_set_size):
for j in range(1, curve_set_periods):
if curve_set[i, j] < curve_set[i, j - 1]:
validation_messages.append(("Curve not monotonic: ", (i, j)))
if len(validation_messages) == 0:
self.validated = True
return self.validated
else:
self.validated = False
return validation_messages
def hazard_curve(self):
""" Compute hazard rates
.. Todo:: Compute hazard rates
:return: TODO
"""
pass
def characterize(self):
""" Analyse or classify a credit curve according to its properties
* slope of hazard rate
.. Todo:: Further characterization
"""
pass
def print(self, format_type='Standard', accuracy=2):
""" Pretty print a set of credit curves
:param format_type: formatting options (Standard, Percent)
:type format_type: str
:param accuracy: number of decimals to display
:type accuracy: int
"""
for s_in in range(self.shape[0]):
for s_out in range(self.shape[1]):
if format_type is 'Standard':
format_string = "{0:." + str(accuracy) + "f}"
print(format_string.format(self[s_in, s_out]) + ' ', end='')
elif format_type is 'Percent':
print("{0:.2f}%".format(100 * self[s_in, s_out]) + ' ', end='')
print('')
print('')
class TransitionMatrix(np.matrix):
""" The _`TransitionMatrix` object implements a typical (one period) `transition matrix <https://www.openriskmanual.org/wiki/Transition_Matrix>`_.
The class inherits from numpy matrices and implements additional properties specific transition matrices. It forms the building block of the
TransitionMatrixSet_ which holds a collection of matrices in increasing temporal order
"""
def __new__(cls, values=None, dimension=2, json_file=None, csv_file=None):
""" Create a new transition matrix. Different options for initialization are:
* providing values as a list of list
* providing values as a numpy array
* loading from a csv file
* loading from a json file
Without data, a default identity matrix is generated with user specified dimension
:param values: initialization values
:param dimension: matrix dimensionality (default is 2)
:param json_file: a json file containing transition matrix data
:param csv_file: a csv file containing transition matrix data
:type values: list of lists or numpy array
:type dimension: int
:returns: returns a TransitionMatrix object
:rtype: object
.. note:: The initialization in itself does not validate if the provided values form indeed a transition matrix
:Example:
.. code-block:: python
A = tm.TransitionMatrix(values=[[0.6, 0.2, 0.2], [0.2, 0.6, 0.2], [0.2, 0.2, 0.6]])
"""
if values is not None:
# Initialize with given values
obj = np.asarray(values).view(cls)
elif json_file is not None:
# Initialize from file in json format
q = pd.read_json(json_file)
obj = np.asarray(q.values).view(cls)
elif csv_file is not None:
# Initialize from file in csv format
q = pd.read_csv(csv_file, index_col=None)
obj = np.asarray(q.values).view(cls)
else:
# Default instance (2x2 identity matrix)
default = np.identity(dimension)
obj = np.asarray(default).view(cls)
# validation flag is set to False at initialization
obj.validated = False
# temporary dimension assignment (must validated for squareness)
obj.dimension = obj.shape[0]
return obj
def row(self, i):
"""
Return row values
:param i: row index
"""
row = []
matrix_size = self.shape[0]
for j in range(matrix_size):
row.append(self[i, j])
return row
def to_json(self, file):
"""
Write transition matrix to file in json format
:param file: json filename
"""
q = pd.DataFrame(self)
q.to_json(file, orient='values')
def to_csv(self, file):
"""
Write transition matrix to file in csv format
:param file: csv filename
"""
q = pd.DataFrame(self)
q.to_csv(file, index=None)
def to_html(self, file=None):
html_table = | pd.DataFrame(self) | pandas.DataFrame |
from datetime import datetime
from pandas.compat import range, long, zip
from pandas import compat
import re
import numpy as np
from pandas.core.algorithms import unique
from pandas.tseries.offsets import DateOffset
from pandas.util.decorators import cache_readonly
import pandas.tseries.offsets as offsets
import pandas.core.common as com
import pandas.lib as lib
import pandas.tslib as tslib
class FreqGroup(object):
FR_ANN = 1000
FR_QTR = 2000
FR_MTH = 3000
FR_WK = 4000
FR_BUS = 5000
FR_DAY = 6000
FR_HR = 7000
FR_MIN = 8000
FR_SEC = 9000
FR_MS = 10000
FR_US = 11000
FR_NS = 12000
class Resolution(object):
RESO_US = tslib.US_RESO
RESO_MS = tslib.MS_RESO
RESO_SEC = tslib.S_RESO
RESO_MIN = tslib.T_RESO
RESO_HR = tslib.H_RESO
RESO_DAY = tslib.D_RESO
_reso_str_map = {
RESO_US: 'microsecond',
RESO_MS: 'millisecond',
RESO_SEC: 'second',
RESO_MIN: 'minute',
RESO_HR: 'hour',
RESO_DAY: 'day'}
_str_reso_map = dict([(v, k) for k, v in compat.iteritems(_reso_str_map)])
_reso_freq_map = {
'year': 'A',
'quarter': 'Q',
'month': 'M',
'day': 'D',
'hour': 'H',
'minute': 'T',
'second': 'S',
'millisecond': 'L',
'microsecond': 'U',
'nanosecond': 'N'}
_freq_reso_map = dict([(v, k) for k, v in compat.iteritems(_reso_freq_map)])
@classmethod
def get_str(cls, reso):
return cls._reso_str_map.get(reso, 'day')
@classmethod
def get_reso(cls, resostr):
return cls._str_reso_map.get(resostr, cls.RESO_DAY)
@classmethod
def get_freq(cls, resostr):
return cls._reso_freq_map[resostr]
@classmethod
def get_str_from_freq(cls, freq):
return cls._freq_reso_map.get(freq, 'day')
@classmethod
def get_reso_from_freq(cls, freq):
return cls.get_reso(cls.get_str_from_freq(freq))
def get_reso_string(reso):
return Resolution.get_str(reso)
def get_to_timestamp_base(base):
if base < FreqGroup.FR_BUS:
return FreqGroup.FR_DAY
if FreqGroup.FR_HR <= base <= FreqGroup.FR_SEC:
return FreqGroup.FR_SEC
return base
def get_freq_group(freq):
if isinstance(freq, compat.string_types):
base, mult = get_freq_code(freq)
freq = base
return (freq // 1000) * 1000
def get_freq(freq):
if isinstance(freq, compat.string_types):
base, mult = get_freq_code(freq)
freq = base
return freq
def get_freq_code(freqstr):
"""
Parameters
----------
Returns
-------
"""
if isinstance(freqstr, DateOffset):
freqstr = (get_offset_name(freqstr), freqstr.n)
if isinstance(freqstr, tuple):
if (com.is_integer(freqstr[0]) and
com.is_integer(freqstr[1])):
# e.g., freqstr = (2000, 1)
return freqstr
else:
# e.g., freqstr = ('T', 5)
try:
code = _period_str_to_code(freqstr[0])
stride = freqstr[1]
except:
if com.is_integer(freqstr[1]):
raise
code = _period_str_to_code(freqstr[1])
stride = freqstr[0]
return code, stride
if com.is_integer(freqstr):
return (freqstr, 1)
base, stride = _base_and_stride(freqstr)
code = _period_str_to_code(base)
return code, stride
def _get_freq_str(base, mult=1):
code = _reverse_period_code_map.get(base)
if mult == 1:
return code
return str(mult) + code
#----------------------------------------------------------------------
# Offset names ("time rules") and related functions
from pandas.tseries.offsets import (Nano, Micro, Milli, Second, Minute, Hour,
Day, BDay, CDay, Week, MonthBegin,
MonthEnd, BMonthBegin, BMonthEnd,
QuarterBegin, QuarterEnd, BQuarterBegin,
BQuarterEnd, YearBegin, YearEnd,
BYearBegin, BYearEnd, _make_offset
)
try:
cday = CDay()
except NotImplementedError:
cday = None
#: cache of previously seen offsets
_offset_map = {}
_offset_to_period_map = {
'WEEKDAY': 'D',
'EOM': 'M',
'BM': 'M',
'BQS': 'Q',
'QS': 'Q',
'BQ': 'Q',
'BA': 'A',
'AS': 'A',
'BAS': 'A',
'MS': 'M',
'D': 'D',
'C': 'C',
'B': 'B',
'T': 'T',
'S': 'S',
'L': 'L',
'U': 'U',
'N': 'N',
'H': 'H',
'Q': 'Q',
'A': 'A',
'W': 'W',
'M': 'M'
}
need_suffix = ['QS', 'BQ', 'BQS', 'AS', 'BA', 'BAS']
_months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP',
'OCT', 'NOV', 'DEC']
for __prefix in need_suffix:
for _m in _months:
_offset_to_period_map['%s-%s' % (__prefix, _m)] = \
_offset_to_period_map[__prefix]
for __prefix in ['A', 'Q']:
for _m in _months:
_alias = '%s-%s' % (__prefix, _m)
_offset_to_period_map[_alias] = _alias
_days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for _d in _days:
_offset_to_period_map['W-%s' % _d] = 'W-%s' % _d
def get_period_alias(offset_str):
""" alias to closest period strings BQ->Q etc"""
return _offset_to_period_map.get(offset_str, None)
_rule_aliases = {
# Legacy rules that will continue to map to their original values
# essentially for the rest of time
'WEEKDAY': 'B',
'EOM': 'BM',
'W@MON': 'W-MON',
'W@TUE': 'W-TUE',
'W@WED': 'W-WED',
'W@THU': 'W-THU',
'W@FRI': 'W-FRI',
'W@SAT': 'W-SAT',
'W@SUN': 'W-SUN',
'W': 'W-SUN',
'Q@JAN': 'BQ-JAN',
'Q@FEB': 'BQ-FEB',
'Q@MAR': 'BQ-MAR',
'Q': 'Q-DEC',
'A': 'A-DEC', # YearEnd(month=12),
'AS': 'AS-JAN', # YearBegin(month=1),
'BA': 'BA-DEC', # BYearEnd(month=12),
'BAS': 'BAS-JAN', # BYearBegin(month=1),
'A@JAN': 'BA-JAN',
'A@FEB': 'BA-FEB',
'A@MAR': 'BA-MAR',
'A@APR': 'BA-APR',
'A@MAY': 'BA-MAY',
'A@JUN': 'BA-JUN',
'A@JUL': 'BA-JUL',
'A@AUG': 'BA-AUG',
'A@SEP': 'BA-SEP',
'A@OCT': 'BA-OCT',
'A@NOV': 'BA-NOV',
'A@DEC': 'BA-DEC',
# lite aliases
'Min': 'T',
'min': 'T',
'ms': 'L',
'us': 'U'
}
#TODO: Can this be killed?
for _i, _weekday in enumerate(['MON', 'TUE', 'WED', 'THU', 'FRI']):
for _iweek in range(4):
_name = 'WOM-%d%s' % (_iweek + 1, _weekday)
_rule_aliases[_name.replace('-', '@')] = _name
# Note that _rule_aliases is not 1:1 (d[BA]==d[A@DEC]), and so traversal
# order matters when constructing an inverse. we pick one. #2331
_legacy_reverse_map = dict((v, k) for k, v in
reversed(sorted(compat.iteritems(_rule_aliases))))
def to_offset(freqstr):
"""
Return DateOffset object from string representation
Examples
--------
>>> to_offset('5Min')
Minute(5)
"""
if freqstr is None:
return None
if isinstance(freqstr, DateOffset):
return freqstr
if isinstance(freqstr, tuple):
name = freqstr[0]
stride = freqstr[1]
if isinstance(stride, compat.string_types):
name, stride = stride, name
name, _ = _base_and_stride(name)
delta = get_offset(name) * stride
else:
delta = None
stride_sign = None
try:
for stride, name, _ in opattern.findall(freqstr):
offset = get_offset(name)
if stride_sign is None:
stride_sign = -1 if stride.startswith('-') else 1
if not stride:
stride = 1
stride = int(stride)
offset = offset * int(np.fabs(stride) * stride_sign)
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError("Could not evaluate %s" % freqstr)
if delta is None:
raise ValueError('Unable to understand %s as a frequency' % freqstr)
return delta
# hack to handle WOM-1MON
opattern = re.compile(r'([\-]?\d*)\s*([A-Za-z]+([\-@][\dA-Za-z\-]+)?)')
def _base_and_stride(freqstr):
"""
Return base freq and stride info from string representation
Examples
--------
_freq_and_stride('5Min') -> 'Min', 5
"""
groups = opattern.match(freqstr)
if not groups:
raise ValueError("Could not evaluate %s" % freqstr)
stride = groups.group(1)
if len(stride):
stride = int(stride)
else:
stride = 1
base = groups.group(2)
return (base, stride)
def get_base_alias(freqstr):
"""
Returns the base frequency alias, e.g., '5D' -> 'D'
"""
return _base_and_stride(freqstr)[0]
_dont_uppercase = set(('MS', 'ms'))
def get_offset(name):
"""
Return DateOffset object associated with rule name
Examples
--------
get_offset('EOM') --> BMonthEnd(1)
"""
if name not in _dont_uppercase:
name = name.upper()
if name in _rule_aliases:
name = _rule_aliases[name]
elif name.lower() in _rule_aliases:
name = _rule_aliases[name.lower()]
else:
if name in _rule_aliases:
name = _rule_aliases[name]
if name not in _offset_map:
try:
# generate and cache offset
offset = _make_offset(name)
except (ValueError, TypeError, KeyError):
# bad prefix or suffix
raise ValueError('Bad rule name requested: %s.' % name)
_offset_map[name] = offset
return _offset_map[name]
getOffset = get_offset
def get_offset_name(offset):
"""
Return rule name associated with a DateOffset object
Examples
--------
get_offset_name(BMonthEnd(1)) --> 'EOM'
"""
if offset is None:
raise ValueError("Offset can't be none!")
# Hack because this is what it did before...
if isinstance(offset, BDay):
if offset.n != 1:
raise ValueError('Bad rule given: %s.' % 'BusinessDays')
else:
return offset.rule_code
try:
return offset.freqstr
except AttributeError:
# Bad offset, give useful error.
raise ValueError('Bad rule given: %s.' % offset)
def get_legacy_offset_name(offset):
"""
Return the pre pandas 0.8.0 name for the date offset
"""
name = offset.name
return _legacy_reverse_map.get(name, name)
def get_standard_freq(freq):
"""
Return the standardized frequency string
"""
if freq is None:
return None
if isinstance(freq, DateOffset):
return get_offset_name(freq)
code, stride = get_freq_code(freq)
return _get_freq_str(code, stride)
#----------------------------------------------------------------------
# Period codes
# period frequency constants corresponding to scikits timeseries
# originals
_period_code_map = {
# Annual freqs with various fiscal year ends.
# eg, 2005 for A-FEB runs Mar 1, 2004 to Feb 28, 2005
"A-DEC": 1000, # Annual - December year end
"A-JAN": 1001, # Annual - January year end
"A-FEB": 1002, # Annual - February year end
"A-MAR": 1003, # Annual - March year end
"A-APR": 1004, # Annual - April year end
"A-MAY": 1005, # Annual - May year end
"A-JUN": 1006, # Annual - June year end
"A-JUL": 1007, # Annual - July year end
"A-AUG": 1008, # Annual - August year end
"A-SEP": 1009, # Annual - September year end
"A-OCT": 1010, # Annual - October year end
"A-NOV": 1011, # Annual - November year end
# Quarterly frequencies with various fiscal year ends.
# eg, Q42005 for Q-OCT runs Aug 1, 2005 to Oct 31, 2005
"Q-DEC": 2000, # Quarterly - December year end
"Q-JAN": 2001, # Quarterly - January year end
"Q-FEB": 2002, # Quarterly - February year end
"Q-MAR": 2003, # Quarterly - March year end
"Q-APR": 2004, # Quarterly - April year end
"Q-MAY": 2005, # Quarterly - May year end
"Q-JUN": 2006, # Quarterly - June year end
"Q-JUL": 2007, # Quarterly - July year end
"Q-AUG": 2008, # Quarterly - August year end
"Q-SEP": 2009, # Quarterly - September year end
"Q-OCT": 2010, # Quarterly - October year end
"Q-NOV": 2011, # Quarterly - November year end
"M": 3000, # Monthly
"W-SUN": 4000, # Weekly - Sunday end of week
"W-MON": 4001, # Weekly - Monday end of week
"W-TUE": 4002, # Weekly - Tuesday end of week
"W-WED": 4003, # Weekly - Wednesday end of week
"W-THU": 4004, # Weekly - Thursday end of week
"W-FRI": 4005, # Weekly - Friday end of week
"W-SAT": 4006, # Weekly - Saturday end of week
"B": 5000, # Business days
"D": 6000, # Daily
"H": 7000, # Hourly
"T": 8000, # Minutely
"S": 9000, # Secondly
"L": 10000, # Millisecondly
"U": 11000, # Microsecondly
"N": 12000, # Nanosecondly
}
_reverse_period_code_map = {}
for _k, _v in compat.iteritems(_period_code_map):
_reverse_period_code_map[_v] = _k
# Additional aliases
_period_code_map.update({
"Q": 2000, # Quarterly - December year end (default quarterly)
"A": 1000, # Annual
"W": 4000, # Weekly
})
def _period_alias_dictionary():
"""
Build freq alias dictionary to support freqs from original c_dates.c file
of the scikits.timeseries library.
"""
alias_dict = {}
M_aliases = ["M", "MTH", "MONTH", "MONTHLY"]
B_aliases = ["B", "BUS", "BUSINESS", "BUSINESSLY", 'WEEKDAY']
D_aliases = ["D", "DAY", "DLY", "DAILY"]
H_aliases = ["H", "HR", "HOUR", "HRLY", "HOURLY"]
T_aliases = ["T", "MIN", "MINUTE", "MINUTELY"]
S_aliases = ["S", "SEC", "SECOND", "SECONDLY"]
L_aliases = ["L", "ms", "MILLISECOND", "MILLISECONDLY"]
U_aliases = ["U", "US", "MICROSECOND", "MICROSECONDLY"]
N_aliases = ["N", "NS", "NANOSECOND", "NANOSECONDLY"]
for k in M_aliases:
alias_dict[k] = 'M'
for k in B_aliases:
alias_dict[k] = 'B'
for k in D_aliases:
alias_dict[k] = 'D'
for k in H_aliases:
alias_dict[k] = 'H'
for k in T_aliases:
alias_dict[k] = 'Min'
for k in S_aliases:
alias_dict[k] = 'S'
for k in L_aliases:
alias_dict[k] = 'L'
for k in U_aliases:
alias_dict[k] = 'U'
for k in N_aliases:
alias_dict[k] = 'N'
A_prefixes = ["A", "Y", "ANN", "ANNUAL", "ANNUALLY", "YR", "YEAR",
"YEARLY"]
Q_prefixes = ["Q", "QTR", "QUARTER", "QUARTERLY", "Q-E",
"QTR-E", "QUARTER-E", "QUARTERLY-E"]
month_names = [
["DEC", "DECEMBER"],
["JAN", "JANUARY"],
["FEB", "FEBRUARY"],
["MAR", "MARCH"],
["APR", "APRIL"],
["MAY", "MAY"],
["JUN", "JUNE"],
["JUL", "JULY"],
["AUG", "AUGUST"],
["SEP", "SEPTEMBER"],
["OCT", "OCTOBER"],
["NOV", "NOVEMBER"]]
seps = ["@", "-"]
for k in A_prefixes:
alias_dict[k] = 'A'
for m_tup in month_names:
for sep in seps:
m1, m2 = m_tup
alias_dict[k + sep + m1] = 'A-' + m1
alias_dict[k + sep + m2] = 'A-' + m1
for k in Q_prefixes:
alias_dict[k] = 'Q'
for m_tup in month_names:
for sep in seps:
m1, m2 = m_tup
alias_dict[k + sep + m1] = 'Q-' + m1
alias_dict[k + sep + m2] = 'Q-' + m1
W_prefixes = ["W", "WK", "WEEK", "WEEKLY"]
day_names = [
["SUN", "SUNDAY"],
["MON", "MONDAY"],
["TUE", "TUESDAY"],
["WED", "WEDNESDAY"],
["THU", "THURSDAY"],
["FRI", "FRIDAY"],
["SAT", "SATURDAY"]]
for k in W_prefixes:
alias_dict[k] = 'W'
for d_tup in day_names:
for sep in ["@", "-"]:
d1, d2 = d_tup
alias_dict[k + sep + d1] = 'W-' + d1
alias_dict[k + sep + d2] = 'W-' + d1
return alias_dict
def _infer_period_group(freqstr):
return _period_group(Resolution._reso_freq_map[freqstr])
def _period_group(freqstr):
base, mult = get_freq_code(freqstr)
return base // 1000 * 1000
_period_alias_dict = _period_alias_dictionary()
def _period_str_to_code(freqstr):
# hack
freqstr = _rule_aliases.get(freqstr, freqstr)
if freqstr not in _dont_uppercase:
freqstr = _rule_aliases.get(freqstr.lower(), freqstr)
try:
if freqstr not in _dont_uppercase:
freqstr = freqstr.upper()
return _period_code_map[freqstr]
except KeyError:
try:
alias = _period_alias_dict[freqstr]
except KeyError:
raise ValueError("Unknown freqstr: %s" % freqstr)
return _period_code_map[alias]
def infer_freq(index, warn=True):
"""
Infer the most likely frequency given the input index. If the frequency is
uncertain, a warning will be printed
Parameters
----------
index : DatetimeIndex
if passed a Series will use the values of the series (NOT THE INDEX)
warn : boolean, default True
Returns
-------
freq : string or None
None if no discernible frequency
TypeError if the index is not datetime-like
"""
import pandas as pd
if isinstance(index, com.ABCSeries):
values = index.values
if not (com.is_datetime64_dtype(index.values) or com.is_timedelta64_dtype(index.values) or values.dtype == object):
raise TypeError("cannot infer freq from a non-convertible dtype on a Series of {0}".format(index.dtype))
index = values
if com.is_period_arraylike(index):
raise TypeError("PeriodIndex given. Check the `freq` attribute "
"instead of using infer_freq.")
elif isinstance(index, pd.TimedeltaIndex):
inferer = _TimedeltaFrequencyInferer(index, warn=warn)
return inferer.get_freq()
if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
if isinstance(index, (pd.Int64Index, pd.Float64Index)):
raise TypeError("cannot infer freq from a non-convertible index type {0}".format(type(index)))
index = index.values
index = | pd.DatetimeIndex(index) | pandas.DatetimeIndex |
# -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from stellargraph import StellarGraph, StellarDiGraph
import networkx as nx
import pandas as pd
import numpy as np
import random
import pytest
def create_graph_features():
# APPNP, ClusterGCN, GCN, PPNP, node_mappers
graph = nx.Graph()
graph.add_nodes_from(["a", "b", "c"])
graph.add_edges_from([("a", "b"), ("b", "c"), ("a", "c")])
graph = graph.to_undirected()
return graph, np.array([[1, 1], [1, 0], [0, 1]])
def relational_create_graph_features(is_directed=False):
# RGCN, relational node mappers
r1 = {"label": "r1"}
r2 = {"label": "r2"}
nodes = ["a", "b", "c"]
features = np.array([[1, 1], [1, 0], [0, 1]])
node_features = pd.DataFrame.from_dict(
{n: f for n, f in zip(nodes, features)}, orient="index"
)
graph = nx.MultiDiGraph() if is_directed else nx.MultiGraph()
graph.add_nodes_from(nodes)
graph.add_edges_from([("a", "b", r1), ("b", "c", r1), ("a", "c", r2)])
SG = StellarDiGraph if is_directed else StellarGraph
return SG(graph, node_features=node_features), features
def example_graph_nx(
feature_size=None, label="default", feature_name="feature", is_directed=False
):
graph = nx.DiGraph() if is_directed else nx.Graph()
elist = [(1, 2), (2, 3), (1, 4), (4, 2)]
graph.add_nodes_from([1, 2, 3, 4], label=label)
graph.add_edges_from(elist, label=label)
# Add example features
if feature_size is not None:
for v in graph.nodes():
graph.nodes[v][feature_name] = int(v) * np.ones(feature_size)
return graph
def _repeated_features(values_to_repeat, width):
column = np.expand_dims(values_to_repeat, axis=1)
return column.repeat(width, axis=1)
def example_graph(
feature_size=None,
node_label="default",
edge_label="default",
feature_name="feature",
is_directed=False,
):
elist = pd.DataFrame([(1, 2), (2, 3), (1, 4), (4, 2)], columns=["source", "target"])
nodes = [1, 2, 3, 4]
if feature_size is not None:
features = _repeated_features(nodes, feature_size)
else:
features = []
nodes = pd.DataFrame(features, index=nodes)
cls = StellarDiGraph if is_directed else StellarGraph
return cls(nodes={node_label: nodes}, edges={edge_label: elist})
def example_hin_1_nx(feature_name=None, for_nodes=None, feature_sizes=None):
# stellargraph
graph = nx.Graph()
graph.add_nodes_from([0, 1, 2, 3], label="A")
graph.add_nodes_from([4, 5, 6], label="B")
graph.add_edges_from([(0, 4), (1, 4), (1, 5), (2, 4), (3, 5)], label="R")
graph.add_edges_from([(4, 5)], label="F")
if feature_name is not None:
if for_nodes is None:
for_nodes = list(graph.nodes())
if feature_sizes is None:
feature_sizes = dict()
for v in for_nodes:
fs = feature_sizes.get(graph.nodes[v]["label"], 10)
graph.nodes[v][feature_name] = v * np.ones(fs)
return graph
def example_hin_1(
feature_sizes=None, is_directed=False, self_loop=False
) -> StellarGraph:
def features(label, ids):
if feature_sizes is None:
return []
else:
feature_size = feature_sizes.get(label, 10)
return _repeated_features(ids, feature_size)
a_ids = [0, 1, 2, 3]
a = pd.DataFrame(features("A", a_ids), index=a_ids)
b_ids = [4, 5, 6]
b = pd.DataFrame(features("B", b_ids), index=b_ids)
r = pd.DataFrame(
[(4, 0), (1, 5), (1, 4), (2, 4), (5, 3)], columns=["source", "target"]
)
f_edges, f_index = [(4, 5)], [6]
if self_loop:
# make it a multigraph
f_edges.extend([(5, 5), (5, 5)])
f_index.extend([7, 8])
# add some weights for the f edges, but not others
f_columns = ["source", "target", "weight"]
for i, src_tgt in enumerate(f_edges):
f_edges[i] = src_tgt + (10 + i,)
f = pd.DataFrame(f_edges, columns=f_columns, index=f_index)
cls = StellarDiGraph if is_directed else StellarGraph
return cls(nodes={"A": a, "B": b}, edges={"R": r, "F": f})
def create_test_graph_nx(is_directed=False):
# unsupervised sampler
graph = nx.DiGraph() if is_directed else nx.Graph()
edges = [
("0", 1),
("0", 2),
(1, 3),
(1, 4),
(3, 6),
(4, 7),
(4, 8),
(2, 5),
(5, 9),
(5, 10),
("0", "0"),
(1, 1),
(3, 3),
(6, 6),
(4, 4),
(7, 7),
(8, 8),
(2, 2),
(5, 5),
(9, 9),
("self loner", "self loner"), # an isolated node with a self link
]
graph.add_edges_from(edges)
graph.add_node("loner") # an isolated node without self link
return graph
def create_test_graph(is_directed=False):
# biased random walker, breadth first walker, directed breadth first walker, uniform random walker
if is_directed:
return StellarDiGraph(create_test_graph_nx(is_directed))
else:
return StellarGraph(create_test_graph_nx(is_directed))
def create_stellargraph():
# cluster gcn, cluster gcn node mapper
Gnx, features = create_graph_features()
nodes = Gnx.nodes()
node_features = pd.DataFrame.from_dict(
{n: f for n, f in zip(nodes, features)}, orient="index"
)
graph = StellarGraph(Gnx, node_features=node_features)
return graph
def example_graph_1_saliency_maps(feature_size=None):
# saliency gcn, saliency gat
graph = nx.Graph()
elist = [(0, 1), (0, 2), (2, 3), (3, 4), (0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
graph.add_nodes_from([0, 1, 2, 3, 4], label="default")
graph.add_edges_from(elist, label="default")
# Add example features
if feature_size is not None:
for v in graph.nodes():
graph.nodes[v]["feature"] = np.ones(feature_size)
return StellarGraph(graph, node_features="feature")
else:
return StellarGraph(graph)
def example_graph_random(feature_size=4, n_edges=20, n_nodes=6, n_isolates=1):
# core/utils, link mapper, node mapper graph 3
graph = nx.Graph()
n_noniso = n_nodes - n_isolates
edges = [
(random.randint(0, n_noniso - 1), random.randint(0, n_noniso - 1))
for _ in range(n_edges)
]
graph.add_nodes_from(range(n_nodes))
graph.add_edges_from(edges, label="default")
# Add example features
if feature_size is not None:
for v in graph.nodes():
graph.nodes[v]["feature"] = int(v) * np.ones(feature_size, dtype="int")
return StellarGraph(graph, node_features="feature")
else:
return StellarGraph(graph)
def node_features(seed=0) -> pd.DataFrame:
random = np.random.RandomState(seed)
node_data_np = random.rand(10, 10)
return | pd.DataFrame(node_data_np) | pandas.DataFrame |
import os
import glob
import pandas as pd
import numpy as np
import random
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, as_float_array
from sklearn.base import TransformerMixin, BaseEstimator
import kneed
import scipy
import seaborn as sns
import matplotlib.pyplot as plt
def load_data(exp, plate, filetype):
"""load all data from a single experiment into a single dataframe"""
path = os.path.join('profiles',
f'{exp}',
f'{plate}',
f'*_{filetype}')
files = glob.glob(path)
print(files)
df = pd.concat(pd.read_csv(_, low_memory=False) for _ in files)
return df
def get_metacols(df):
"""return a list of metadata columns"""
return [c for c in df.columns if c.startswith("Metadata_")]
def get_featurecols(df):
"""returna list of featuredata columns"""
return [c for c in df.columns if not c.startswith("Metadata")]
def get_metadata(df):
"""return dataframe of just metadata columns"""
return df[get_metacols(df)]
def get_featuredata(df):
"""return dataframe of just featuredata columns"""
return df[get_featurecols(df)]
def remove_negcon_empty_wells(df):
"""return dataframe of non-negative control wells"""
df = (
df.query('Metadata_control_type!="negcon"')
.dropna(subset=['Metadata_broad_sample'])
.reset_index(drop=True)
)
return df
def select_only_controls(df):
""" return dataframe of only controls, without outer wells"""
#df = (
# df.query('Metadata_Well!="A*"' and 'Metadata_Well!="P*"' and 'Metadata_Well!="*01"' and 'Metadata_Well!="*24"'
# and 'Metadata_control_type!="poscon_orf"' and 'Metadata_pert_type=="control"')
# )
df = (
df.query('Metadata_pert_type=="control"')
)
return df
def concat_profiles(df1, df2):
"""Concatenate dataframes"""
if df1.shape[0] == 0:
df1 = df2.copy()
else:
frames = [df1, df2]
df1 = pd.concat(frames, ignore_index=True, join="inner")
return df1
def percent_score(null_dist, corr_dist, how):
"""
Calculates the Percent strong or percent recall scores
:param null_dist: Null distribution
:param corr_dist: Correlation distribution
:param how: "left", "right" or "both" for using the 5th percentile, 95th percentile or both thresholds
:return: proportion of correlation distribution beyond the threshold
"""
if how == 'right':
perc_95 = np.nanpercentile(null_dist, 95)
above_threshold = corr_dist > perc_95
return np.mean(above_threshold.astype(float))*100, perc_95
if how == 'left':
perc_5 = np.nanpercentile(null_dist, 5)
below_threshold = corr_dist < perc_5
return np.mean(below_threshold.astype(float))*100, perc_5
if how == 'both':
perc_95 = np.nanpercentile(null_dist, 95)
above_threshold = corr_dist > perc_95
perc_5 = np.nanpercentile(null_dist, 5)
below_threshold = corr_dist < perc_5
return (np.mean(above_threshold.astype(float)) + np.mean(below_threshold.astype(float)))*100, perc_95, perc_5
def corr_between_replicates(df, group_by_feature):
"""
Correlation between replicates
Parameters:
-----------
df: pd.DataFrame
group_by_feature: Feature name to group the data frame by
Returns:
--------
list-like of correlation values
"""
replicate_corr = []
replicate_grouped = df.groupby(group_by_feature)
for name, group in replicate_grouped:
group_features = get_featuredata(group)
corr = np.corrcoef(group_features)
if len(group_features) == 1: # If there is only one replicate on a plate
replicate_corr.append(np.nan)
else:
np.fill_diagonal(corr, np.nan)
replicate_corr.append(np.nanmedian(corr)) # median replicate correlation
return replicate_corr
def corr_between_non_replicates(df, n_samples, n_replicates, metadata_compound_name):
"""
Null distribution between random "replicates".
Parameters:
------------
df: pandas.DataFrame
n_samples: int
n_replicates: int
metadata_compound_name: Compound name feature
Returns:
--------
list-like of correlation values, with a length of `n_samples`
"""
df.reset_index(drop=True, inplace=True)
null_corr = []
while len(null_corr) < n_samples:
compounds = random.choices([_ for _ in range(len(df))], k=n_replicates)
sample = df.loc[compounds].copy()
if len(sample[metadata_compound_name].unique()) == n_replicates:
sample_features = get_featuredata(sample)
corr = np.corrcoef(sample_features)
np.fill_diagonal(corr, np.nan)
null_corr.append(np.nanmedian(corr)) # median replicate correlation
return null_corr
def correlation_between_modalities(modality_1_df, modality_2_df, modality_1, modality_2, metadata_common, metadata_perturbation):
"""
Compute the correlation between two different modalities.
:param modality_1_df: Profiles of the first modality
:param modality_2_df: Profiles of the second modality
:param modality_1: feature that identifies perturbation pairs
:param modality_2: perturbation name feature
:param metadata_common: perturbation name feature
:param metadata_perturbation: perturbation name feature
:return: list-like of correlation values
"""
list_common_perturbation_groups = list(np.intersect1d(list(modality_1_df[metadata_common]), list(modality_2_df[metadata_common])))
merged_df = pd.concat([modality_1_df, modality_2_df], ignore_index=False, join='inner')
modality_1_df = merged_df.query('Metadata_modality==@modality_1')
modality_2_df = merged_df.query('Metadata_modality==@modality_2')
corr_modalities = []
for group in list_common_perturbation_groups:
modality_1_perturbation_df = modality_1_df.loc[modality_1_df[metadata_common] == group]
modality_2_perturbation_df = modality_2_df.loc[modality_2_df[metadata_common] == group]
for sample_1 in modality_1_perturbation_df[metadata_perturbation].unique():
for sample_2 in modality_2_perturbation_df[metadata_perturbation].unique():
modality_1_perturbation_sample_df = modality_1_perturbation_df.loc[modality_1_perturbation_df[metadata_perturbation] == sample_1]
modality_2_perturbation_sample_df = modality_2_perturbation_df.loc[modality_2_perturbation_df[metadata_perturbation] == sample_2]
modality_1_perturbation_profiles = get_featuredata(modality_1_perturbation_sample_df)
modality_2_perturbation_profiles = get_featuredata(modality_2_perturbation_sample_df)
corr = np.corrcoef(modality_1_perturbation_profiles, modality_2_perturbation_profiles)
corr = corr[0:len(modality_1_perturbation_profiles), len(modality_1_perturbation_profiles):]
corr_modalities.append(np.nanmedian(corr)) # median replicate correlation
return corr_modalities
def null_correlation_between_modalities(modality_1_df, modality_2_df, modality_1, modality_2, metadata_common, metadata_perturbation, n_samples):
"""
Compute the correlation between two different modalities.
:param modality_1_df: Profiles of the first modality
:param modality_2_df: Profiles of the second modality
:param modality_1: "Compound", "ORF" or "CRISPR"
:param modality_2: "Compound", "ORF" or "CRISPR"
:param metadata_common: feature that identifies perturbation pairs
:param metadata_perturbation: perturbation name feature
:param n_samples: int
:return:
"""
list_common_perturbation_groups = list(np.intersect1d(list(modality_1_df[metadata_common]), list(modality_2_df[metadata_common])))
merged_df = pd.concat([modality_1_df, modality_2_df], ignore_index=False, join='inner')
modality_1_df = merged_df.query('Metadata_modality==@modality_1')
modality_2_df = merged_df.query('Metadata_modality==@modality_2')
null_modalities = []
count = 0
while count < n_samples:
perturbations = random.choices(list_common_perturbation_groups, k=2)
modality_1_perturbation_df = modality_1_df.loc[modality_1_df[metadata_common] == perturbations[0]]
modality_2_perturbation_df = modality_2_df.loc[modality_2_df[metadata_common] == perturbations[1]]
for sample_1 in modality_1_perturbation_df[metadata_perturbation].unique():
for sample_2 in modality_2_perturbation_df[metadata_perturbation].unique():
modality_1_perturbation_sample_df = modality_1_perturbation_df.loc[modality_1_perturbation_df[metadata_perturbation] == sample_1]
modality_2_perturbation_sample_df = modality_2_perturbation_df.loc[modality_2_perturbation_df[metadata_perturbation] == sample_2]
modality_1_perturbation_profiles = get_featuredata(modality_1_perturbation_sample_df)
modality_2_perturbation_profiles = get_featuredata(modality_2_perturbation_sample_df)
corr = np.corrcoef(modality_1_perturbation_profiles, modality_2_perturbation_profiles)
corr = corr[0:len(modality_1_perturbation_profiles), len(modality_1_perturbation_profiles):]
null_modalities.append(np.nanmedian(corr)) # median replicate correlation
count += 1
return null_modalities
class ZCA_corr(BaseEstimator, TransformerMixin):
def __init__(self, copy=False):
self.copy = copy
def estimate_regularization(self, eigenvalue):
x = [_ for _ in range(len(eigenvalue))]
kneedle = kneed.KneeLocator(x, eigenvalue, S=1.0, curve='convex', direction='decreasing')
reg = eigenvalue[kneedle.elbow]/10.0
return reg # The complex part of the eigenvalue is ignored
def fit(self, X, y=None):
"""
Compute the mean, sphering and desphering matrices.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the mean, sphering and desphering
matrices.
"""
X = check_array(X, accept_sparse=False, copy=self.copy, ensure_2d=True)
X = as_float_array(X, copy=self.copy)
self.mean_ = X.mean(axis=0)
X_ = X - self.mean_
cov = np.dot(X_.T, X_) / (X_.shape[0] - 1)
V = np.diag(cov)
df = | pd.DataFrame(X_) | pandas.DataFrame |
import pandas as pd
import math
from sklearn.ensemble import RandomForestRegressor
trainInput = pd.read_excel("FPL_Season_Data_Only_Inputs_Shuffled2.xlsx")
trainInput = trainInput.drop(trainInput.columns[0], axis=1)
trainOutput = pd.read_excel("FPL_Season_Data_Only_Outputs_Shuffled2.xlsx")
trainOutput = trainOutput.pop("Points")
position = pd.get_dummies(trainInput["Position"],prefix="Position")
trainInput = pd.concat([position,trainInput], axis =1)
trainInput.drop(["Position"], axis=1, inplace=True)
trainInput.drop(["YC"], axis=1, inplace=True)
trainInput.drop(["RC"], axis=1, inplace=True)
trainInput.drop(["Bonus Points"], axis=1, inplace=True)
df = pd.read_excel("PredictionsData.xlsx")
position = pd.get_dummies(df["Position"],prefix="Position")
df = | pd.concat([position,df], axis =1) | pandas.concat |
# -*- coding: utf-8 -*-
"""Requests all forecasts (danger levels and problems) from the forecast api and writes to .csv file or plot."""
import datetime as dt
from varsomdata import getforecastapi as gf
from varsomdata import varsomclasses as vc
from varsomdata import getvarsompickles as gvp
from varsomdata import getmisc as gm
import logging as lg
import setenvironment as se
import pandas
__author__ = 'kmunve'
def test_AvalancheDanger_to_dict():
region_ids = [3022] # Trollheimen
from_date = dt.date(2018, 12, 1)
to_date = dt.date(2018, 12, 5)
warnings_ = gf.get_avalanche_warnings_deprecated(region_ids, from_date, to_date, lang_key=1)
_d = warnings_[0].to_dict()
k = 'm'
def test_AvalancheDanger_as_df():
"""
Put class data into a pandas.DataFrame
:return:
"""
region_ids = [3022] # Trollheimen
from_date = dt.date(2018, 12, 1)
to_date = dt.date(2018, 12, 6)
warnings_ = gf.get_avalanche_warnings_deprecated(region_ids, from_date, to_date, lang_key=1, as_dict=True)
df = pandas.DataFrame.from_dict(warnings_)
df.to_csv(r'../localstorage/aval_danger.csv', header=True)
k = 'm'
def test_MountainWeather_class():
"""
Requires "forecast_api_version" : "v4.0.1" in /config/api.json
"""
region_ids = [3022] # Trollheimen
from_date = dt.date(2018, 12, 1)
to_date = dt.date(2018, 12, 4)
warnings_as_json = gf.get_avalanche_warnings_as_json(region_ids, from_date, to_date, lang_key=1)
warnings_ = gf.get_avalanche_warnings_deprecated(region_ids, from_date, to_date, lang_key=1)
w = warnings_as_json[0]
mw = gf.MountainWeather()
mw.from_dict(w['MountainWeather'])
k = 'm'
def test_AvalancheWarning_class():
"""
Requires "forecast_api_version" : "v4.0.1" in /config/api.json
"""
region_ids = [3003]
from_date = dt.date(2018, 12, 3)
to_date = dt.date(2018, 12, 7)
warnings_as_json = gf.get_avalanche_warnings_as_json(region_ids, from_date, to_date, lang_key=1)
warnings_ = []
for w in warnings_as_json:
_aw = gf.AvalancheWarning()
_aw.from_dict(w)
warnings_.append(_aw)
print(warnings_[0])
k = 'm'
def test_get_avalanche_warnings():
region_ids = [3003]
from_date = dt.date(2018, 12, 3)
to_date = dt.date(2018, 12, 7)
aw = gf.get_avalanche_warnings(region_ids, from_date, to_date, lang_key=1, as_dict=False)
aw_dict = gf.get_avalanche_warnings(region_ids, from_date, to_date, lang_key=1, as_dict=True)
df = pandas.DataFrame(aw_dict)
df.to_csv('../localstorage/test_aw_dict.csv', index_label='index')
k = 'm'
def get_season_17_18():
region_ids = [3003,3007,3009,3010,3011,3012,3013,3014,3015,3016,3017,3022,3023,3024,3027,3028,3029,3031,3032,3034,3035]
from_date = dt.date(2017, 12, 1)
to_date = dt.date(2018, 5, 31)
aw_dict = gf.get_avalanche_warnings(region_ids, from_date, to_date, lang_key=1, as_dict=True)
df = pandas.DataFrame(aw_dict)
df.to_csv('../localstorage/norwegian_avalanche_warnings_season_17_18.csv', index_label='index')
def get_season_18_19():
region_ids = [3003,3007,3009,3010,3011,3012,3013,3014,3015,3016,3017,3022,3023,3024,3027,3028,3029,3031,3032,3034,3035]
from_date = dt.date(2018, 12, 1)
to_date = dt.date(2019, 3, 11)
aw_dict = gf.get_avalanche_warnings(region_ids, from_date, to_date, lang_key=1, as_dict=True)
df = pandas.DataFrame(aw_dict)
df.to_csv('../localstorage/norwegian_avalanche_warnings_season_18_19.csv', index_label='index')
def get_svalbard_regional_forecasts():
region_ids = [3001, 3002, 3003, 3004]
from_date = dt.date(2016, 12, 1)
to_date = dt.date(2019, 4, 30)
aw_dict = gf.get_avalanche_warnings(region_ids, from_date, to_date, lang_key=1, as_dict=True)
df = pandas.DataFrame(aw_dict)
df.to_csv('../localstorage/svalbard_forecasts.csv', index_label='index')
def get_svalbard_regional_forecasts_2015():
region_ids = [130]
from_date = dt.date(2014, 12, 1)
to_date = dt.date(2015, 5, 31)
aw_dict = gf.get_avalanche_warnings(region_ids, from_date, to_date, lang_key=1, as_dict=True)
df = pandas.DataFrame(aw_dict)
df.to_csv('../localstorage/svalbard_forecasts_2015.csv', index_label='index')
# TODO: choose with get_season_18_19()
def get_season_raek(season='2018-19'):
"""Requests all forecasts (danger levels and problems) from the forecast api and writes to .csv file.
:param season: [string] Eg. '2019-20'. If parameter is not 7 char it will not make the csv.
"""
if len(season) == 7:
aw = gvp.get_all_forecasts(year=season)
aw_dict = [w.to_dict() for w in aw]
df = | pandas.DataFrame(aw_dict) | pandas.DataFrame |
import pytest
import numpy as np
import pandas as pd
import tensorflow as tf
from ml_hadoop_experiment.tensorflow import (
pandas_to_tensors
)
@pytest.mark.parametrize("init_data,result_data,default_value,dtype", [
([1, 2, None, 4], [1, 2, 3, 4], 3, np.int32),
([1.0, 2.0, None, 4.0], [1, 2, 3, 4], 3, np.int32),
([1.5, 2.5, None, 4.5], [1.5, 2.5, 3.5, 4.5], 3.5, np.float64),
(['a', 'b', None, 'd'], ['a', 'b', 'c', 'd'], "c", np.str)
])
def test_make_feature_list_scalar(init_data, result_data, default_value, dtype):
fun = pandas_to_tensors._make_feature_list_scalar("toto", default_value, dtype)
list_ = []
d = {'toto': init_data}
df = pd.DataFrame(data=d)
fun(df, list_)
assert len(list_) == 1
assert list_[0][0] == "toto"
assert (np.array_equal(list_[0][1], result_data))
@pytest.mark.parametrize("init_data,type", [
([1, 2, None, 4], np.int32),
([1.5, 2.5, None, 4.5], np.float64),
(['v', None], np.str)
])
def test_make_feature_list_scalar_no_default(init_data, type):
fun = pandas_to_tensors._make_feature_list_scalar("toto", None, type)
list_ = []
d = {'toto': init_data}
df = pd.DataFrame(data=d)
with pytest.raises(ValueError):
fun(df, list_)
def test_make_feature_list_varlen():
fun = pandas_to_tensors._make_feature_list_varlen("toto", np.str)
list_ = []
d = {'toto': [['a', 'b'], ['c', 'd'], None, ['e']]}
df = | pd.DataFrame(data=d) | pandas.DataFrame |
import pandas as pd
df1 = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet1')
print(df1)
df2 = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet2')
print(df2)
print(pd.merge(df1, df2))
df3 = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet3')
print(df3)
print( | pd.merge(df1, df3, on='编号') | pandas.merge |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, date_range, offsets
import pandas._testing as tm
class TestDataFrameShift:
def test_shift(self, datetime_frame, int_frame):
# naive shift
shiftedFrame = datetime_frame.shift(5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
shiftedFrame = datetime_frame.shift(-5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(-5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
# shift by 0
unshifted = datetime_frame.shift(0)
tm.assert_frame_equal(unshifted, datetime_frame)
# shift by DateOffset
shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay())
assert len(shiftedFrame) == len(datetime_frame)
shiftedFrame2 = datetime_frame.shift(5, freq="B")
tm.assert_frame_equal(shiftedFrame, shiftedFrame2)
d = datetime_frame.index[0]
shifted_d = d + offsets.BDay(5)
tm.assert_series_equal(
datetime_frame.xs(d), shiftedFrame.xs(shifted_d), check_names=False
)
# shift int frame
int_shifted = int_frame.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_frame_equal(shifted2, shifted3)
tm.assert_frame_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_fill_value(self):
# GH#24128
df = DataFrame(
[1, 2, 3, 4, 5], index=date_range("1/1/2000", periods=5, freq="H")
)
exp = DataFrame(
[0, 1, 2, 3, 4], index=date_range("1/1/2000", periods=5, freq="H")
)
result = df.shift(1, fill_value=0)
tm.assert_frame_equal(result, exp)
exp = DataFrame(
[0, 0, 1, 2, 3], index=date_range("1/1/2000", periods=5, freq="H")
)
result = df.shift(2, fill_value=0)
tm.assert_frame_equal(result, exp)
def test_shift_empty(self):
# Regression test for GH#8019
df = DataFrame({"foo": []})
rs = df.shift(-1)
tm.assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH#9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = pd.DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
# check all answers are the same
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
def test_shift_axis1_multiple_blocks(self):
# GH#35488
df1 = pd.DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = pd.DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1, df2], axis=1)
assert len(df3._mgr.blocks) == 2
result = df3.shift(2, axis=1)
expected = df3.take([-1, -1, 0, 1, 2], axis=1)
expected.iloc[:, :2] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
# rebuild df3 because `take` call above consolidated
df3 = pd.concat([df1, df2], axis=1)
assert len(df3._mgr.blocks) == 2
result = df3.shift(-2, axis=1)
expected = df3.take([2, 3, 4, -1, -1], axis=1)
expected.iloc[:, -2:] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_frame):
# TODO: remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq="B")
tm.assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
tm.assert_frame_equal(shifted, shifted3)
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.tshift(freq="M")
# DatetimeIndex
shifted = datetime_frame.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(datetime_frame, unshifted)
shifted2 = datetime_frame.tshift(freq=datetime_frame.index.freq)
tm.assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
shifted = inferred_ts.tshift(1)
expected = datetime_frame.tshift(1)
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(shifted, expected)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(unshifted, inferred_ts)
no_freq = datetime_frame.iloc[[0, 5, 7], :]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.tshift()
def test_tshift_deprecated(self, datetime_frame):
# GH#11631
with tm.assert_produces_warning(FutureWarning):
datetime_frame.tshift()
def test_period_index_frame_shift_with_freq(self):
ps = tm.makePeriodFrame()
shifted = ps.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_frame_equal(unshifted, ps)
shifted2 = ps.shift(freq="B")
tm.assert_frame_equal(shifted, shifted2)
shifted3 = ps.shift(freq=offsets.BDay())
tm.assert_frame_equal(shifted, shifted3)
def test_datetime_frame_shift_with_freq(self, datetime_frame):
shifted = datetime_frame.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_frame_equal(datetime_frame, unshifted)
shifted2 = datetime_frame.shift(freq=datetime_frame.index.freq)
tm.assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
shifted = inferred_ts.shift(1, freq="infer")
expected = datetime_frame.shift(1, freq="infer")
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(shifted, expected)
unshifted = shifted.shift(-1, freq="infer")
tm.assert_frame_equal(unshifted, inferred_ts)
def test_period_index_frame_shift_with_freq_error(self):
ps = | tm.makePeriodFrame() | pandas._testing.makePeriodFrame |
#!/usr/bin/python
import importlib
# Add page files here
import Grapher
import MainPage
import NewPredictionPage
import AddModelPage
import DevToolsPage
import DNNPage
from pathlib import Path
import tkinter as tk
from tkinter import ttk
import pandas
import os
import Model
LARGE_FONT = ("Verdana", 12, "bold")
SMALL_FONT = ("Verdana", 10)
datafolder = Path("Data/Tickers")
# Base of the user interface; calls pages to be used from frames.
class UserInterface(tk.Tk):
# Easier for reading
def __init__(self, *args, **kwargs):
#======================= Creating the window =====================
super().__init__(*args, **kwargs)
#tk.Tk.iconbitmap(self, default = "Zoltar_Icon.ico")
tk.Tk.wm_title(self, "Zoltar")
# Container = window seen
container = tk.Frame(self)
container.pack(side = "top", fill = "both", expand = True)
container.grid_rowconfigure(0, weight = 1)
container.grid_rowconfigure(1, weight = 1)
container.grid_columnconfigure(0, weight = 3) # Weights the graph to be larger
container.grid_columnconfigure(1, weight = 1)
# Frame configuration: loop runs through right-side frames
self.frames = {}
# Add all right-side frames to this loop
for F in (MainPage.MainWindow, NewPredictionPage.NewPredictionWindow, AddModelPage.AddModelWindow,
DevToolsPage.DevToolsWindow):
frame = F(container, self)
self.frames[F] = frame
frame.grid(row = 0, column = 1, sticky = "nsew")
frame = DNNPage.DNNWindow(container, self)
self.frames[DNNPage.DNNWindow] = frame
frame.grid(row = 1, column = 1, sticky = "nsew")
# Only 1 Left-side frame, but same function as loop
frame = Grapher.GrapherWindow(container, self)
self.frames[Grapher.GrapherWindow] = frame
frame.grid(row = 0, rowspan = 2, column = 0, sticky = "nsew")
self.showFrame(MainPage.MainWindow) # Initial page to show
#====================== Data Handling Methods ======================
# Needs list: report available csv, append Model Results, get stock name,
# Get CSV from Stock name
def saveNewPrediction(self):
#============================== Collecting Variables ===============================
frame = self.frames[NewPredictionPage.NewPredictionWindow]
stockNames = frame.getCurrentlySelectedStocks()
startDate = frame.getStartDate()
endDate = frame.getEndDate()
predictionName = frame.getName()
predictionWindow = frame.getPredictionWindow()
print("someting")
print(predictionWindow)
path = "Data" + os.sep + "Saved_Stock_Data" + os.sep + predictionName + ".csv"
#=============================== Generating File ==================================
allData = pandas.DataFrame()
for stock in stockNames:
# Get the stock data from a CSV and put it in a data frame for editing
stockPath = "Data" + os.sep + "Tickers" + os.sep + stock + "_PriceData_40Years.csv"
csvData = pandas.read_csv(stockPath)
# Reanme the thrid colunm to the stock name
csvData.columns = ["Date", "Open", stock]
# Get rid of the open column, we just look at close prices
del csvData["Open"]
csvData["Date"] = pandas.to_datetime(csvData["Date"])
csvData = csvData.set_index("Date")
# The grapher has trouble with large floating numbers so truncate here
csvData[stock] = csvData[stock].round(decimals = 2)
# Then add it to the larger dataframe
allData = pandas.concat([allData, csvData], axis=1, sort=False)
allData['stockNames'] = stockNames
# Save the combined, modified, data frames
allData[startDate:endDate].to_csv(path, mode='a', header = True)
def getAvailableCSVs(self):
fileNames = os.listdir("Data" + os.sep + "Saved_Stock_Data")
return fileNames
def appendModelResults(self, fileName, modelResults):
try:
csvData = pandas.read_csv(fileName)
except:
print("ERROR: when reading csv file {}; aborting.".format(fileName))
return False
csvData['Model-Prediction'] = modelResults
try:
csvData.to_csv(fileName)
except:
print("ERROR: when writing to csv file{}; aborting.".format(filename))
return False
return True
def getAvailableStockNames(self):
stockNames = ["A", "AAP", "ABBV", "ABT", "ACN", "ADBE", "AES", "AET", "AFL",
"AKAM", "ALB", "ALK", "AMD", "AMG", "AOS", "APD",
"ARE", "ATVI", "AYI", "MMM"]
return stockNames
def getTickerCSVFromName(self, stockName):
fileName = stockName + "_PriceData.csv"
if os.path.isfile(datafolder + "/" + filename):
return fileName
print("ERROR: No stock data found for {}".format(filename))
#====================== DNN Handling Methods =======================
# Needs List: Load model results, report available models,
# Train new DNN
def getAvailableModels(self):
modelNames = ["Test Model"]
return modelNames
def getModelResults(self, modelName):
# Test data, unsure how models are run [[time], [up/down]]
results = [[1, 2, 3, 4,], [0, 0, 1, 1]]
return results
#===================== Grapher Interface Methods ========================
# Needs List:
def changeGrapherLabel(self, newLabel):
frame = self.frames[Grapher.GrapherWindow]
frame.GrapherWindow.changeLabel(newLabel)
def displayGraph(self, fileName = "TestData.csv"):
try:
data = | pandas.read_csv("Data" + os.sep + "Saved_Stock_Data" + os.sep + fileName) | pandas.read_csv |
import copy
import gc
import os
from datetime import datetime
import numpy as np
import pandas as pd
import tifffile as tif
from tifffile import TiffWriter
from .adaptive_estimation import AdaptiveShiftEstimation
from .image_positions import load_necessary_xml_tags, get_image_sizes_scan_auto, get_image_sizes_scan_manual, \
get_path_for_each_plane_and_field_per_channel
from .image_processing import stitch_z_projection, create_z_projection_for_fov, stitch_plane, stitch_images
from .ome_tags import create_ome_metadata, get_channel_metadata
from .saving_loading import load_parameters, save_parameters
class ImageStitcher:
def __init__(self):
# user input
self._img_dir = ''
self._xml_path = None
self._out_dir = ''
self._reference_channel = ''
self._stitch_only_ch = ['all']
self._scan = ''
self._stitching_mode = ''
self._ill_cor_ch = ['none']
self._is_adaptive = True
self._make_preview = True
self._save_param = ''
self._load_param_path = 'none'
self._img_name = ''
self._fovs = None
self._extra_meta = None
# working variables
self._channel_names = []
self._nchannels = 0
self._dtype = np.uint16
self._measurement_time = ''
self._ome_meta = ''
self._preview_ome_meta = ''
self._channel_ids = {}
self._y_pos = None
self._default_img_shape = tuple()
def stitch(self):
st = datetime.now()
print('\nstarted', st)
self.check_dir_exist()
self.check_scan_modes()
tag_Images, field_path_list, plane_path_list = self.load_metadata()
self._default_img_shape = (int(tag_Images[0].find('ImageSizeY').text), int(tag_Images[0].find('ImageSizeX').text))
ids, x_size, y_size = self.estimate_image_sizes(tag_Images, field_path_list)
self.generate_ome_meta(self._channel_ids, x_size, y_size, tag_Images, plane_path_list)
self.perform_stitching(ids, x_size, y_size, plane_path_list, field_path_list, self._ome_meta)
self.write_separate_ome_xml()
fin = datetime.now()
print('\nelapsed time', fin - st)
def check_dir_exist(self):
# check if input and output directories exist
if not os.path.isdir(self._img_dir):
raise ValueError('img_dir does not exist')
if not os.path.exists(self._out_dir):
os.makedirs(self._out_dir)
if not self._out_dir.endswith('/'):
self._out_dir = self._out_dir + '/'
if not self._img_dir.endswith('/'):
self._img_dir = self._img_dir + '/'
if self._xml_path is None:
self._xml_path = self._img_dir + 'Index.idx.xml'
def check_scan_modes(self):
available_scan_modes = ('auto', 'manual')
if self._scan not in available_scan_modes:
raise ValueError('Incorrect scan mode. Available scan modes ' + ', '.join(available_scan_modes))
available_stitching_modes = ('stack', 'maxz')
if self._stitching_mode not in available_stitching_modes:
raise ValueError(
'Incorrect stitching mode. Available stitching modes ' + ', '.join(available_stitching_modes))
def load_metadata(self):
tag_Images, tag_Name, tag_MeasurementStartTime = load_necessary_xml_tags(self._xml_path)
if self._fovs is not None:
self._fovs = [int(f) for f in self._fovs.split(',')]
plane_path_list, field_path_list = get_path_for_each_plane_and_field_per_channel(tag_Images, self._img_dir, self._fovs)
nchannels = len(plane_path_list.keys())
channel_names = list(plane_path_list.keys())
channel_ids = {ch: i for i, ch in enumerate(channel_names)}
if isinstance(self._stitch_only_ch, str):
self._stitch_only_ch = [self._stitch_only_ch]
if self._stitch_only_ch == ['all']:
self._stitch_only_ch = channel_names
if self._reference_channel == 'none':
self._reference_channel = channel_names[0]
elif self._stitch_only_ch != ['all']:
# if user specified custom number of channels check if they are correct
for i in self._stitch_only_ch:
if i not in channel_names:
raise ValueError('There is no channel with name ' + i + ' in the XML file. ' +
'Available channels ' + ', '.join(channel_names))
if self._reference_channel == 'none':
self._reference_channel = self._stitch_only_ch[0]
nchannels = len(self._stitch_only_ch)
channel_names = self._stitch_only_ch
if isinstance(self._ill_cor_ch, str):
self._ill_cor_ch = [self._ill_cor_ch]
if self._ill_cor_ch == ['all']:
self._ill_cor_ch = {ch: True for ch in channel_names}
elif self._ill_cor_ch == ['none']:
self._ill_cor_ch = {ch: False for ch in channel_names}
else:
self._ill_cor_ch = {ch: (True if ch in self._ill_cor_ch else False) for ch in channel_names}
self._channel_ids = {k: v for k, v in channel_ids.items() if k in channel_names}
self._channel_names = channel_names
self._nchannels = nchannels
self._measurement_time = tag_MeasurementStartTime
if self._img_name == '':
self._img_name = tag_Name
if not self._img_name.endswith(('.tif', '.tiff')):
self._img_name += '.tif'
return tag_Images, field_path_list, plane_path_list
def estimate_image_sizes(self, tag_Images, field_path_list):
if self._load_param_path == 'none':
if self._scan == 'auto':
ids, x_size, y_size, ids_in_clusters, self._y_pos = get_image_sizes_scan_auto(tag_Images, self._reference_channel, self._fovs)
elif self._scan == 'manual':
ids, x_size, y_size = get_image_sizes_scan_manual(tag_Images, self._reference_channel, self._fovs)
if self._is_adaptive == False:
ids = | pd.DataFrame(ids) | pandas.DataFrame |
from django.shortcuts import render
from django.http import HttpResponse
from django.views import View
import pytz
import numpy as np
from datetime import datetime, time
import pandas as pd
import os, subprocess, psutil
from django.conf.urls.static import static
from . forms import SubmitTickerSymbolForm
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) #points to static folder
class CommandCenterView(View):
def __init__(self):
self.the_form = SubmitTickerSymbolForm()
self.month_year = datetime.now().strftime('%d | %B | %Y')
def contextRender(self, request,*args,**kwargs):
'''Common context renderer for the CommandCenterView'''
context = {
"title": "Command center",
"form": self.the_form,
"month_year": self.month_year,
"twsRunning": kwargs['msg'],
}
return render(request, "ib/commandCenter.html", context)
def get(self, request, *args, **kwargs):
t_msg = "Keep up the good work :)"
return self.contextRender(request\
,msg=t_msg)
def post(self, request, *args, **kwargs):
form = SubmitTickerSymbolForm(request.POST)
# launch trader work station(TWS)
if request.method == 'POST' and 'launchTws' in request.POST.keys():
if "tws.exe" in (p.name() for p in psutil.process_iter()):
t_msg = "TWS is running..."
return self.contextRender(request\
,msg=t_msg)
else:
subprocess.Popen(['C:\\Jts\\tws.exe'])
t_msg = "Launching TWS..."
return self.contextRender(request\
,msg=t_msg)
#add a ticker to forex list
elif request.method == 'POST' and 'forexQuote0' in request.POST.keys():
fName = "static\\csv\\forexWatchList.csv"
csvPathForex = os.path.join(BASE_DIR, fName )
forex_ticker = form.data['tickerSymbol'].upper()
columns = ['ticker', 'pid', 'clientid']
emptydf = pd.DataFrame(columns=columns)
try:
df = pd.read_csv(csvPathForex)
except:
emptydf.to_csv(csvPathForex, sep=',', index=False)
df = pd.read_csv(csvPathForex)
client_id = [i for i in range(20, 25) if i not in df['clientid'].values ][0]
if forex_ticker in df['ticker'].values:
t_msg = "FAILED! "+forex_ticker+ " is already in the STOCK list"
return self.contextRender(request\
,msg=t_msg)
else:
insertPoint = len(df['ticker'].values)
df.loc[insertPoint, 'ticker'] = forex_ticker # df.loc is the trick to add to eend of row
df.loc[insertPoint, 'clientid'] = client_id
df.to_csv(csvPathForex, sep=',', index=False)
t_msg = " Added " + forex_ticker+ " to FOREX list"
return self.contextRender(request\
,msg=t_msg)
#add a ticker to stock list
elif request.method == 'POST' and 'stockQuote0' in request.POST.keys():
fName = "static\\csv\\stockWatchList.csv"
csvPathStock = os.path.join(BASE_DIR, fName )
stock_ticker = form.data['tickerSymbol'].upper()
columns = ['ticker', 'pid', 'clientid']
emptydf = pd.DataFrame(columns=columns)
try:
df = pd.read_csv(csvPathStock)
except:
emptydf.to_csv(csvPathStock, sep=',', index=False)
df = pd.read_csv(csvPathStock)
# insertPoint = len([i for i in df['ticker'].values if isinstance(i, str)])
client_id = [i for i in range(5, 20) if i not in df['clientid'].values ][0]
if stock_ticker in df['ticker'].values:
t_msg = "FAILED! "+stock_ticker+ " is already in the STOCK list"
return self.contextRender(request\
,msg=t_msg)
else:
#create emty csv to deal with file not found error
fName = "static\\csv\\realtimeData\\" + stock_ticker + "_raw_realtime_ib.csv"
csvPath = os.path.join(BASE_DIR, fName ) # original data
columns = ['Time', 'Open', 'High', 'Low', 'Close']
try:
if datetime.fromtimestamp(os.path.getmtime(csvPath)).date() < \
datetime.now(tz=pytz.timezone('US/Eastern')).date():
emptyDf = pd.DataFrame(columns=columns)
emptyDf.to_csv(csvPath, sep=',', index=False)
except:
emptyDf = pd.DataFrame(columns=columns)
emptyDf.to_csv(csvPath, sep=',', index=False)
insertPoint = len(df['ticker'].values)
df.loc[insertPoint, 'ticker'] = stock_ticker # df.loc is the trick to add to eend of row
df.loc[insertPoint, 'clientid'] = client_id
df.to_csv(csvPathStock, sep=',', index=False)
t_msg = " Added " + stock_ticker+ " to STOCK list"
return self.contextRender(request\
,msg=t_msg)
#remove a ticker from the forex list
elif request.method == 'POST' and 'forexRow' in request.POST.keys():
fName = "static\\csv\\forexWatchList.csv"
csvPathForex = os.path.join(BASE_DIR, fName )
row_number = int(request.POST['forexRow'])
f_ticker = request.POST['forexTicker']
df = pd.read_csv(csvPathForex)
pid_insert_point = df['ticker'].values.tolist().index(f_ticker)
pid = df['pid'].iloc[pid_insert_point].astype(int)
try:
p = psutil.Process(pid)
p.terminate()
try:
fName_rt_raw = "static\\csv\\realtimeData\\"+f_ticker+"_raw_realtime_ib.csv"
fName_rt = "static\\csv\\realtimeData\\"+f_ticker+"_realtime_ib.csv"
csvPathForex_rt_raw = os.path.join(BASE_DIR, fName_rt)
csvPathForex_rt = os.path.join(BASE_DIR, fName_rt_raw)
os.remove(csvPathForex_rt_raw)
os.remove(csvPathForex_rt)
except:
pass
df.drop(df.index[row_number], inplace=True)
df.to_csv(csvPathForex, sep=',', index=False)
t_msg = "Process terminated! \n Successfully removed CSV and "\
+ f_ticker+" from FOREX list"
return self.contextRender(request\
,msg=t_msg)
except:
try:
fName_rt_raw = "static\\csv\\realtimeData\\"+f_ticker+"_raw_realtime_ib.csv"
fName_rt = "static\\csv\\realtimeData\\"+f_ticker+"_realtime_ib.csv"
csvPathForex_rt_raw = os.path.join(BASE_DIR, fName_rt)
csvPathForex_rt = os.path.join(BASE_DIR, fName_rt_raw)
os.remove(csvPathForex_rt_raw)
os.remove(csvPathForex_rt)
except:
pass
df.drop(df.index[row_number], inplace=True)
df.to_csv(csvPathForex, sep=',', index=False)
t_msg = "Successfully removed "\
+ f_ticker+" from FOREX list! \n No active "+ f_ticker+" downloads!"
return self.contextRender(request\
,msg=t_msg)
#remove a ticker from the stock list
elif request.method == 'POST' and 'stockRow' in request.POST.keys():
fName = "static\\csv\\stockWatchList.csv"
csvPathStock = os.path.join(BASE_DIR, fName )
row_number = int(request.POST['stockRow'])
s_ticker = request.POST['stockTicker']
df = pd.read_csv(csvPathStock)
pid_insert_point = df['ticker'].values.tolist().index(s_ticker)
pid = df['pid'].iloc[pid_insert_point].astype(int)
try:
# terminate quote downloads
p = psutil.Process(pid)
p.terminate()
#remove csv files
try:
fName_rt_raw = "static\\csv\\realtimeData\\"+s_ticker+"_raw_realtime_ib.csv"
fName_rt = "static\\csv\\realtimeData\\"+s_ticker+"_realtime_ib.csv"
csvPathForex_rt_raw = os.path.join(BASE_DIR, fName_rt)
csvPathForex_rt = os.path.join(BASE_DIR, fName_rt_raw)
os.remove(csvPathForex_rt_raw)
os.remove(csvPathForex_rt)
except:
pass
# remove from list
df.drop(df.index[row_number], inplace=True)
df.to_csv(csvPathStock, sep=',', index=False)
t_msg = "Process terminated! \n Successfully removed "\
+ s_ticker+" from STOCK list"
return self.contextRender(request\
,msg=t_msg)
except:
try:
fName_rt_raw = "static\\csv\\realtimeData\\"+s_ticker+"_raw_realtime_ib.csv"
fName_rt = "static\\csv\\realtimeData\\"+s_ticker+"_realtime_ib.csv"
csvPathForex_rt_raw = os.path.join(BASE_DIR, fName_rt)
csvPathForex_rt = os.path.join(BASE_DIR, fName_rt_raw)
os.remove(csvPathForex_rt_raw)
os.remove(csvPathForex_rt)
except:
pass
df.drop(df.index[row_number], inplace=True)
df.to_csv(csvPathStock, sep=',', index=False)
t_msg = " Successfully removed "\
+ s_ticker+" from STOCK list! \n No active "+ s_ticker+" downloads!"
return self.contextRender(request\
,msg=t_msg)
# get forex quote for a clicked ticker
elif request.method == 'POST' and 'forexQuote' in request.POST.keys():
fName = "static\\csv\\forexWatchList.csv"
csvPathForex = os.path.join(BASE_DIR, fName )
f_ticker = request.POST['forexQuote']
df = pd.read_csv(csvPathForex)
pid_insert_point = df['ticker'].values.tolist().index(f_ticker)
try:
q = psutil.Process(df['pid'].iloc[pid_insert_point].astype(int))
t_msg = "FAILED to download FOREX "+ f_ticker\
+"!\n Terminate the ongoing download to start again"
return self.contextRender(request\
,msg=t_msg)
except:
client_id_no = df['clientid'].iloc[pid_insert_point].astype(int) # type conversion from np dtype to python datatype
scriptPath= ['E:\\ProgramData\\Anaconda3\\python.exe'\
, 'E:\\ProgramData\\Anaconda3\\Scripts\\mysite\\static\\py\\Forex1RealtimeIB.py']
args = ["-t", f_ticker, "-i", str(client_id_no)]
scriptPath.extend(args)
proc1 = subprocess.Popen(scriptPath)
df['pid'].iloc[pid_insert_point] = proc1.pid
df.to_csv(csvPathForex, sep=',', index=False)
if "tws.exe" in (p.name() for p in psutil.process_iter()):
try:
p = psutil.Process(proc1.pid)
t_msg = "Downloading FOREX "+ f_ticker+" now!"
return self.contextRender(request\
,msg=t_msg)
except:
t_msg = "FAILED to download FOREX "+ f_ticker+" check TWS status"
return self.contextRender(request\
,msg=t_msg)
else:
t_msg = "FAILED to download FOREX "+ f_ticker\
+" Please lauch TWS and try again "
return self.contextRender(request\
,msg=t_msg)
# get stock quote for the clicked ticker
elif request.method == 'POST' and 'stockQuote' in request.POST.keys():
fName = "static\\csv\\stockWatchList.csv"
csvPathStock = os.path.join(BASE_DIR, fName )
s_ticker = request.POST['stockQuote']
df = | pd.read_csv(csvPathStock) | pandas.read_csv |
import pandas as pd
from .constants import SERVICE_URL
def get_url(*params, **kwargs):
"""
:param params: the URL path component strings
:option app: the CyREST application name
:return: the REST app URL
"""
path = [SERVICE_URL]
app = kwargs.get('app')
if app is not None:
path.append(app)
path.append('v1')
path.extend(params)
return '/'.join(path)
def get_fi_url(*params):
"""
A convenience function to format a URL path for
the `reactomefiviz` application.
:param params: the URL path component strings
:return: the CyREST Reactome FI url
"""
return get_url(*params, app='reactomefiviz')
def parse_fi_table_response(resp, parsers, index=None):
"""
Returns the data frame for the given CyREST Reactome
FI response. The response `data` property object must
be a JSON object with properties *tableHeaders* and
*tableContent*. If the response `data` is empty, then
this function returns an empty data frame with columns
given by the *parsers* keys.
The required *parsers* dictionary argument associates
a parser with each column.
:param resp: the CyREST response
:param parsers: the column parser dictionary
:option index: the index column name
:return: the parsed data frame
"""
# The response JSON data object.
data = resp.json()['data']
# The data columns.
columns = data.get('tableHeaders') if data else parsers.keys()
# The default "parser".
identity = lambda value: value
parsers_list = [parsers.get(col, identity) for col in columns]
# Parses a content row.
parse_row = lambda row: tuple(parsers_list[i](value)
for i, value in enumerate(row))
# The parsed content list.
content = map(parse_row, data['tableContent']) if data else []
# Return the data frames.
return | pd.DataFrame.from_records(content, index=index, columns=columns) | pandas.DataFrame.from_records |
# Importing Libraries
import pathlib
import warnings
from sklearn.model_selection import train_test_split
from tqdm import notebook
import time
import numpy as np
import pandas as pd
from sklearn.ensemble import *
from sklearn.preprocessing import StandardScaler
import socket
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from scipy.stats import pearsonr
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import roc_auc_score, average_precision_score
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
import shap
import time
from joblib import Parallel, delayed
from sklearn.svm import LinearSVR
import random
import optuna
import mlflow
import sys
import os
from mlflow import log_metric, log_param, log_artifacts
#Igonre Warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
os.environ["PYTHONWARNINGS"] = "ignore" # Also affect subprocesses
#Calculate regression results
def regressionResult(y_true, predicted):
pearson = pearsonr(y_true, predicted)
mae = mean_absolute_error(y_true, predicted)
maepearson=1-mae+np.abs(pearson[0])
return maepearson
#Objective function for Bayesian Search
def objective(trial,data,target):
train_x, valid_x, train_y, valid_y = train_test_split(data, target, test_size=0.25, random_state=42)
# SVR will take the parameters
param = {
'clf__epsilon': trial.suggest_loguniform('epsilon', 1e-5, 1e-1),
'clf__C': trial.suggest_loguniform('C', 1e0, 1e4)
}
model = Pipeline([('scale', StandardScaler()),('clf', ML_instances["SVR"])])
bst = model.fit(train_x, train_y)
preds = bst.predict(valid_x)
mae = regressionResult(valid_y, preds)
return mae
#Importance Scores from SVR
def feature_importance(cc,column):
print(cc,column)
flag=0
noofsamples=samples
rankThreshold=5
df_sum=pd.DataFrame()
for k in range(noofsamples):
rowfrac=random.uniform(0.2, 0.8)
colfrac=random.uniform(0.2, 0.8)
if fourth_line !="":
if(column in tf_list):
Ntrain_df=train_df[tf_list].copy()
else:
Ntrain_df=pd.concat([train_df[tf_list],train_df[column]],axis=1)
else:
Ntrain_df=train_df.copy()
Ntrain_df=Ntrain_df.sample(frac=rowfrac)
Ntrain_df=pd.concat([Ntrain_df.drop(column, axis = 1).sample(frac=colfrac,axis="columns"),Ntrain_df[column]],axis=1)
y_train=Ntrain_df[column].to_numpy()
X_train = Ntrain_df.drop(column, axis = 1).to_numpy()
New_index=Ntrain_df.drop(column, axis = 1).columns+"_"+column
optuna.logging.set_verbosity(optuna.logging.WARNING)
study = optuna.create_study(direction="maximize")
study.optimize(lambda trial: objective(trial, X_train, y_train), n_trials=notrial, timeout=40,show_progress_bar=False)
# print(study.best_params)
dic=study.best_params
model = make_pipeline(StandardScaler(),LinearSVR(**dic))
clf = model.fit(X_train, y_train)
vals = np.abs(clf[1].coef_)
coeff=pd.DataFrame(vals, index=New_index, columns=['feat_importance'])
coeff.sort_values(by="feat_importance", inplace=True,ascending= False )
coeff[0:rankThreshold]=1
coeff[rankThreshold:len(coeff)]=0
if flag==0:
df_sum=coeff.copy()
flag=1
else:
df_sum = df_sum.add( coeff, fill_value=0)
return df_sum
# %%
#Importance Scores from ETR and RFR
def feature_importance2(cc,column):
print(cc,column)
y_train=train_df[column].to_numpy()
if fourth_line !="":
tftrain_df=train_df[tf_list]
else:
tftrain_df=train_df.copy()
if column in tftrain_df.columns:
X_train = tftrain_df.drop(column, axis = 1).to_numpy()
New_index=tftrain_df.drop(column, axis = 1).columns+"_"+column
else:
X_train = tftrain_df.to_numpy()
New_index=tftrain_df.columns+"_"+column
model = Pipeline([('scale', StandardScaler()),('clf', ML_instances[cc]),])
clf =model.fit(X_train, y_train)
explainer = shap.TreeExplainer(clf[1])
shap_values = explainer.shap_values(X_train,check_additivity=False)
vals1 = np.abs(shap_values).mean(0)
vals2 =clf[1].feature_importances_
df= pd.concat([pd.DataFrame(vals1, index=New_index, columns=['feat_importance']) , pd.DataFrame(vals2, index=New_index, columns=['shap'])],axis=1)
return df
#Calculate Classificiation Accuracy
def classificationResult(y, predicted,predicted_proba,Output_file,FileName,MethodName,flag=None):
auc_score= round(roc_auc_score(y, predicted_proba), 4)
aucPR_score= round(average_precision_score(y, predicted_proba), 4)
if(flag==None):
print("AUCROC (%),",round(auc_score, 3))
print("AUCPR (%),",round(aucPR_score, 3))
print("Average (%),",round((auc_score+aucPR_score)/2, 3))
if(flag==None):
print("AUCROC (%),",round(auc_score, 3),file=Output_file)
print("AUCPR (%),",round(aucPR_score, 3),file=Output_file)
print("Average (%),",round((auc_score+aucPR_score)/2, 3) ,file=Output_file)
mlflow.start_run(run_name=FileName)
mlflow.log_param("Method", MethodName)
log_metric("AUPR", auc_score)
log_metric("AUROC", aucPR_score)
log_metric("Average", (auc_score+aucPR_score)/2)
mlflow.end_run()
return (auc_score+aucPR_score)/2
#Calculate the AUROC and AUPR based on the groudtruth data
def evalresults(groundtruth_path,result_path,Output_file,FileName):
ground_truth=pd.read_csv(groundtruth_path.strip(),sep='\t',header=None)
new_index=ground_truth[0]+"_"+ground_truth[1]
ground_truth.index=new_index
ground_truth=ground_truth.drop([0,1], axis = 1)
ground_truth=ground_truth.sort_index()
ground_truth=ground_truth.rename(columns={2: "GroundTruth"})
ground_truth
ETR= | pd.read_csv("./"+result_path+"/ETR.csv",index_col=0) | pandas.read_csv |
import json
import multiprocessing as mp
import click
from fuzzywuzzy import fuzz
import numpy as np
import pandas as pd
RESULT_PATH = '../results'
BB_PATH = '../storage'
MSD_PATH = '../storage'
# Implements Click: a package for creating command line interfaces
@click.group()
@click.option(
'--path', default='.', help='The path where the results are stored.')
def cli(path):
global RESULT_PATH
RESULT_PATH = path
def main1():
msd_track_duplicates()
def main2():
msd = read_msd_unique_tracks()
year = read_msd_tracks_per_year()[['msd_id', 'year']]
billboard = read_billboard_tracks()
features = read_msd_feature_files()
msd = join(msd, year, on=['msd_id'])
msd = join(msd, features, on=['msd_id'])
matches = join(msd, billboard, on=['artist', 'title'])
duplicates = matches[matches.duplicated(
subset=['artist', 'title'], keep=False)]
duplicates.to_csv(RESULT_PATH + '/msd_bb_matches_duplicates.csv')
results = join(msd, billboard, on=['artist', 'title'], how='left')
duplicates = results[results.duplicated(
subset=['artist', 'title'], keep=False)]
duplicates.to_csv(RESULT_PATH + '/msd_bb_all_duplicates.csv')
@cli.command()
def match():
msd = read_msd_unique_tracks()
year = read_msd_tracks_per_year()[['msd_id', 'year']]
billboard = read_billboard_tracks()
features = read_msd_feature_files()
msd = join(msd, year, on=['msd_id'])
msd = join(msd, features, on=['msd_id'])
matches = join(msd, billboard, on=['artist', 'title'])
keep_first_duplicate(matches)
matches.to_csv(RESULT_PATH + '/msd_bb_matches.csv')
results = join(msd, billboard, on=['artist', 'title'], how='left')
keep_first_duplicate(results)
results.to_csv(RESULT_PATH + '/msd_bb_all.csv')
df_split = np.array_split(results, mp.cpu_count() * 4)
with mp.Pool() as pool:
result_entries = pool.imap_unordered(_fuzzy_match, df_split)
fuzzy_results = pd.DataFrame(
columns=list(msd.columns) + ['max_sim', 'artist_sim', 'title_sim'])
for result in result_entries:
fuzzy_results = fuzzy_results.append(
result, ignore_index=True, sort=False)
fuzzy_results.to_csv(RESULT_PATH + '/msd_bb_fuzzy_matches.csv')
fuzzy_results = fuzzy_results.loc[fuzzy_results['title_sim'] <= 40]
fuzzy_results = fuzzy_results[[
'msd_id', 'echo_nest_id', 'artist', 'title', 'year'
]]
fuzzy_results.to_csv(RESULT_PATH + '/msd_bb_non_matches.csv')
@cli.command()
def combine_lowlevel_features():
features = _combine_features(_combine_ll_features)
features.to_hdf(RESULT_PATH + '/msd_bb_ll_features.h5', 'll')
@cli.command()
def combine_highlevel_features():
features = _combine_features(_combine_hl_features)
features.to_hdf(RESULT_PATH + '/msd_bb_hl_features.h5', 'hl')
def _combine_features(combine_function):
hits = set(read_hits()['msd_id'])
non_hits = set(read_non_hits()['msd_id'])
msd_ids = hits | non_hits
all_features = pd.DataFrame()
df_split = np.array_split(list(msd_ids), mp.cpu_count() * 4)
with mp.Pool() as pool:
features = pool.imap_unordered(combine_function, df_split)
for feature in features:
all_features = all_features.append(
feature, sort=False, ignore_index=True)
return all_features
return None
def _combine_ll_features(msd_ids):
features_path = MSD_PATH # noqa E501
ll_features = pd.DataFrame()
for msd_id in msd_ids:
try:
file_id = pd.DataFrame([msd_id], columns=['msd_id'])
feature = pd.io.json.json_normalize(
_get_lowlevel_feature(features_path, msd_id))
ll_features = ll_features.append(
file_id.join(feature), sort=False, ignore_index=True)
except FileNotFoundError as error:
print(error)
return ll_features
def _combine_hl_features(msd_ids):
features_path = MSD_PATH # noqa E501
hl_features = pd.DataFrame()
for msd_id in msd_ids:
try:
file_id = pd.DataFrame([msd_id], columns=['msd_id'])
feature = pd.io.json.json_normalize(
_get_highlevel_feature(features_path, msd_id))
hl_features = hl_features.append(
file_id.join(feature), sort=False, ignore_index=True)
except FileNotFoundError as error:
print(error)
return hl_features
def _fuzzy_match(msd):
billboard = read_billboard_tracks()
results = pd.DataFrame(
columns=list(msd.columns) + ['max_sim', 'artist_sim', 'title_sim'])
for _, row_msd in msd.iterrows():
entry = {
**row_msd,
'max_sim': 0,
}
for _, row_bb in billboard.iterrows():
artist_sim, title_sim = fuzz.ratio(
row_msd['artist'], row_bb['artist']), fuzz.ratio(
row_msd['title'], row_bb['title'])
sim = fuzz.ratio(row_msd['artist'] + '|#|' + row_msd['title'],
row_bb['artist'] + '|#|' + row_bb['title'])
if sim > entry['max_sim']:
entry['max_sim'] = sim
entry['artist_sim'] = artist_sim
entry['title_sim'] = title_sim
entry['peak'] = row_bb['peak']
entry['weeks'] = row_bb['weeks']
entry = pd.Series(entry)
results = results.append(entry, ignore_index=True)
return results
def keep_first_duplicate(data):
data.drop_duplicates(
subset=['artist', 'title'], keep='first', inplace=True)
def remove_duplicates(data):
data.drop_duplicates(subset=['artist', 'title'], keep=False, inplace=True)
data.drop_duplicates(subset=['echo_nest_id'], keep=False, inplace=True)
def match_and_store_datasets(left,
right,
output_file,
how='inner',
hdf=None,
key='data'):
combined = join(left, right, on=['artist', 'title'], how=how)
if hdf:
combined.to_hdf(output_file, key=key)
else:
combined.to_csv(output_file)
def join(left, right, on, how='inner'):
return pd.merge(left, right, how=how, left_on=on, right_on=on)
def bb_track_duplicates():
bb = read_billboard_tracks()
tracks = bb.groupby(['artist', 'title'])
for index, group in tracks:
group_cnt = group.count()['peak']
if group_cnt > 1:
print(index, group_cnt)
def msd_track_duplicates():
msd = read_msd_unique_tracks()
unique_file_count = len(set(msd['msd_id']))
unique_id_count = len(set(msd['echo_nest_id']))
print(str(unique_file_count) + ',' + str(unique_id_count))
tracks = msd.groupby(['artist', 'title'])
count = 0
for index, group in tracks:
group_cnt = group.count()['msd_id']
if group_cnt > 1:
for item in group['msd_id']:
output_line = item + ',' + index
print(output_line)
count += 1
print(len(tracks), count)
def _get_highlevel_feature(features_path, msd_id):
file_suffix = '.mp3.highlevel.json'
return _load_feature(features_path, msd_id, file_suffix)
def _get_lowlevel_feature(features_path, msd_id):
file_suffix = '.mp3'
return _load_feature(features_path, msd_id, file_suffix)
def _load_feature(features_path, msd_id, file_suffix):
file_prefix = '/features_tracks_' + msd_id[2].lower() + '/'
file_name = features_path + file_prefix + msd_id + file_suffix
with open(file_name) as features:
return json.load(features)
def read_msd_tracks_per_year():
file_path = MSD_PATH + '/additional_files/tracks_per_year.txt'
return pd.read_csv(
file_path,
sep='<SEP>',
header=None,
names=['year', 'msd_id', 'artist', 'title'])
def read_msd_unique_artists():
file_path = MSD_PATH + '/additional_files/unique_tracks.txt'
return pd.read_csv(
file_path,
sep='<SEP>',
header=None,
names=['artist_id', 'mb_artist_id', 'msd_id', 'artist'])
def read_msd_unique_tracks():
file_path = MSD_PATH + '/additional_files/unique_tracks.txt'
return pd.read_csv(
file_path,
sep='<SEP>',
header=None,
names=['msd_id', 'echo_nest_id', 'artist', 'title'])
def read_msd_feature_files():
file_path = MSD_PATH + '/file_ids.csv'
return pd.read_csv(file_path, header=None, names=['msd_id'])
def read_billboard_tracks():
file_path = BB_PATH + '_mp3/billboard_1954-2018_summary.csv'
return pd.read_csv(file_path)
def read_hits():
file_path = BB_PATH + '/msd_bb_matches.csv'
return pd.read_csv(file_path)
def read_non_hits():
file_path = BB_PATH + '/msd_bb_non_matches.csv'
return | pd.read_csv(file_path) | pandas.read_csv |
"""Collect specific gene ontologies, and additional background/complex information """
import os
import re
import functools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sa
import statsmodels.formula.api as sfa
from GEN_Utils import FileHandling
from loguru import logger
from scipy import stats
import scikit_posthocs as sp
from utilities.database_map_and_filter import (go_lineage_tracer,go_term_details,
ontology_wordfinder, uniprot_go_genes, create_uniprot_xref, ortholog_map)
logger.info('Import OK')
clustered_path = f'results/lysate_denaturation/clustering/clustered.xlsx'
background_path = f'results/lysate_denaturation/normalised/normalised_summary.xlsx'
resource_folder = f'resources/bioinformatics_databases/'
output_folder = 'results/lysate_denaturation/gene_ontology_datasets/'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
def add_homolog_id(datasets):
for name, data in datasets.items():
data['homologue_id'] = data['Proteins'].map(swiss_to_homologue_id)
data.to_csv(f'{output_folder}{name}.csv')
datasets.update({name: data})
return datasets
# MIG database for homology
homology_db = pd.read_table(f'{resource_folder}HOM_MouseHumanSequence.txt')
homology_db.dropna(subset=['SWISS_PROT IDs', 'HomoloGene ID'], inplace=True)
swiss_to_homologue_id = dict(zip(homology_db['SWISS_PROT IDs'], homology_db['HomoloGene ID']))
# Dataset 1: Any proteins associated with generic "protein complex" GO term GO:0032991
# collect proteins from ontology
complex_genes = uniprot_go_genes(tax_id='10090', go_term='GO:0032991', child_terms=True, direct=False, output='list')
complex_genes = pd.DataFrame(complex_genes).rename(columns={0: 'Proteins'})
# Dataset 2: Against proteins associated with specific complexes: proteasome
# find terms associated with proteasome
potential_proteasome_terms = ontology_wordfinder(['proteasome']) # decided on "GO:0000502: proteasome complex"
proteasome_genes = uniprot_go_genes(tax_id='10090', go_term='GO:0000502', child_terms=False, direct=True, output='list')
proteasome_genes = pd.DataFrame(proteasome_genes).rename(columns={0: 'Proteins'})
# Dataset 3: Against proteins associated with specific complexes: ribosome
# find terms associated with proteasome
potential_terms = ontology_wordfinder(['ribosome']) # decided on "GO:0003735 structural constituent of ribosome""
ribosome_genes = uniprot_go_genes(tax_id='10090', go_term='GO:0003735', child_terms=False, direct=True, output='list')
ribosome_genes = pd.DataFrame(ribosome_genes).rename(columns={0: 'Proteins'})
# Dataset 4: Against proteins associated with specific complexes: DNA repair complex
# find terms associated with proteasome
potential_terms = ontology_wordfinder(['DNA repair complex']) # decided on "GO:1990391 DNA repair complex""
dna_genes = uniprot_go_genes(tax_id='10090', go_term='GO:1990391', child_terms=False, direct=True, output='list')
dna_genes = pd.DataFrame(dna_genes).rename(columns={0: 'Proteins'})
# Dataset 5: Against proteins associated with specific complexes: nuclear pore
# find terms associated with proteasome
potential_terms = ontology_wordfinder(['nuclear pore']) # decided on "GO:0005643 Nuclear pore"
pore_genes = uniprot_go_genes(tax_id='10090', go_term='GO:0005643', child_terms=True, direct=False, output='list')
pore_genes = pd.DataFrame(pore_genes).rename(columns={0: 'Proteins'})
# Dataset 6: Against proteins associated with protein folding (chaperone)
# find terms associated with proteasome
potential_terms = ontology_wordfinder(['chaperone']) # decided on "GO:0061077 chaperone-mediated protein folding"
chaperone_genes = uniprot_go_genes(tax_id='10090', go_term='GO:0061077', child_terms=True, direct=False, output='list')
chaperone_genes = pd.DataFrame(chaperone_genes).rename(columns={0: 'Proteins'})
# Add homologue id's to each df
datasets = dict(zip(['complex_genes', 'proteasome_genes', 'ribosome_genes', 'dna_genes', 'pore_genes', 'chaperone_genes'], [complex_genes, proteasome_genes, ribosome_genes, dna_genes, pore_genes, chaperone_genes]))
datasets = add_homolog_id(datasets)
# save summaries to excel
file_list = [filename for filename in os.listdir(output_folder) if '.csv' in filename]
datasets = dict(zip([name.split('.csv')[0] for name in file_list], [pd.read_csv(f'{output_folder}{filename}') for filename in file_list]))
summary_df = functools.reduce(lambda left, right: | pd.merge(left, right, on='homologue_id', how='outer') | pandas.merge |
import pytest
import numpy as np
import pandas as pd
from datetime import datetime
from pandas.util import testing as tm
from pandas import DataFrame, MultiIndex, compat, Series, bdate_range, Index
def test_apply_issues():
# GH 5788
s = """2011.05.16,00:00,1.40893
2011.05.16,01:00,1.40760
2011.05.16,02:00,1.40750
2011.05.16,03:00,1.40649
2011.05.17,02:00,1.40893
2011.05.17,03:00,1.40760
2011.05.17,04:00,1.40750
2011.05.17,05:00,1.40649
2011.05.18,02:00,1.40893
2011.05.18,03:00,1.40760
2011.05.18,04:00,1.40750
2011.05.18,05:00,1.40649"""
df = pd.read_csv(
compat.StringIO(s), header=None, names=['date', 'time', 'value'],
parse_dates=[['date', 'time']])
df = df.set_index('date_time')
expected = df.groupby(df.index.date).idxmax()
result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
tm.assert_frame_equal(result, expected)
# GH 5789
# don't auto coerce dates
df = pd.read_csv(
compat.StringIO(s), header=None, names=['date', 'time', 'value'])
exp_idx = pd.Index(
['2011.05.16', '2011.05.17', '2011.05.18'
], dtype=object, name='date')
expected = Series(['00:00', '02:00', '02:00'], index=exp_idx)
result = df.groupby('date').apply(
lambda x: x['time'][x['value'].idxmax()])
tm.assert_series_equal(result, expected)
def test_apply_trivial():
# GH 20066
# trivial apply: ignore input and return a constant dataframe.
df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
columns=['key', 'data'])
expected = pd.concat([df.iloc[1:], df.iloc[1:]],
axis=1, keys=['float64', 'object'])
result = df.groupby([str(x) for x in df.dtypes],
axis=1).apply(lambda x: df.iloc[1:])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="GH#20066; function passed into apply "
"returns a DataFrame with the same index "
"as the one to create GroupBy object.",
strict=True)
def test_apply_trivial_fail():
# GH 20066
# trivial apply fails if the constant dataframe has the same index
# with the one used to create GroupBy object.
df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
columns=['key', 'data'])
expected = pd.concat([df, df],
axis=1, keys=['float64', 'object'])
result = df.groupby([str(x) for x in df.dtypes],
axis=1).apply(lambda x: df)
tm.assert_frame_equal(result, expected)
def test_fast_apply():
# make sure that fast apply is correctly called
# rather than raising any kind of error
# otherwise the python path will be callsed
# which slows things down
N = 1000
labels = np.random.randint(0, 2000, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame({'key': labels,
'key2': labels2,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
def f(g):
return 1
g = df.groupby(['key', 'key2'])
grouper = g.grouper
splitter = grouper._get_splitter(g._selected_obj, axis=g.axis)
group_keys = grouper._get_group_keys()
values, mutated = splitter.fast_apply(f, group_keys)
assert not mutated
def test_apply_with_mixed_dtype():
# GH3480, apply with mixed dtype on axis=1 breaks in 0.11
df = DataFrame({'foo1': np.random.randn(6),
'foo2': ['one', 'two', 'two', 'three', 'one', 'two']})
result = df.apply(lambda x: x, axis=1)
tm.assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts())
# GH 3610 incorrect dtype conversion with as_index=False
df = DataFrame({"c1": [1, 2, 6, 6, 8]})
df["c2"] = df.c1 / 2.0
result1 = df.groupby("c2").mean().reset_index().c2
result2 = df.groupby("c2", as_index=False).mean().c2
tm.assert_series_equal(result1, result2)
def test_groupby_as_index_apply(df):
# GH #4648 and #3417
df = DataFrame({'item_id': ['b', 'b', 'a', 'c', 'a', 'b'],
'user_id': [1, 2, 1, 1, 3, 1],
'time': range(6)})
g_as = df.groupby('user_id', as_index=True)
g_not_as = df.groupby('user_id', as_index=False)
res_as = g_as.head(2).index
res_not_as = g_not_as.head(2).index
exp = Index([0, 1, 2, 4])
tm.assert_index_equal(res_as, exp)
tm.assert_index_equal(res_not_as, exp)
res_as_apply = g_as.apply(lambda x: x.head(2)).index
res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index
# apply doesn't maintain the original ordering
# changed in GH5610 as the as_index=False returns a MI here
exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (
2, 4)])
tp = [(1, 0), (1, 2), (2, 1), (3, 4)]
exp_as_apply = MultiIndex.from_tuples(tp, names=['user_id', None])
tm.assert_index_equal(res_as_apply, exp_as_apply)
tm.assert_index_equal(res_not_as_apply, exp_not_as_apply)
ind = Index(list('abcde'))
df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
res = df.groupby(0, as_index=False).apply(lambda x: x).index
tm.assert_index_equal(res, ind)
def test_apply_concat_preserve_names(three_group):
grouped = three_group.groupby(['A', 'B'])
def desc(group):
result = group.describe()
result.index.name = 'stat'
return result
def desc2(group):
result = group.describe()
result.index.name = 'stat'
result = result[:len(group)]
# weirdo
return result
def desc3(group):
result = group.describe()
# names are different
result.index.name = 'stat_%d' % len(group)
result = result[:len(group)]
# weirdo
return result
result = grouped.apply(desc)
assert result.index.names == ('A', 'B', 'stat')
result2 = grouped.apply(desc2)
assert result2.index.names == ('A', 'B', 'stat')
result3 = grouped.apply(desc3)
assert result3.index.names == ('A', 'B', None)
def test_apply_series_to_frame():
def f(piece):
with np.errstate(invalid='ignore'):
logged = np.log(piece)
return DataFrame({'value': piece,
'demeaned': piece - piece.mean(),
'logged': logged})
dr = bdate_range('1/1/2000', periods=100)
ts = Series(np.random.randn(100), index=dr)
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(f)
assert isinstance(result, DataFrame)
tm.assert_index_equal(result.index, ts.index)
def test_apply_series_yield_constant(df):
result = df.groupby(['A', 'B'])['C'].apply(len)
assert result.index.names[:2] == ('A', 'B')
def test_apply_frame_yield_constant(df):
# GH13568
result = df.groupby(['A', 'B']).apply(len)
assert isinstance(result, Series)
assert result.name is None
result = df.groupby(['A', 'B'])[['C', 'D']].apply(len)
assert isinstance(result, Series)
assert result.name is None
def test_apply_frame_to_series(df):
grouped = df.groupby(['A', 'B'])
result = grouped.apply(len)
expected = grouped.count()['C']
tm.assert_index_equal(result.index, expected.index)
tm.assert_numpy_array_equal(result.values, expected.values)
def test_apply_frame_concat_series():
def trans(group):
return group.groupby('B')['C'].sum().sort_values()[:2]
def trans2(group):
grouped = group.groupby(df.reindex(group.index)['B'])
return grouped.sum().sort_values()[:2]
df = DataFrame({'A': np.random.randint(0, 5, 1000),
'B': np.random.randint(0, 5, 1000),
'C': np.random.randn(1000)})
result = df.groupby('A').apply(trans)
exp = df.groupby('A')['C'].apply(trans2)
tm.assert_series_equal(result, exp, check_names=False)
assert result.name == 'C'
def test_apply_transform(ts):
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
tm.assert_series_equal(result, expected)
def test_apply_multikey_corner(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
def f(group):
return group.sort_values('A')[-5:]
result = grouped.apply(f)
for key, group in grouped:
tm.assert_frame_equal(result.loc[key], f(group))
def test_apply_chunk_view():
# Low level tinkering could be unsafe, make sure not
df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'value': compat.lrange(9)})
# return view
f = lambda x: x[:2]
result = df.groupby('key', group_keys=False).apply(f)
expected = df.take([0, 1, 3, 4, 6, 7])
tm.assert_frame_equal(result, expected)
def test_apply_no_name_column_conflict():
df = DataFrame({'name': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
'name2': [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],
'value': compat.lrange(10)[::-1]})
# it works! #2605
grouped = df.groupby(['name', 'name2'])
grouped.apply(lambda x: x.sort_values('value', inplace=True))
def test_apply_typecast_fail():
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(
['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)})
def f(group):
v = group['v']
group['v2'] = (v - v.min()) / (v.max() - v.min())
return group
result = df.groupby('d').apply(f)
expected = df.copy()
expected['v2'] = np.tile([0., 0.5, 1], 2)
tm.assert_frame_equal(result, expected)
def test_apply_multiindex_fail():
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
v = group['v']
group['v2'] = (v - v.min()) / (v.max() - v.min())
return group
result = df.groupby('d').apply(f)
expected = df.copy()
expected['v2'] = np.tile([0., 0.5, 1], 2)
tm.assert_frame_equal(result, expected)
def test_apply_corner(tsframe):
result = tsframe.groupby(lambda x: x.year).apply(lambda x: x * 2)
expected = tsframe * 2
tm.assert_frame_equal(result, expected)
def test_apply_without_copy():
# GH 5545
# returning a non-copy in an applied function fails
data = DataFrame({'id_field': [100, 100, 200, 300],
'category': ['a', 'b', 'c', 'c'],
'value': [1, 2, 3, 4]})
def filt1(x):
if x.shape[0] == 1:
return x.copy()
else:
return x[x.category == 'c']
def filt2(x):
if x.shape[0] == 1:
return x
else:
return x[x.category == 'c']
expected = data.groupby('id_field').apply(filt1)
result = data.groupby('id_field').apply(filt2)
tm.assert_frame_equal(result, expected)
def test_apply_corner_cases():
# #535, can't use sliding iterator
N = 1000
labels = np.random.randint(0, 100, size=N)
df = DataFrame({'key': labels,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
grouped = df.groupby('key')
def f(g):
g['value3'] = g['value1'] * 2
return g
result = grouped.apply(f)
assert 'value3' in result
def test_apply_numeric_coercion_when_datetime():
# In the past, group-by/apply operations have been over-eager
# in converting dtypes to numeric, in the presence of datetime
# columns. Various GH issues were filed, the reproductions
# for which are here.
# GH 15670
df = pd.DataFrame({'Number': [1, 2],
'Date': ["2017-03-02"] * 2,
'Str': ["foo", "inf"]})
expected = df.groupby(['Number']).apply(lambda x: x.iloc[0])
df.Date = pd.to_datetime(df.Date)
result = df.groupby(['Number']).apply(lambda x: x.iloc[0])
tm.assert_series_equal(result['Str'], expected['Str'])
# GH 15421
df = pd.DataFrame({'A': [10, 20, 30],
'B': ['foo', '3', '4'],
'T': [pd.Timestamp("12:31:22")] * 3})
def get_B(g):
return g.iloc[0][['B']]
result = df.groupby('A').apply(get_B)['B']
expected = df.B
expected.index = df.A
tm.assert_series_equal(result, expected)
# GH 14423
def predictions(tool):
out = pd.Series(index=['p1', 'p2', 'useTime'], dtype=object)
if 'step1' in list(tool.State):
out['p1'] = str(tool[tool.State == 'step1'].Machine.values[0])
if 'step2' in list(tool.State):
out['p2'] = str(tool[tool.State == 'step2'].Machine.values[0])
out['useTime'] = str(
tool[tool.State == 'step2'].oTime.values[0])
return out
df1 = pd.DataFrame({'Key': ['B', 'B', 'A', 'A'],
'State': ['step1', 'step2', 'step1', 'step2'],
'oTime': ['', '2016-09-19 05:24:33',
'', '2016-09-19 23:59:04'],
'Machine': ['23', '36L', '36R', '36R']})
df2 = df1.copy()
df2.oTime = pd.to_datetime(df2.oTime)
expected = df1.groupby('Key').apply(predictions).p1
result = df2.groupby('Key').apply(predictions).p1
| tm.assert_series_equal(expected, result) | pandas.util.testing.assert_series_equal |
"""Dataset preprocessing scripts"""
def process_mim_gold_ner():
from pathlib import Path
import pandas as pd
from tqdm.auto import tqdm
import json
import re
from collections import defaultdict
conversion_dict = {
"O": "O",
"B-Person": "B-PER",
"I-Person": "I-PER",
"B-Location": "B-LOC",
"I-Location": "I-LOC",
"B-Organization": "B-ORG",
"I-Organization": "I-ORG",
"B-Miscellaneous": "B-MISC",
"I-Miscellaneous": "I-MISC",
"B-Date": "O",
"I-Date": "O",
"B-Time": "O",
"I-Time": "O",
"B-Money": "O",
"I-Money": "O",
"B-Percent": "O",
"I-Percent": "O",
}
def get_df(path: Path):
lines = path.read_text().split("\n")
data_dict = defaultdict(list)
tokens = list()
tags = list()
for line in tqdm(lines):
if line != "":
token, tag = line.split("\t")
tag = conversion_dict[tag]
tokens.append(token)
tags.append(tag)
else:
doc = " ".join(tokens)
doc = re.sub(" ([.,])", "\1", doc)
data_dict["doc"].append(doc)
data_dict["tokens"].append(tokens)
data_dict["ner_tags"].append(tags)
tokens = list()
tags = list()
return pd.DataFrame(data_dict)
def export_as_jsonl(df: pd.DataFrame, output_path: Path):
for idx, row in tqdm(list(df.iterrows())):
data_dict = dict(doc=row.doc, tokens=row.tokens, ner_tags=row.ner_tags)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(df) - 1:
f.write("\n")
data_dir = Path("datasets") / "mim_gold_ner"
train_input_path = data_dir / "raw_train"
val_input_path = data_dir / "raw_val"
test_input_path = data_dir / "raw_test"
train_output_path = data_dir / "train.jsonl"
test_output_path = data_dir / "test.jsonl"
train_df = pd.concat((get_df(train_input_path), get_df(val_input_path)))
test_df = get_df(test_input_path)
export_as_jsonl(train_df, train_output_path)
export_as_jsonl(test_df, test_output_path)
def process_fdt():
from pathlib import Path
import json
from tqdm.auto import tqdm
import re
dep_conversion_dict = {
"acl": "acl",
"acl:relcl": "acl",
"acl:cleft": "acl",
"advcl": "advcl",
"advmod": "advmod",
"advmod:emph": "advmod",
"advmod:lmod": "advmod",
"amod": "amod",
"appos": "appos",
"aux": "aux",
"aux:pass": "aux",
"case": "case",
"cc": "cc",
"cc:preconj": "cc",
"ccomp": "ccomp",
"clf": "clf",
"compound": "compound",
"compound:lvc": "compound",
"compound:prt": "compound",
"compound:redup": "compound",
"compound:svc": "compound",
"conj": "conj",
"cop": "cop",
"csubj": "csubj",
"csubj:pass": "csubj",
"dep": "dep",
"det": "det",
"det:numgov": "det",
"det:nummod": "det",
"det:poss": "det",
"discourse": "discourse",
"dislocated": "dislocated",
"expl": "expl",
"expl:impers": "expl",
"expl:pass": "expl",
"expl:pv": "expl",
"fixed": "fixed",
"flat": "flat",
"flat:foreign": "flat",
"flat:name": "flat",
"goeswith": "goeswith",
"iobj": "iobj",
"list": "list",
"mark": "mark",
"nmod": "nmod",
"nmod:poss": "nmod",
"nmod:tmod": "nmod",
"nsubj": "nsubj",
"nsubj:pass": "nsubj",
"nummod": "nummod",
"nummod:gov": "nummod",
"obj": "obj",
"obl": "obl",
"obl:agent": "obl",
"obl:arg": "obl",
"obl:lmod": "obl",
"obl:loc": "obl",
"obl:tmod": "obl",
"orphan": "orphan",
"parataxis": "parataxis",
"punct": "punct",
"reparandum": "reparandum",
"root": "root",
"vocative": "vocative",
"xcomp": "xcomp",
}
dataset_dir = Path("datasets/fdt")
if not dataset_dir.exists():
dataset_dir.mkdir()
input_paths = [
Path("datasets/fo_farpahc-ud-train.conllu"),
Path("datasets/fo_farpahc-ud-dev.conllu"),
Path("datasets/fo_farpahc-ud-test.conllu"),
]
output_paths = [
Path("datasets/fdt/train.jsonl"),
Path("datasets/fdt/val.jsonl"),
Path("datasets/fdt/test.jsonl"),
]
for input_path, output_path in zip(input_paths, output_paths):
tokens = list()
pos_tags = list()
heads = list()
deps = list()
ids = list()
doc = ""
lines = input_path.read_text().split("\n")
store = True
for idx, line in enumerate(tqdm(lines)):
if line.startswith("# text = "):
doc = re.sub("# text = ", "", line)
store = True
elif line.startswith("#"):
continue
elif line == "":
if tokens != [] and store:
data_dict = dict(
ids=ids,
doc=doc,
tokens=tokens,
pos_tags=pos_tags,
heads=heads,
deps=deps,
)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(lines) - 1:
f.write("\n")
ids = list()
tokens = list()
pos_tags = list()
heads = list()
deps = list()
doc = ""
else:
data = line.split("\t")
ids.append(data[0])
tokens.append(data[1])
pos_tags.append(data[3])
heads.append(data[6])
try:
deps.append(dep_conversion_dict[data[7]])
except KeyError:
store = False
def process_wikiann_fo():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
from sklearn.model_selection import train_test_split
import re
dataset_dir = Path("datasets/wikiann_fo")
if not dataset_dir.exists():
dataset_dir.mkdir()
input_path = Path("datasets/wikiann-fo.bio")
train_output_path = Path("datasets/wikiann_fo/train.jsonl")
test_output_path = Path("datasets/wikiann_fo/test.jsonl")
corpus = input_path.read_text().split("\n")
tokens = list()
ner_tags = list()
records = list()
for line in corpus:
if line != "":
data = line.split(" ")
tokens.append(data[0])
ner_tags.append(data[-1])
else:
assert len(tokens) == len(ner_tags)
doc = " ".join(tokens)
doc = re.sub(" ([.,])", "\1", doc)
records.append(dict(doc=doc, tokens=tokens, ner_tags=ner_tags))
tokens = list()
ner_tags = list()
# Show the NER tags in the dataset, as a sanity check
print(sorted(set([tag for record in records for tag in record["ner_tags"]])))
# Count the number of each NER tag, as a sanity check
tags = ["PER", "LOC", "ORG", "MISC"]
for tag in tags:
num = len([t for record in records for t in record["ner_tags"] if t[2:] == tag])
print(tag, num)
df = pd.DataFrame.from_records(records)
train, test = train_test_split(df, test_size=0.3)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
def export_as_jsonl(df: pd.DataFrame, output_path: Path):
for idx, row in tqdm(df.iterrows()):
data_dict = dict(doc=row.doc, tokens=row.tokens, ner_tags=row.ner_tags)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(df) - 1:
f.write("\n")
export_as_jsonl(train, train_output_path)
export_as_jsonl(test, test_output_path)
def process_idt():
from pathlib import Path
import json
from tqdm.auto import tqdm
import re
dep_conversion_dict = {
"acl": "acl",
"acl:relcl": "acl",
"acl:cleft": "acl",
"advcl": "advcl",
"advmod": "advmod",
"advmod:emph": "advmod",
"advmod:lmod": "advmod",
"amod": "amod",
"appos": "appos",
"aux": "aux",
"aux:pass": "aux",
"case": "case",
"cc": "cc",
"cc:preconj": "cc",
"ccomp": "ccomp",
"clf": "clf",
"compound": "compound",
"compound:lvc": "compound",
"compound:prt": "compound",
"compound:redup": "compound",
"compound:svc": "compound",
"conj": "conj",
"cop": "cop",
"csubj": "csubj",
"csubj:pass": "csubj",
"dep": "dep",
"det": "det",
"det:numgov": "det",
"det:nummod": "det",
"det:poss": "det",
"discourse": "discourse",
"dislocated": "dislocated",
"expl": "expl",
"expl:impers": "expl",
"expl:pass": "expl",
"expl:pv": "expl",
"fixed": "fixed",
"flat": "flat",
"flat:foreign": "flat",
"flat:name": "flat",
"goeswith": "goeswith",
"iobj": "iobj",
"list": "list",
"mark": "mark",
"nmod": "nmod",
"nmod:poss": "nmod",
"nmod:tmod": "nmod",
"nsubj": "nsubj",
"nsubj:pass": "nsubj",
"nummod": "nummod",
"nummod:gov": "nummod",
"obj": "obj",
"obl": "obl",
"obl:agent": "obl",
"obl:arg": "obl",
"obl:lmod": "obl",
"obl:loc": "obl",
"obl:tmod": "obl",
"orphan": "orphan",
"parataxis": "parataxis",
"punct": "punct",
"reparandum": "reparandum",
"root": "root",
"vocative": "vocative",
"xcomp": "xcomp",
}
dataset_dir = Path("datasets/idt")
if not dataset_dir.exists():
dataset_dir.mkdir()
input_paths = [
Path("datasets/is_modern-ud-train.conllu"),
Path("datasets/is_modern-ud-dev.conllu"),
Path("datasets/is_modern-ud-test.conllu"),
]
output_paths = [
Path("datasets/idt/train.jsonl"),
Path("datasets/idt/val.jsonl"),
Path("datasets/idt/test.jsonl"),
]
for input_path, output_path in zip(input_paths, output_paths):
tokens = list()
pos_tags = list()
heads = list()
deps = list()
ids = list()
doc = ""
lines = input_path.read_text().split("\n")
store = True
for idx, line in enumerate(tqdm(lines)):
if line.startswith("# text = "):
doc = re.sub("# text = ", "", line)
store = True
elif line.startswith("#"):
continue
elif line == "":
if tokens != [] and store:
data_dict = dict(
ids=ids,
doc=doc,
tokens=tokens,
pos_tags=pos_tags,
heads=heads,
deps=deps,
)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(lines) - 1:
f.write("\n")
ids = list()
tokens = list()
pos_tags = list()
heads = list()
deps = list()
doc = ""
else:
data = line.split("\t")
ids.append(data[0])
tokens.append(data[1])
pos_tags.append(data[3])
heads.append(data[6])
try:
deps.append(dep_conversion_dict[data[7]])
except KeyError:
store = False
def process_suc3():
from pathlib import Path
import json
from tqdm.auto import tqdm
import re
from lxml import etree
import pandas as pd
from sklearn.model_selection import train_test_split
import io
sdt_dir = Path("datasets/suc3")
if not sdt_dir.exists():
sdt_dir.mkdir()
conversion_dict = dict(
O="O",
animal="MISC",
event="MISC",
inst="ORG",
myth="MISC",
other="MISC",
person="PER",
place="LOC",
product="MISC",
work="MISC",
)
input_path = Path("datasets/suc3.xml")
train_output_path = Path("datasets/suc3/train.jsonl")
test_output_path = Path("datasets/suc3/test.jsonl")
print("Parsing XML file...")
xml_data = input_path.read_bytes()
context = etree.iterparse(io.BytesIO(xml_data), events=("start", "end"))
ner_tag = "O"
records = list()
for action, elt in context:
if elt.tag == "name" and action == "start":
ner_tag = f'B-{conversion_dict[elt.attrib["type"]]}'
elif elt.tag == "name" and action == "end":
ner_tag = "O"
elif elt.tag == "w" and action == "start":
if elt.text:
tokens.append(elt.text)
ner_tags.append(ner_tag)
elif elt.tag == "w" and action == "end":
if ner_tag.startswith("B-"):
ner_tag = f"I-{ner_tag[2:]}"
elif elt.tag == "sentence" and action == "end":
if len(tokens):
doc = " ".join(tokens)
doc = re.sub(" ([.,])", "\1", doc)
assert len(tokens) == len(ner_tags)
record = dict(doc=doc, tokens=tokens, ner_tags=ner_tags)
records.append(record)
elif elt.tag == "sentence" and action == "start":
tokens = list()
ner_tags = list()
ner_tag = "O"
# Count the number of each NER tag, as a sanity check
tags = ["PER", "LOC", "ORG", "MISC"]
for tag in tags:
num = len([t for record in records for t in record["ner_tags"] if t[2:] == tag])
print(tag, num)
df = pd.DataFrame.from_records(records)
train, test = train_test_split(df, test_size=0.3)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
def export_as_jsonl(df: pd.DataFrame, output_path: Path):
for idx, row in tqdm(df.iterrows()):
data_dict = dict(doc=row.doc, tokens=row.tokens, ner_tags=row.ner_tags)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(df) - 1:
f.write("\n")
export_as_jsonl(train, train_output_path)
export_as_jsonl(test, test_output_path)
def process_sdt():
from pathlib import Path
import json
from tqdm.auto import tqdm
import re
dep_conversion_dict = {
"acl": "acl",
"acl:relcl": "acl",
"acl:cleft": "acl",
"advcl": "advcl",
"advmod": "advmod",
"advmod:emph": "advmod",
"advmod:lmod": "advmod",
"amod": "amod",
"appos": "appos",
"aux": "aux",
"aux:pass": "aux",
"case": "case",
"cc": "cc",
"cc:preconj": "cc",
"ccomp": "ccomp",
"clf": "clf",
"compound": "compound",
"compound:lvc": "compound",
"compound:prt": "compound",
"compound:redup": "compound",
"compound:svc": "compound",
"conj": "conj",
"cop": "cop",
"csubj": "csubj",
"csubj:pass": "csubj",
"dep": "dep",
"det": "det",
"det:numgov": "det",
"det:nummod": "det",
"det:poss": "det",
"discourse": "discourse",
"dislocated": "dislocated",
"expl": "expl",
"expl:impers": "expl",
"expl:pass": "expl",
"expl:pv": "expl",
"fixed": "fixed",
"flat": "flat",
"flat:foreign": "flat",
"flat:name": "flat",
"goeswith": "goeswith",
"iobj": "iobj",
"list": "list",
"mark": "mark",
"nmod": "nmod",
"nmod:poss": "nmod",
"nmod:tmod": "nmod",
"nsubj": "nsubj",
"nsubj:pass": "nsubj",
"nummod": "nummod",
"nummod:gov": "nummod",
"obj": "obj",
"obl": "obl",
"obl:agent": "obl",
"obl:arg": "obl",
"obl:lmod": "obl",
"obl:loc": "obl",
"obl:tmod": "obl",
"orphan": "orphan",
"parataxis": "parataxis",
"punct": "punct",
"reparandum": "reparandum",
"root": "root",
"vocative": "vocative",
"xcomp": "xcomp",
}
sdt_dir = Path("datasets/sdt")
if not sdt_dir.exists():
sdt_dir.mkdir()
input_paths = [
Path("datasets/sv_talbanken-ud-train.conllu"),
Path("datasets/sv_talbanken-ud-dev.conllu"),
Path("datasets/sv_talbanken-ud-test.conllu"),
]
output_paths = [
Path("datasets/sdt/train.jsonl"),
Path("datasets/sdt/val.jsonl"),
Path("datasets/sdt/test.jsonl"),
]
for input_path, output_path in zip(input_paths, output_paths):
tokens = list()
pos_tags = list()
heads = list()
deps = list()
ids = list()
doc = ""
lines = input_path.read_text().split("\n")
store = True
for idx, line in enumerate(tqdm(lines)):
if line.startswith("# text = "):
doc = re.sub("# text = ", "", line)
store = True
elif line.startswith("#"):
continue
elif line == "":
if tokens != [] and store:
data_dict = dict(
ids=ids,
doc=(
doc.replace(" s k", " s.k.")
.replace("S k", "S.k.")
.replace(" bl a", " bl.a.")
.replace("Bl a", "Bl.a.")
.replace(" t o m", " t.o.m.")
.replace("T o m", "T.o.m.")
.replace(" fr o m", " fr.o.m.")
.replace("Fr o m", "Fr.o.m.")
.replace(" o s v", " o.s.v.")
.replace("O s v", "O.s.v.")
.replace(" d v s", " d.v.s.")
.replace("D v s", "D.v.s.")
.replace(" m fl", " m.fl.")
.replace("M fl", "M.fl.")
.replace(" t ex", " t.ex.")
.replace("T ex", "T.ex.")
.replace(" f n", " f.n.")
.replace("F n", "F.n.")
),
tokens=tokens,
pos_tags=pos_tags,
heads=heads,
deps=deps,
)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(lines) - 1:
f.write("\n")
ids = list()
tokens = list()
pos_tags = list()
heads = list()
deps = list()
doc = ""
else:
data = line.split("\t")
ids.append(data[0])
tokens.append(
data[1]
.replace("s k", "s.k.")
.replace("S k", "S.k.")
.replace("t o m", "t.o.m.")
.replace("T o m", "T.o.m.")
.replace("fr o m", "fr.o.m.")
.replace("Fr o m", "Fr.o.m.")
.replace("bl a", "bl.a.")
.replace("Bl a", "Bl.a.")
.replace("m fl", "m.fl.")
.replace("M fl", "M.fl.")
.replace("o s v", "o.s.v.")
.replace("O s v", "O.s.v.")
.replace("d v s", "d.v.s.")
.replace("D v s", "D.v.s.")
.replace("t ex", "t.ex.")
.replace("T ex", "T.ex.")
.replace("f n", "f.n.")
.replace("F n", "F.n.")
)
pos_tags.append(data[3])
heads.append(data[6])
try:
deps.append(dep_conversion_dict[data[7]])
except KeyError:
store = False
def process_norne_nn():
from pathlib import Path
import json
from tqdm.auto import tqdm
import re
ner_conversion_dict = {
"O": "O",
"B-LOC": "B-LOC",
"I-LOC": "I-LOC",
"B-PER": "B-PER",
"I-PER": "I-PER",
"B-ORG": "B-ORG",
"I-ORG": "I-ORG",
"B-MISC": "B-MISC",
"I-MISC": "I-MISC",
"B-GPE_LOC": "B-LOC",
"I-GPE_LOC": "I-LOC",
"B-GPE_ORG": "B-ORG",
"I-GPE_ORG": "I-ORG",
"B-PROD": "B-MISC",
"I-PROD": "I-MISC",
"B-DRV": "B-MISC",
"I-DRV": "I-MISC",
"B-EVT": "B-MISC",
"I-EVT": "I-MISC",
}
dep_conversion_dict = {
"acl": "acl",
"acl:relcl": "acl",
"acl:cleft": "acl",
"advcl": "advcl",
"advmod": "advmod",
"advmod:emph": "advmod",
"advmod:lmod": "advmod",
"amod": "amod",
"appos": "appos",
"aux": "aux",
"aux:pass": "aux",
"case": "case",
"cc": "cc",
"cc:preconj": "cc",
"ccomp": "ccomp",
"clf": "clf",
"compound": "compound",
"compound:lvc": "compound",
"compound:prt": "compound",
"compound:redup": "compound",
"compound:svc": "compound",
"conj": "conj",
"cop": "cop",
"csubj": "csubj",
"csubj:pass": "csubj",
"dep": "dep",
"det": "det",
"det:numgov": "det",
"det:nummod": "det",
"det:poss": "det",
"discourse": "discourse",
"dislocated": "dislocated",
"expl": "expl",
"expl:impers": "expl",
"expl:pass": "expl",
"expl:pv": "expl",
"fixed": "fixed",
"flat": "flat",
"flat:foreign": "flat",
"flat:name": "flat",
"goeswith": "goeswith",
"iobj": "iobj",
"list": "list",
"mark": "mark",
"nmod": "nmod",
"nmod:poss": "nmod",
"nmod:tmod": "nmod",
"nsubj": "nsubj",
"nsubj:pass": "nsubj",
"nummod": "nummod",
"nummod:gov": "nummod",
"obj": "obj",
"obl": "obl",
"obl:agent": "obl",
"obl:arg": "obl",
"obl:lmod": "obl",
"obl:loc": "obl",
"obl:tmod": "obl",
"orphan": "orphan",
"parataxis": "parataxis",
"punct": "punct",
"reparandum": "reparandum",
"root": "root",
"vocative": "vocative",
"xcomp": "xcomp",
}
norne_dir = Path("datasets/norne_nn")
if not norne_dir.exists():
norne_dir.mkdir()
input_paths = [
Path("datasets/no_nynorsk-ud-train.conllu"),
Path("datasets/no_nynorsk-ud-dev.conllu"),
Path("datasets/no_nynorsk-ud-test.conllu"),
]
output_paths = [
Path("datasets/norne_nn/train.jsonl"),
Path("datasets/norne_nn/val.jsonl"),
Path("datasets/norne_nn/test.jsonl"),
]
for input_path, output_path in zip(input_paths, output_paths):
tokens = list()
pos_tags = list()
heads = list()
deps = list()
ner_tags = list()
ids = list()
doc = ""
lines = input_path.read_text().split("\n")
for idx, line in enumerate(tqdm(lines)):
if line.startswith("# text = "):
doc = re.sub("# text = ", "", line)
elif line.startswith("#"):
continue
elif line == "":
if tokens != []:
data_dict = dict(
ids=ids,
doc=doc,
tokens=tokens,
pos_tags=pos_tags,
heads=heads,
deps=deps,
ner_tags=ner_tags,
)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(lines) - 1:
f.write("\n")
ids = list()
tokens = list()
pos_tags = list()
heads = list()
deps = list()
ner_tags = list()
doc = ""
else:
data = line.split("\t")
ids.append(data[0])
tokens.append(data[1])
pos_tags.append(data[3])
heads.append(data[6])
deps.append(dep_conversion_dict[data[7]])
tag = data[9].replace("name=", "").split("|")[-1]
ner_tags.append(ner_conversion_dict[tag])
def process_norne_nb():
from pathlib import Path
import json
from tqdm.auto import tqdm
import re
ner_conversion_dict = {
"O": "O",
"B-LOC": "B-LOC",
"I-LOC": "I-LOC",
"B-PER": "B-PER",
"I-PER": "I-PER",
"B-ORG": "B-ORG",
"I-ORG": "I-ORG",
"B-MISC": "B-MISC",
"I-MISC": "I-MISC",
"B-GPE_LOC": "B-LOC",
"I-GPE_LOC": "I-LOC",
"B-GPE_ORG": "B-ORG",
"I-GPE_ORG": "I-ORG",
"B-PROD": "B-MISC",
"I-PROD": "I-MISC",
"B-DRV": "B-MISC",
"I-DRV": "I-MISC",
"B-EVT": "B-MISC",
"I-EVT": "I-MISC",
}
dep_conversion_dict = {
"acl": "acl",
"acl:relcl": "acl",
"acl:cleft": "acl",
"advcl": "advcl",
"advmod": "advmod",
"advmod:emph": "advmod",
"advmod:lmod": "advmod",
"amod": "amod",
"appos": "appos",
"aux": "aux",
"aux:pass": "aux",
"case": "case",
"cc": "cc",
"cc:preconj": "cc",
"ccomp": "ccomp",
"clf": "clf",
"compound": "compound",
"compound:lvc": "compound",
"compound:prt": "compound",
"compound:redup": "compound",
"compound:svc": "compound",
"conj": "conj",
"cop": "cop",
"csubj": "csubj",
"csubj:pass": "csubj",
"dep": "dep",
"det": "det",
"det:numgov": "det",
"det:nummod": "det",
"det:poss": "det",
"discourse": "discourse",
"dislocated": "dislocated",
"expl": "expl",
"expl:impers": "expl",
"expl:pass": "expl",
"expl:pv": "expl",
"fixed": "fixed",
"flat": "flat",
"flat:foreign": "flat",
"flat:name": "flat",
"goeswith": "goeswith",
"iobj": "iobj",
"list": "list",
"mark": "mark",
"nmod": "nmod",
"nmod:poss": "nmod",
"nmod:tmod": "nmod",
"nsubj": "nsubj",
"nsubj:pass": "nsubj",
"nummod": "nummod",
"nummod:gov": "nummod",
"obj": "obj",
"obl": "obl",
"obl:agent": "obl",
"obl:arg": "obl",
"obl:lmod": "obl",
"obl:loc": "obl",
"obl:tmod": "obl",
"orphan": "orphan",
"parataxis": "parataxis",
"punct": "punct",
"reparandum": "reparandum",
"root": "root",
"vocative": "vocative",
"xcomp": "xcomp",
}
norne_dir = Path("datasets/norne_nb")
if not norne_dir.exists():
norne_dir.mkdir()
input_paths = [
Path("datasets/no_bokmaal-ud-train.conllu"),
Path("datasets/no_bokmaal-ud-dev.conllu"),
Path("datasets/no_bokmaal-ud-test.conllu"),
]
output_paths = [
Path("datasets/norne_nb/train.jsonl"),
Path("datasets/norne_nb/val.jsonl"),
Path("datasets/norne_nb/test.jsonl"),
]
for input_path, output_path in zip(input_paths, output_paths):
tokens = list()
pos_tags = list()
heads = list()
deps = list()
ner_tags = list()
ids = list()
doc = ""
lines = input_path.read_text().split("\n")
for idx, line in enumerate(tqdm(lines)):
if line.startswith("# text = "):
doc = re.sub("# text = ", "", line)
elif line.startswith("#"):
continue
elif line == "":
if tokens != []:
data_dict = dict(
ids=ids,
doc=doc,
tokens=tokens,
pos_tags=pos_tags,
heads=heads,
deps=deps,
ner_tags=ner_tags,
)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(lines) - 1:
f.write("\n")
ids = list()
tokens = list()
pos_tags = list()
heads = list()
deps = list()
ner_tags = list()
doc = ""
else:
data = line.split("\t")
ids.append(data[0])
tokens.append(data[1])
pos_tags.append(data[3])
heads.append(data[6])
deps.append(dep_conversion_dict[data[7]])
tag = data[9].replace("name=", "").split("|")[-1]
ner_tags.append(ner_conversion_dict[tag])
def process_nordial():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
import json
dataset_dir = Path("datasets/nordial")
if not dataset_dir.exists():
dataset_dir.mkdir()
train_input_path = Path("datasets/nordial_train.json")
val_input_path = Path("datasets/nordial_val.json")
test_input_path = Path("datasets/nordial_test.json")
output_paths = [dataset_dir / "train.jsonl", dataset_dir / "test.jsonl"]
train = pd.read_json(train_input_path, orient="records").dropna()
val = pd.read_json(val_input_path, orient="records").dropna()
train = train.append(val)
test = pd.read_json(test_input_path, orient="records").dropna()
for split, output_path in zip([train, test], output_paths):
for idx, row in tqdm(split.iterrows()):
data_dict = dict(text=row.text, label=row.category)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(split) - 1:
f.write("\n")
def process_norec():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
import json
dataset_dir = Path("datasets/norec")
if not dataset_dir.exists():
dataset_dir.mkdir()
train_input_path = Path("datasets/norec_train.json")
val_input_path = Path("datasets/norec_val.json")
test_input_path = Path("datasets/norec_test.json")
output_paths = [dataset_dir / "train.jsonl", dataset_dir / "test.jsonl"]
train = pd.read_json(train_input_path, orient="records").dropna()
val = pd.read_json(val_input_path, orient="records").dropna()
train = train.append(val)
test = pd.read_json(test_input_path, orient="records").dropna()
for split, output_path in zip([train, test], output_paths):
for idx, row in tqdm(split.iterrows()):
data_dict = dict(text=row.text, label=row.label)
json_line = json.dumps(data_dict)
with output_path.open("a") as f:
f.write(json_line)
if idx < len(split) - 1:
f.write("\n")
def process_twitter_subj():
from pathlib import Path
import json
from tqdm.auto import tqdm
import pandas as pd
Path("datasets/twitter_subj").mkdir()
input_path = Path("datasets/twitter_sent.csv")
output_paths = [
Path("datasets/twitter_subj/train.jsonl"),
Path("datasets/twitter_subj/test.jsonl"),
]
df = | pd.read_csv(input_path, header=0) | pandas.read_csv |
# %%
import os
import pandas as pd
import numpy as np
import re
from scripts import motor, quitardecimal, valores, modelogeneral, especifico, origensegunvin, version, modelogenerico
# %% CARGA DE DATOS
diciembre2016 = pd.read_excel(r"D:\Basededatos\Origen\Panama\Concesionaria\Diciembre 2016.xlsx", engine='openpyxl')
diciembre2020 = | pd.read_excel(r"D:\Basededatos\Origen\Panama\Concesionaria\Diciembre 2020.xlsx", engine='openpyxl') | pandas.read_excel |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import platform
import pandas as pd
from main.git_log import git2data
import geocoder
from collections import Counter
from ratelimiter import RateLimiter
def get_country_name(name):
if (name == 'United States'):
return 'United States of America'
elif (name == 'België / Belgique / Belgien'):
return 'Belgium'
elif (name == 'België - Belgique - Belgien'):
return 'Belgium'
elif (name == 'Россия'):
return 'Russia'
elif (name == '中国'):
return 'China'
elif (name == 'Österreich'):
return 'Austria'
elif (name == '日本 (Japan)' or name == '日本'):
return 'Japan'
elif (name == 'Türkiye'):
return 'Turkey'
elif (name == 'Sverige'):
return 'Sweden'
elif (name == 'España'):
return 'Spain'
elif (name == 'Україна'):
return 'Ukraine'
elif (name == 'China 中国'):
return 'China'
elif (name == 'ישראל'):
return 'Israel'
elif (name == '대한민국'):
return 'Republic of Korea'
elif (name == 'Schweiz/Suisse/Svizzera/Svizra'):
return 'Switzerland'
elif (name == 'Suomi'):
return 'Finland'
elif (name == 'Ísland'):
return 'Iceland'
elif (name == 'Česko'):
return 'Czech Republic'
elif (name == 'Česká republika'):
return 'Czech Republic'
elif (name == 'Ελλάδα'):
return 'Greece'
elif (name == 'Беларусь'):
return 'Belarus'
elif (name == 'ایران'):
return 'Iran'
elif (name == '臺灣'):
return 'Taiwan'
else:
return name
@RateLimiter(max_calls = 1, period = 1)
def get_geo(loc):
geo_obj = geocoder.osm(loc)
return geo_obj
if platform.system() == 'Darwin' or platform.system() == 'Linux':
data_path = os.getcwd() + '/Clustering/'
#data_path = os.getcwd() + '/Clustering/project_list.csv'
#print(os.getcwd())
else:
#data_path = os.getcwd() + '\\Clustering\\project_list.csv'
data_path = os.getcwd() + '\\Clustering\\'
file_name = 'MolSSI_cleaned_input.csv'
#file_name = 'temp_input.csv'
project_list = | pd.read_csv(data_path + file_name) | pandas.read_csv |
# coding: utf-8
# In[1]:
import pandas as pd
import glob, os
from tqdm import tqdm
directory = "./data as csv/2016/"
os.chdir(directory)
#print(glob.glob("*csv"))
# In[2]:
all_files = glob.glob("*csv")
# In[3]:
all_files[0]
# In[4]:
error_files = []
# In[5]:
broken_files_13 = ['P4503501.201301180800 .csv',
'P4503501.201301250900 .csv',
'P4503501.201302101100 .csv',
'P4503501.201301180400 .csv',
'P4503501.201302101000 .csv',
'P4503501.201301261200 .csv',
'P4503501.201302140700 .csv',
'P4503501.201303130900 .csv',
'P4503501.201301171200 .csv',
'P4503501.201301170800 .csv',
'P4503501.201302140900 .csv',
'P4503501.201302101300 .csv',
'P4503501.201302140500 .csv',
'P4503501.201301171000 .csv',
'P4503501.201301181300 .csv',
'P4503501.201301180300 .csv',
'P4503501.201301161200 .csv',
'P4503501.201302140800 .csv',
'P4503501.201302101200 .csv',
'P4503501.201301250800 .csv',
'P4503501.201301181000 .csv',
'P4503501.201302141100 .csv',
'P4503501.201302140400 .csv',
'P4503501.201301180600 .csv',
'P4503501.201301180700 .csv',
'P4503501.201301170700 .csv',
'P4503501.201301171400 .csv',
'P4503501.201303131000 .csv',
'P4503501.201301261100 .csv',
'P4503501.201302140300 .csv',
'P4503501.201301180200 .csv',
'P4503501.201301181400 .csv',
'P4503501.201302100900 .csv',
'P4503501.201301251000 .csv',
'P4503501.201301171100 .csv',
'P4503501.201301170900 .csv',
'P4503501.201302141200 .csv',
'P4503501.201301181100 .csv',
'P4503501.201301250600 .csv',
'P4503501.201301250700 .csv',
'P4503501.201301171300 .csv',
'P4503501.201302100800 .csv',
'P4503501.201301180900 .csv',
'P4503501.201301181200 .csv',
'P4503501.201301261000 .csv',
'P4503501.201301180500 .csv',
'P4503501.201301261300 .csv',
'P4503501.201302141000 .csv',
'P4503501.201302140600 .csv',
'P4503501.201301171500 .csv']
broken_files_14 = ['P4503501.201412291100 .csv',
'P4503501.201412310800 .csv',
'P4503501.201412301200 .csv',
'P4503501.201412311000 .csv',
'P4503501.201412291000 .csv',
'P4503501.201412290900 .csv',
'P4503501.201412310700 .csv',
'P4503501.201412310400 .csv',
'P4503501.201412300800 .csv',
'P4503501.201412301300 .csv',
'P4503501.201412301000 .csv',
'P4503501.201412311400 .csv',
'P4503501.201412290800 .csv',
'P4503501.201412301400 .csv',
'P4503501.201412300900 .csv',
'P4503501.201412310600 .csv',
'P4503501.201412311300 .csv',
'P4503501.201412310500 .csv',
'P4503501.201412311100 .csv',
'P4503501.201412301100 .csv',
'P4503501.201412291200 .csv',
'P4503501.201412311200 .csv',
'P4503501.201412310900 .csv']
broken_files_15 = []
broken_files_16 = ['P4503501.201601181000 .csv',
'P4503501.201601181200 .csv',
'P4503501.201601180700 .csv',
'P4503501.201601180900 .csv',
'P4503501.201601181100 .csv',
'P4503501.201601180800 .csv']
broken_files_17 = ['P4503501.201701071300 .csv',
'P4503501.201701070300 .csv',
'P4503501.201701070800 .csv',
'P4503501.201701151300 .csv',
'P4503501.201701070100 .csv',
'P4503501.201701070600 .csv',
'P4503501.201701220900 .csv',
'P4503501.201701211100 .csv',
'P4503501.201701160600 .csv',
'P4503501.201701210700 .csv',
'P4503501.201701161100 .csv',
'P4503501.201701170800 .csv',
'P4503501.201701211000 .csv',
'P4503501.201701180700 .csv',
'P4503501.201701161000 .csv',
'P4503501.201701201300 .csv',
'P4503501.201701190500 .csv',
'P4503501.201701261000 .csv',
'P4503501.201701170900 .csv',
'P4503501.201701210800 .csv',
'P4503501.201701170200 .csv',
'P4503501.201701220800 .csv',
'P4503501.201701180000 .csv',
'P4503501.201701070900 .csv',
'P4503501.201701181400 .csv',
'P4503501.201701171100 .csv',
'P4503501.201701061300 .csv',
'P4503501.201701200900 .csv',
'P4503501.201701211200 .csv',
'P4503501.201701221300 .csv',
'P4503501.201701180600 .csv',
'P4503501.201701070200 .csv',
'P4503501.201701271100 .csv',
'P4503501.201701151000 .csv',
'P4503501.201701070500 .csv',
'P4503501.201701170400 .csv',
'P4503501.201701200600 .csv',
'P4503501.201701170700 .csv',
'P4503501.201701231200 .csv',
'P4503501.201701180800 .csv',
'P4503501.201701151200 .csv',
'P4503501.201701061100 .csv',
'P4503501.201701151400 .csv',
'P4503501.201701171200 .csv',
'P4503501.201701061200 .csv',
'P4503501.201701231100 .csv',
'P4503501.201701171000 .csv',
'P4503501.201701190400 .csv',
'P4503501.201701150900 .csv',
'P4503501.201701190800 .csv',
'P4503501.201701170300 .csv',
'P4503501.201701181100 .csv',
'P4503501.201701201100 .csv',
'P4503501.201701221200 .csv',
'P4503501.201701180400 .csv',
'P4503501.201701231000 .csv',
'P4503501.201701071200 .csv',
'P4503501.201701181300 .csv',
'P4503501.201701070700 .csv',
'P4503501.201701180100 .csv',
'P4503501.201701181000 .csv',
'P4503501.201701160800 .csv',
'P4503501.201701190700 .csv',
'P4503501.201701170600 .csv',
'P4503501.201701071000 .csv',
'P4503501.201701261100 .csv',
'P4503501.201701260800 .csv',
'P4503501.201701180200 .csv',
'P4503501.201701260700 .csv',
'P4503501.201701260500 .csv',
'P4503501.201701160900 .csv',
'P4503501.201701180900 .csv',
'P4503501.201701171400 .csv',
'P4503501.201701180500 .csv',
'P4503501.201701181200 .csv',
'P4503501.201701230900 .csv',
'P4503501.201701261200 .csv',
'P4503501.201701160700 .csv',
'P4503501.201701170100 .csv',
'P4503501.201701161300 .csv',
'P4503501.201701260400 .csv',
'P4503501.201701070400 .csv',
'P4503501.201701170500 .csv',
'P4503501.201701161400 .csv',
'P4503501.201701180300 .csv',
'P4503501.201701201200 .csv',
'P4503501.201701171300 .csv',
'P4503501.201701201000 .csv',
'P4503501.201701071100 .csv',
'P4503501.201701260600 .csv',
'P4503501.201701161200 .csv',
'P4503501.201701200700 .csv',
'P4503501.201701261300 .csv',
'P4503501.201701170000 .csv',
'P4503501.201701231300 .csv',
'P4503501.201701260900 .csv',
'P4503501.201701210900 .csv',
'P4503501.201701221000 .csv',
'P4503501.201701151100 .csv',
'P4503501.201701190600 .csv',
'P4503501.201701221100 .csv']
# In[6]:
for file in tqdm(all_files):
try:
data = | pd.read_csv(file, usecols = ['station', 'date.validite', 'temperature', 'nebulosite']) | pandas.read_csv |
import pandas as pd
from random import random
flow = (list(range(1,10,1)) + list(range(10,1,-1)))*100
pdata = pd.DataFrame({"a":flow, "b":flow})
pdata.b = pdata.b.shift(9)
data = pdata.iloc[10:] * random() # some noise
import numpy as np
def _load_data(data, n_prev = 100):
"""
data should be pd.DataFrame()
"""
docX, docY = [], []
for i in range(len(data)-n_prev):
docX.append(data.iloc[i:i+n_prev].as_matrix())
docY.append(data.iloc[i+n_prev].as_matrix())
alsX = np.array(docX)
alsY = np.array(docY)
return alsX, alsY
def train_test_split(df, test_size=0.1):
"""
This just splits data to training and testing parts
"""
ntrn = int(round(len(df) * (1 - test_size)))
X_train, y_train = _load_data(df.iloc[0:ntrn])
X_test, y_test = _load_data(df.iloc[ntrn:])
return (X_train, y_train), (X_test, y_test)
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.layers.recurrent import LSTM
in_out_neurons = 2
hidden_neurons = 50
model = Sequential()
# n_prev = 100, 2 values per x axis
model.add(LSTM(hidden_neurons, input_shape=(100, 2)))
model.add(Dense(in_out_neurons))
model.add(Activation("linear"))
model.compile(loss="mean_squared_error",
optimizer="rmsprop",
metrics=['accuracy'])
(X_train, y_train), (X_test, y_test) = train_test_split(data)
model.fit(X_train, y_train, batch_size=700, nb_epoch=50, validation_data=(X_test, y_test), verbose=1)
score = model.evaluate(X_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
predicted = model.predict(X_test, batch_size=700)
# and maybe plot it
pd.DataFrame(predicted).to_csv("predicted.csv")
| pd.DataFrame(y_test) | pandas.DataFrame |
# Copyright (c) 2020 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on 2020-09-29 18:41
@author: a002028
"""
import pandas as pd
from stations.utils import decmin_to_decdeg, decdeg_to_decmin, round_value, transform_ref_system
from stations.validators.validator import Validator, ValidatorLog
class SweRef99tmValidator(Validator):
"""
Coordinates in SWEREF 99TM are mandatory information for any list
"""
def __init__(self, *args, **kwargs):
super(SweRef99tmValidator, self).__init__()
for key, item in kwargs.items():
setattr(self, key, item)
def validate(self, list_obj, **kwargs):
"""
:param list_obj: stations.handler.List
:return:
"""
self.message(self.__class__.__name__, 'Running validation on list: %s' % list_obj.name)
report = {'approved': {},
'disapproved': {}}
if list_obj.has_values(self.lat_key) and list_obj.has_values(self.lon_key):
list_obj.boolean = list_obj.get(self.lat_key).ne('') & list_obj.get(self.lon_key).ne('')
for name, north, east in zip(list_obj.get('statn', boolean=True),
list_obj.get(self.lat_key, boolean=True),
list_obj.get(self.lon_key, boolean=True)):
report['approved'][name] = (north, east)
list_obj.boolean = list_obj.get(self.lat_key).eq('') | list_obj.get(self.lon_key).eq('')
for name, north, east in zip(list_obj.get('statn', boolean=True),
list_obj.get(self.lat_key, boolean=True),
list_obj.get(self.lon_key, boolean=True)):
report['disapproved'][name] = (north, east)
else:
for name in list_obj.get('statn'):
report['disapproved'][name] = False
if any(report['disapproved']) and self.fill_in_new_values:
self._calculate_coord_values(list_obj)
ValidatorLog.update_info(
list_name=list_obj.get('name'),
validator_name=self.name,
info=report,
)
self.message(self.__class__.__name__, 'Validation complete\n')
def _calculate_coord_values(self, list_obj):
"""
:param list_obj:
:return:
"""
if not list_obj.has_values(self.lat_key):
d = {self.lat_key: pd.Series([''] * list_obj.length).rename(self.lat_key),
self.lon_key: pd.Series([''] * list_obj.length).rename(self.lon_key)}
list_obj.set_attributes(**d)
list_obj.boolean = list_obj.get(self.lat_key).eq('')
if not all(list_obj.boolean):
return
if not list_obj.has_values('lat_dd'):
if list_obj.has_values('lat_dm'):
if not list_obj.has_attribute('lat_dd'):
d = {'lat_dd': | pd.Series([''] * list_obj.length) | pandas.Series |
import glob
import json
import pathlib
import subprocess
from decimal import Decimal
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as offline
import xmltodict as xd
# offline.init_notebook_mode()
ROOTDIR = "/home/jovyan/data/protein_pka"
pathlib.Path(f'{ROOTDIR}/result/pdb_meta').mkdir(parents=True, exist_ok=True)
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def reverse_scientic_notation(x):
"""Change string x back to PDB ID.
Example
-------
'5e+28' -> '5E28'
'10000000.0' -> '1E07'
"""
x = f"{Decimal(x):.0E}".split("+")
if len(x[1]) == 1:
x[1] = f"0{x[1]}"
return "".join(x)
def unpack_list_in_df(df, col_target):
# Flatten columns of lists
col_flat = [item for sublist in df[col_target] for item in sublist]
# Row numbers to repeat
lens = df[col_target].apply(len)
vals = range(df.shape[0])
ilocations = np.repeat(vals, lens)
# Replicate rows and add flattened column of lists
cols = [i for i, c in enumerate(df.columns) if c != col_target]
new_df = df.iloc[ilocations, cols].copy()
new_df[col_target] = col_flat
return new_df
# # pK values of proteins with Uniprot IDs
if not pathlib.Path(f"{ROOTDIR}/pK_fixed.csv").is_file():
pK = pd.read_csv(f"{ROOTDIR}/pka_cleaned_merged.csv")
# fix IDs automatically converted to scientific notations
pK_mask = (pK["protein"].str.len() != 4)
pK_wrong = pK[pK_mask]
pK.loc[pK_mask, "protein"] = pK_wrong["protein"].apply(reverse_scientic_notation)
pK.to_csv(f"{ROOTDIR}/pK_fixed.csv", index=False)
pK = pd.read_csv(f"{ROOTDIR}/pK_fixed.csv")
print(f'There are altogether {len(pK["protein"].unique())} unique PDB IDs.')
# # Get general PDB annotation
if not pathlib.Path(f"{ROOTDIR}/result/pdb_meta/pdb_general.json").is_file():
# downloading files using PDB's RESTful API
url = "https://www.rcsb.org/pdb/rest/describePDB?structureId="
pdb_urls = [url + ",".join(pdb) for pdb in chunks(pK.protein.unique(), 1000)]
with open(f"{ROOTDIR}/result/pdb_meta/url_general.list", "w") as f:
f.write("\n".join(pdb_urls))
r = subprocess.run(["aria2c", "--input-file",
f"{ROOTDIR}/result/pdb_meta/url.list"])
# merging downloaded files into one json
filenames = glob.iglob(f"{ROOTDIR}/result/pdb_meta/describePDB*")
ans = {}
for filename in filenames:
with open(filename, "r") as f:
pdb = xd.parse(f.read())
for node in pdb['PDBdescription']['PDB']:
ans[node["@structureId"]] = node
with open(f"{ROOTDIR}/result/pdb_meta/pdb_general.json", "w") as f:
f.write(json.dumps(ans))
PDB_general = pd.read_json(f"{ROOTDIR}/result/pdb_meta/pdb_general.json").T
PDB_general = PDB_general[PDB_general["@status"] == "CURRENT"]
PDB_general = PDB_general[["@structureId", "@deposition_date", "@expMethod", "@resolution"]].reset_index(drop=True)
print(f"The general PDB annotation for {PDB_general.shape[0]} structures were downloaded.")
# PDB_general.sample(1)
# # Get PDB entity information
if not pathlib.Path(f"{ROOTDIR}/result/pdb_meta/pdb_entity.json").is_file():
url = "https://www.rcsb.org/pdb/rest/describeMol?structureId="
pdb_urls = [url + ",".join(pdb) for pdb in chunks(pK.protein.unique(), 1000)]
with open(f"{ROOTDIR}/result/pdb_meta/url_entity.list", "w") as f:
f.write("\n".join(pdb_urls))
r = subprocess.run(["aria2c", "--input-file",
f"{ROOTDIR}/result/pdb_meta/url.list"])
filenames = glob.iglob(f"{ROOTDIR}/result/pdb_meta/describeMol*")
ans = {}
for filename in filenames:
with open(filename, "r") as f:
pdb = xd.parse(f.read())
for node in pdb["molDescription"]["structureId"]:
ans[node["@id"]] = node
with open(f"{ROOTDIR}/result/pdb_meta/pdb_entity.json", "w") as f:
f.write(json.dumps(ans))
PDB_entity = pd.read_json(f"{ROOTDIR}/result/pdb_meta/pdb_entity.json").T
print(f"The PDB entity annotation for {PDB_entity.shape[0]} structures were downloaded.")
PDB_entity_single = PDB_entity[PDB_entity["polymer"].apply(lambda x: isinstance(x, dict))]
PDB_entity_multi = PDB_entity[PDB_entity["polymer"].apply(lambda x: isinstance(x, list))]
# Unpack list in cell
PDB_entity_multi = unpack_list_in_df(PDB_entity_multi, "polymer")
PDB_entity = pd.concat([PDB_entity_single, PDB_entity_multi], axis=0).reset_index(drop=True)
PDB_entity["@length"] = PDB_entity["polymer"].map(lambda x: x["@length"])
PDB_entity["@chain"] = PDB_entity["polymer"].map(lambda x: x["chain"]) # dict
PDB_entity["@weight"] = PDB_entity["polymer"].map(lambda x: x["@weight"])
PDB_entity["@Taxonomy"] = [x["Taxonomy"] if "Taxonomy" in x else np.nan for x in PDB_entity["polymer"]]
PDB_entity["@synonym"] = [x["synonym"] if "synonym" in x else np.nan for x in PDB_entity["polymer"]]
PDB_entity["@uniprot"] = [x["macroMolecule"] if "macroMolecule" in x else np.nan for x in PDB_entity["polymer"]]
PDB_entity = PDB_entity.drop("polymer", axis=1).dropna()
# represent `@chain` in a way that's easier to manipulate
PDB_entity["@chain"] = [[x] if isinstance(x, dict) else x for x in PDB_entity["@chain"]]
PDB_entity["@chain"] = PDB_entity["@chain"].map(lambda x: "".join(sorted([y["@id"] for y in x])))
PDB_entity["@chain"] = PDB_entity["@chain"].str.upper()
# same goes for `@synonym`
PDB_entity["@synonym"] = [[x] if isinstance(x, dict) else x for x in PDB_entity["@synonym"]]
PDB_entity["@synonym"] = PDB_entity["@synonym"].map(lambda x: "".join(sorted([y["@name"] for y in x])))
PDB_entity["@synonym"] = PDB_entity["@synonym"].str.upper()
# and uniprot
PDB_entity["@uniprot"] = [[x] if isinstance(x, dict) else x for x in PDB_entity["@uniprot"]]
PDB_entity["@uniprot"] = PDB_entity["@uniprot"].map(lambda x: [y["accession"]["@id"] for y in x])
PDB_entity = unpack_list_in_df(PDB_entity, "@uniprot")
print(f"After unpacking, there are {PDB_entity.shape[0]} entries in PDB_entity.")
# # Convert to Ensembl Gene ID
# ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/by_organism/HUMAN_9606_idmapping.dat.gz
annotation = pd.read_csv("/home/jovyan/data/annotation/uniprot_id_map/HUMAN_9606_idmapping.csv")
uniprot_ensembl = annotation[annotation["ID_type"] == "Ensembl"]
PDB = | pd.merge(PDB_entity, uniprot_ensembl, left_on=PDB_entity["@uniprot"], right_on=uniprot_ensembl["UniProtKB_AC"]) | pandas.merge |
# standard libraries
import os
# third-party libraries
import pandas as pd
# local imports
from .. import count_data
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class TestCsvToDf:
"""
Tests converting a csv with various headers into a processible DataFrame
"""
def test_timestamp(self):
"""
Check if a csv w/ a timestamp is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_timestamp.csv')
element_id = 'tagID'
timestamp = 'timestamp'
lat = 'lat'
lon = 'lon'
test_df = count_data.csv_to_df(data, element_id=element_id, timestamp=timestamp, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == -6761865716520410554
def test_timestamp_ba(self):
"""
Check if a csv w/ a timestamp and grouped counts is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_timestamp_ba.csv')
element_id = 'tagID'
timestamp = 'timestamp'
boardings = 'boardings'
alightings = 'alightings'
lat = 'lat'
lon = 'lon'
test_df = count_data.csv_to_df(data, element_id=element_id, timestamp=timestamp,
boardings=boardings, alightings=alightings, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == 7008548250528393651
def test_session(self):
"""
Check if a csv w/ session times is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_session.csv')
element_id = 'MacPIN'
session_start = 'SessionStart_Epoch'
session_end = 'SessionEnd_Epoch'
lat = 'GPS_LAT'
lon = 'GPS_LONG'
test_df = count_data.csv_to_df(data, element_id=element_id, session_start=session_start, session_end=session_end, lat=lat, lon=lon)
assert | pd.util.hash_pandas_object(test_df) | pandas.util.hash_pandas_object |
import datetime
import pandas as pd
from mortgage_calculator.plots import (monthly_payment_breakdown_plot,
mortgage_amortization_plot,
principal_vs_time_plot)
def test_monthly_payment_breakdown_plot():
actual_plot = monthly_payment_breakdown_plot(home_cost=300_000,
interest_rate_pct=3.0,
mort_len_yr=30,
down_payment_type='Dollars',
down_payment_value=30_000,
prop_tax_amt=3_000,
prop_tax_type='Dollars',
home_ins=300,
hoa_fees=30,
pmi_percent=0.5)
actual_data = actual_plot.data.Amount
expected_data = pd.Series([1138.33, 250.00, 300.00, 30.00, 112.50])
assert actual_data.equals(expected_data)
def test_mortgage_amortization_plot():
actual_plot = mortgage_amortization_plot(300_000, 3.0, 30, 'Dollars', 30_000, 'Dollars', 3_000, 300, 30, 0.5,
datetime.date(2020, 1, 1))
actual_data = actual_plot.data.loc[0:4, 'payment_value'].round(2)
expected_data = | pd.Series([5160.83, 5794.03, 5970.26, 6151.85, 6338.97]) | pandas.Series |
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
base = pd.read_csv('autos.csv', encoding = 'ISO-8859-1')
base = base.drop('dateCrawled', axis = 1)
base = base.drop('dateCreated', axis = 1)
base = base.drop('nrOfPictures', axis = 1)
base = base.drop('postalCode', axis = 1)
base = base.drop('lastSeen', axis = 1)
base['name'].value_counts()
base = base.drop('name', axis = 1)
base['seller'].value_counts()
base = base.drop('seller', axis = 1)
base['offerType'].value_counts()
base = base.drop('offerType', axis = 1)
i1 = base.loc[base.price <= 10]
base.price.mean()
base = base[base.price > 10]
i2 = base.loc[base.price > 350000]
base = base.loc[base.price < 350000]
base.loc[pd.isnull(base['vehicleType'])]
base['vehicleType'].value_counts() # limousine
base.loc[ | pd.isnull(base['gearbox']) | pandas.isnull |
import numpy as np
import pandas as pd
from distance import get_similar_questions
from preprocessing import TFIDFEmbedding, BertEmbedding
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="bs4")
def find_similar_comments(df, threshold, k, embedding, return_dist=True):
comments = df.comment_id.values
step = df.step_id.values
q = df.is_question.values
if embedding == "bert":
bert = BertEmbedding(df)
vectorized_texts = bert.evaluate()
elif embedding == "tfidf":
tfidf = TFIDFEmbedding(df)
vectorized_texts = tfidf.evaluate()
else:
raise Exception("The wrong method to get embeddings. Use 'bert' or 'tfidf'")
Q = vectorized_texts[q]
top_k_index, top_k_dist = get_similar_questions(Q, vectorized_texts, threshold, k)
comm_idx = np.append(comments, -1)[top_k_index]
df_similarity = | pd.DataFrame(comm_idx) | pandas.DataFrame |
import numpy as np
import pandas as pd
import os
import time
from ldsc_polyfun import jackknife, regressions, sumstats, ldscore, parse
import logging
from copy import deepcopy
from tqdm import tqdm
from polyfun_utils import Logger, check_package_versions, set_snpid_index, configure_logger, get_file_name
from polyfun_utils import SNP_COLUMNS
from pyarrow import ArrowIOError
from pyarrow.lib import ArrowInvalid
from compute_ldscores_from_ld import compute_ldscores_chr
import tempfile
MAX_CHI2=80
def __filter__(fname, noun, verb, merge_obj):
merged_list = None
if fname:
f = lambda x,n: x.format(noun=noun, verb=verb, fname=fname, num=n)
x = parse.FilterFile(fname)
c = 'Read list of {num} {noun} to {verb} from {fname}'
logging.info(f(c, len(x.IDList)))
merged_list = merge_obj.loj(x.IDList)
len_merged_list = len(merged_list)
if len_merged_list > 0:
c = 'After merging, {num} {noun} remain'
logging.info(f(c, len_merged_list))
else:
error_msg = 'No {noun} retained for analysis'
raise ValueError(f(error_msg, 0))
return merged_list
def splash_screen():
print('*********************************************************************')
print('* PolyFun (POLYgenic FUNctionally-informed fine-mapping)')
print('* Version 1.0.0')
print('* (C) 2019-2021 <NAME>')
print('*********************************************************************')
print()
def check_args(args):
#verify that the requested computations are valid
mode_params = np.array([args.compute_h2_L2, args.compute_ldscores, args.compute_h2_bins])
if np.sum(mode_params)==0:
raise ValueError('must specify at least one of --compute-h2-L2, --compute-ldscores, --compute-h2-bins')
if args.compute_h2_L2 and args.compute_h2_bins and not args.compute_ldscores:
raise ValueError('cannot use both --compute-h2_L2 and --compute_h2_bins without also specifying --compute-ldscores')
if args.chr is not None:
if args.compute_h2_L2 or args.compute_h2_bins:
raise ValueError('--chr can only be specified when using only --compute-ldscores')
if args.bfile_chr is not None:
if not args.compute_ldscores:
raise ValueError('--bfile-chr can only be specified when using --compute-ldscores')
if args.ld_ukb:
if not args.compute_ldscores:
raise ValueError('--ld-ukb can only be specified when using --compute-ldscores')
if args.no_partitions:
if not args.compute_h2_L2:
raise ValueError('cannot specify --no-partitions without specifying --compute-h2-L2')
if args.compute_ldscores:
raise ValueError('cannot specify both --no-partitions and --compute-ldscores')
if args.compute_h2_bins:
raise ValueError('cannot specify both --no-partitions and --compute-h2-bins')
if args.compute_ldscores and args.compute_h2_bins and not args.compute_h2_L2:
raise ValueError('cannot use both --compute-ldscores and --compute_h2_bins without also specifying --compute-h2-L2')
#verify partitioning parameters
if args.skip_Ckmedian and (args.num_bins is None or args.num_bins<=0):
raise ValueError('You must specify --num-bins when using --skip-Ckmedian')
#verify LD-score related parameters
if args.ld_dir is not None and not args.ld_ukb:
raise ValueError('You cannot specify --ld-dir without also specifying --ld-ukb')
if args.bfile_chr is not None and args.ld_ukb:
raise ValueError('You can specify only one of --bfile-chr and --ld-ukb')
if args.compute_ldscores:
if args.bfile_chr is None and not args.ld_ukb:
raise ValueError('You must specify either --bfile-chr or --ld-ukb when you specify --compute-ldscores')
if not args.ld_ukb and (args.ld_wind_cm is None and args.ld_wind_kb is None and args.ld_wind_snps is None):
args.ld_wind_cm = 1.0
logging.warning('no ld-wind argument specified. PolyFun will use --ld-cm 1.0')
if not args.compute_ldscores:
if not (args.ld_wind_cm is None and args.ld_wind_kb is None and args.ld_wind_snps is None):
raise ValueError('--ld-wind parameters can only be specified together with --compute-ldscores')
if args.keep is not None:
raise ValueError('--keep can only be specified together with --compute-ldscores')
if args.chr is not None:
raise ValueError('--chr can only be specified together with --compute-ldscores')
if args.compute_h2_L2:
if args.sumstats is None:
raise ValueError('--sumstats must be specified when using --compute-h2-L2')
if args.ref_ld_chr is None:
raise ValueError('--ref-ld-chr must be specified when using --compute-h2-L2')
if args.w_ld_chr is None:
raise ValueError('--w-ld-chr must be specified when using --compute-h2-L2')
if args.compute_h2_bins:
if args.sumstats is None:
raise ValueError('--sumstats must be specified when using --compute-h2-bins')
if args.w_ld_chr is None:
raise ValueError('--w-ld-chr must be specified when using --compute-h2-bins')
if args.ref_ld_chr is not None and not args.compute_ldscores:
raise ValueError('--ref-ld-chr should not be specified when using --compute-h2-bins, unless you also use --compute-ldscores')
return args
def check_files(args):
#check that required input files exist
if args.compute_h2_L2:
if not os.path.exists(args.sumstats):
raise IOError('Cannot find sumstats file %s'%(args.sumstats))
for chr_num in range(1,23):
get_file_name(args, 'ref-ld', chr_num, verify_exists=True, allow_multiple=True)
get_file_name(args, 'w-ld', chr_num, verify_exists=True)
get_file_name(args, 'annot', chr_num, verify_exists=True, allow_multiple=True)
if args.compute_ldscores:
if args.chr is None: chr_range = range(1,23)
else: chr_range = range(args.chr, args.chr+1)
for chr_num in chr_range:
if args.bfile_chr is not None:
get_file_name(args, 'bim', chr_num, verify_exists=True)
get_file_name(args, 'fam', chr_num, verify_exists=True)
get_file_name(args, 'bed', chr_num, verify_exists=True)
if not args.compute_h2_L2:
get_file_name(args, 'snpvar_ridge', chr_num, verify_exists=True)
get_file_name(args, 'bins', chr_num, verify_exists=True)
if args.compute_h2_bins and not args.compute_ldscores:
for chr_num in range(1,23):
get_file_name(args, 'w-ld', chr_num, verify_exists=True)
if not args.compute_h2_L2:
get_file_name(args, 'bins', chr_num, verify_exists=True)
class PolyFun:
def __init__(self):
pass
def run_ldsc(self, args, use_ridge, nn, keep_large, evenodd_split, n_blocks=2):
#prepare LDSC objects
log = Logger()
args.h2 = args.sumstats
args.ref_ld = None
args.w_ld = None
args.n_blocks = n_blocks
args.M = None
args.not_M_5_50 = True
#if not ridge, the we'll use the LD-scores of our bins
if not use_ridge:
args = deepcopy(args)
args.ref_ld_chr = args.output_prefix+'.'
#read input data
if use_ridge or not args.compute_ldscores or True:
M_annot, w_ld_cname, ref_ld_cnames, df_sumstats, _ = sumstats._read_ld_sumstats(args, log, args.h2)
else:
#TODO: Don't reload files if we don't have to...
M_annot = self.M
w_ld_cname = 'w_ld'
ref_ld_cnames = self.df_bins.columns
try:
df_sumstats = pd.read_parquet(args.sumstats)
except (ArrowIOError, ArrowInvalid):
df_sumstats = pd.read_table(args.sumstats, sep='\s+')
###merge everything together...
#prepare LD-scores for S-LDSC run
ref_ld = np.array(df_sumstats[ref_ld_cnames], dtype=np.float32)
sumstats._check_ld_condnum(args, log, ref_ld_cnames)
if df_sumstats.shape[0] < 200000:
logging.warning('number of SNPs is smaller than 200k; this is almost always bad.')
n_snp = len(df_sumstats)
n_blocks = np.minimum(n_snp, args.n_blocks)
n_annot = len(ref_ld_cnames)
if n_annot<=1:
raise ValueError('Only one annotation found')
chisq_max = max(0.001*df_sumstats['N'].max(), MAX_CHI2)
#prepare chi2 statistics
s = lambda x: np.array(x).reshape((n_snp, 1))
chisq = s(df_sumstats.Z**2).astype(np.float32)
ii = np.ravel(chisq < chisq_max)
df_sumstats = df_sumstats.loc[ii, :]
if np.any(~ii):
logging.info('Removed {M} SNPs with chi^2 > {C} ({N} SNPs remain)'.format(
C=chisq_max, N=np.sum(ii), M=n_snp-np.sum(ii)))
n_snp = np.sum(ii) # lambdas are late-binding, so this works
ref_ld = np.array(df_sumstats[ref_ld_cnames], dtype=np.float32)
chisq = chisq[ii].reshape((n_snp, 1))
#Run S-LDSC
self.ref_ld_cnames = [c for c in ref_ld_cnames.str[:-2] if c not in SNP_COLUMNS]
hsqhat = regressions.Hsq(chisq,
ref_ld,
s(df_sumstats[w_ld_cname]),
s(df_sumstats.N),
M_annot, n_blocks=n_blocks, intercept=None,
twostep=None, old_weights=True,
chr_num=df_sumstats['CHR'],
loco=use_ridge, ridge_lambda=None,
standardize_ridge=True,
approx_ridge=True,
num_chr_sets=2,
evenodd_split=evenodd_split,
nn=nn,
keep_large=keep_large,
nnls_exact=args.nnls_exact
)
#save the results object
if use_ridge:
self.hsqhat_ridge = hsqhat
else:
self.hsqhat = hsqhat
def load_annotations_file(self, args, chr_num, use_ridge):
#load annotations file for this chromosome
if use_ridge:
annot_filenames = get_file_name(args, 'annot', chr_num, allow_multiple=True)
else:
annot_filenames = [get_file_name(args, 'bins', chr_num)]
#load annotation file(s)
df_annot_chr_list = []
for annot_filename in annot_filenames:
try:
df_annot_chr = pd.read_parquet(annot_filename)
except (ArrowIOError, ArrowInvalid):
df_annot_chr = pd.read_table(annot_filename)
df_annot_chr_list.append(df_annot_chr)
if len(df_annot_chr_list)==1:
df_annot_chr = df_annot_chr_list[0]
else:
for df in df_annot_chr_list[1:]:
for snp_col in SNP_COLUMNS:
if (df.shape[0] != df_annot_chr_list[0].shape[0]) or (np.any(df[snp_col] != df_annot_chr_list[0][snp_col])):
raise ValueError('Different annotation files of chromosome %d must be perfectly aligned'%(chr_num))
df.drop(columns=['CM'], inplace=True, errors='ignore')
df.drop(columns=SNP_COLUMNS, inplace=True, errors='raise')
df_annot_chr = pd.concat(df_annot_chr_list, axis=1)
#make sure all required columns were found
df_annot_chr.drop(columns=['CM'], inplace=True, errors='ignore')
found_missing_col = False
for colname in SNP_COLUMNS:
if colname not in df_annot_chr.columns:
logging.error('%s has a missing column: %s'%(annot_filename, colname))
found_missing_col = True
if found_missing_col:
raise ValueError('Missing columns found in %s'%(annot_filename))
#subset annotations if requested
if args.anno is not None:
anno_to_use = args.anno.split(',')
assert np.all(np.isin(anno_to_use, df_annot_chr.columns))
df_annot_chr = df_annot_chr[SNP_COLUMNS + anno_to_use]
#if we have more annotations that ref-ld, it might mean that some annotations were removed, so remove them from here as well
if not np.all(np.isin(self.ref_ld_cnames, df_annot_chr.columns)):
raise ValueError('Annotation names in annotations file do not match the one in the LD-scores file')
if len(self.ref_ld_cnames) < len(df_annot_chr.columns) - len(SNP_COLUMNS):
df_annot_chr = df_annot_chr[SNP_COLUMNS + self.ref_ld_cnames]
#make sure that we get the same columns as the ones in the LD-score files
if not np.all([c for c in df_annot_chr.columns if c not in SNP_COLUMNS ]== self.ref_ld_cnames):
raise ValueError('Annotation names in annotations file do not match the one in the LD-scores file')
return df_annot_chr
def compute_snpvar_chr(self, args, chr_num, use_ridge):
#load annotations file from disk
df_annot_chr = self.load_annotations_file(args, chr_num, use_ridge)
#extract taus from a jknife object
if use_ridge:
hsqhat = self.hsqhat_ridge
jknife = hsqhat.jknife_ridge
#make sure that the chromosome exists in one set
found_chrom = np.any([chr_num in chr_set for chr_set in jknife.chromosome_sets])
if not found_chrom:
raise ValueError('not all chromosomes have a taus estimate - please make sure that the intersection of SNPs with sumstats and with annotations data spans all 22 human chromosomes')
#find the relevant set number
set_num=None
for chr_set_i, chr_set in enumerate(jknife.chromosome_sets):
if chr_num not in chr_set:
assert set_num is None
set_num = chr_set_i
if set_num is None:
raise ValueError('Could not find Ridge predictions for chromosome %d'%(chr_num))
#compute and return snpvar
taus = jknife.est_loco_ridge[set_num][:hsqhat.n_annot] / hsqhat.Nbar
else:
hsqhat = self.hsqhat
jknife = hsqhat.jknife
if len(jknife.est_loco) != 22:
raise ValueError('not all chromosomes have a taus estimate - please make sure that the intersection of SNPs with sumstats and with annotations data spans all 22 human chromosomes')
taus = jknife.est_loco[chr_num-1][:hsqhat.n_annot] / hsqhat.Nbar
#save the taus to disk
taus_output_file = get_file_name(args, ('taus_ridge' if use_ridge else 'taus_nn'), chr_num, verify_exists=False)
df_taus = pd.Series(taus, index=df_annot_chr.drop(columns=SNP_COLUMNS, errors='raise').columns)
df_taus.index.name = 'ANNOTATION'
df_taus.name = 'ANNOTATION_COEFFICIENT'
df_taus.to_csv(taus_output_file, header=True, index=True, sep='\t')
#compute and return the snp variances
df_snpvar_chr = df_annot_chr.drop(columns=SNP_COLUMNS, errors='raise').dot(taus)
df_snpvar_chr = df_snpvar_chr.to_frame(name='SNPVAR')
df_snpvar_chr = pd.concat((df_annot_chr[SNP_COLUMNS], df_snpvar_chr), axis=1)
return df_snpvar_chr
def compute_snpvar(self, args, use_ridge):
logging.info('Computing per-SNP h^2 for each chromosome...')
#iterate over chromosomes
df_snpvar_chr_list = []
for chr_num in tqdm(range(1,23)):
df_snpvar_chr = self.compute_snpvar_chr(args, chr_num, use_ridge=use_ridge)
df_snpvar_chr_list.append(df_snpvar_chr)
df_snpvar = pd.concat(df_snpvar_chr_list, axis=0)
df_snpvar.reset_index(inplace=True, drop=True)
#save snpvar to a class member
if use_ridge:
self.df_snpvar_ridge = df_snpvar
else:
self.df_snpvar = df_snpvar
def create_df_bins(self, bin_sizes, df_snpvar, df_snpvar_sorted=None, min_bin_size=10):
#sort df_snpvar if needed
if df_snpvar_sorted is None:
df_snpvar_sorted = df_snpvar['SNPVAR'].sort_values()
assert bin_sizes.sum() == df_snpvar_sorted.shape[0]
#rearrange bins to prevent very small bins
bin_i = len(bin_sizes)-1
while True:
#if the current bin is large enough, proceed to the previous one
if bin_sizes[bin_i] >= min_bin_size:
bin_i -= 1
if bin_i==0: break
continue
#Compare the effects of the weakest bin in the current bin, and the strongest bin in the previous bin
bin_start_ind = bin_sizes[:bin_i].sum()
weakest_bin_snp = df_snpvar_sorted.iloc[::-1].iloc[bin_start_ind]
strongest_lastbin_snp = df_snpvar_sorted.iloc[::-1].iloc[bin_start_ind-1]
num_snps_to_transfer = np.minimum(min_bin_size-bin_sizes[bin_i], bin_sizes[bin_i-1])
bin_sizes[bin_i] += num_snps_to_transfer
bin_sizes[bin_i-1] -= num_snps_to_transfer
#if we emptied the previous bin, delete it
if bin_sizes[bin_i-1]==0:
bin_sizes = np.concatenate((bin_sizes[:bin_i-1], bin_sizes[bin_i:]))
bin_i -= 1
#if the current bin is large enough, move to the previous one
if bin_sizes[bin_i] >= min_bin_size:
bin_i -= 1
if bin_i==0: break
#create df_bins
ind=0
df_bins = pd.DataFrame(index=df_snpvar_sorted.index)
for bin_i, bin_size in enumerate(bin_sizes):
snpvar_bin = np.zeros(df_bins.shape[0], dtype=np.bool)
snpvar_bin[ind : ind+bin_size] = True
df_bins['snpvar_bin%d'%(len(bin_sizes) - bin_i)] = snpvar_bin
ind += bin_size
assert np.all(df_bins.sum(axis=0) == bin_sizes)
df_bins = df_bins.iloc[:, ::-1]
assert df_bins.shape[0] == df_snpvar.shape[0]
assert np.all(df_bins.sum(axis=1)==1)
#reorder df_bins
df_bins = df_bins.loc[df_snpvar.index]
df_bins = pd.concat((df_snpvar[SNP_COLUMNS], df_bins), axis=1)
assert np.all(df_bins.index == df_snpvar.index)
return df_bins
def partition_snps_Ckmedian(self, args, use_ridge):
logging.info('Clustering SNPs into bins using the R Ckmeans.1d.dp package')
#try loading the Ckmeans.1d.dp package
try:
import rpy2
import rpy2.robjects.numpy2ri as numpy2ri
try:
from importlib import reload
reload(rpy2.robjects.numpy2ri)
except:
pass
import rpy2.robjects as ro
ro.conversion.py2ri = numpy2ri
numpy2ri.activate()
from rpy2.robjects.packages import importr
importr('Ckmeans.1d.dp')
median_seg_func = ro.r('Ckmedian.1d.dp')
mean_seg_func = ro.r('Ckmeans.1d.dp')
except:
logging.error('Could not load the R package Ckmeans.1d.dp. Either install it or rerun PolyFun with --skip-Ckmedian')
logging.error('')
raise
#access the right class member
if use_ridge:
df_snpvar = self.df_snpvar_ridge
else:
df_snpvar = self.df_snpvar
#sort df_snpvar
df_snpvar_sorted = df_snpvar['SNPVAR'].sort_values()
#perform the segmentation
if args.num_bins is None or args.num_bins<=0:
logging.info('Determining the optimal number of bins (if this is slow, consider using --num-bins 20 (or some other number))')
seg_obj = median_seg_func(df_snpvar_sorted.values, k=np.array([5,30]))
else:
seg_obj = median_seg_func(df_snpvar_sorted.values, k=args.num_bins)
bin_sizes = np.array(seg_obj.rx2('size')).astype(np.int)
num_bins = len(bin_sizes)
logging.info('Ckmedian.1d.dp partitioned SNPs into %d bins'%(num_bins))
#define df_bins
df_bins = self.create_df_bins(bin_sizes, df_snpvar, df_snpvar_sorted=df_snpvar_sorted)
return df_bins
def partition_snps_Kmeans(self, args, use_ridge):
logging.info('Clustering SNPs into bins using K-means clustering with %d bins'%(args.num_bins))
#make sure that we can run K-means clustering
assert args.num_bins is not None and args.num_bins>0
try:
from sklearn.cluster import KMeans
except ImportError:
raise ImportError('sklearn not properly installed. Please reinstall it')
#access the right class member
if use_ridge: df_snpvar = self.df_snpvar_ridge
else: df_snpvar = self.df_snpvar
#perform K-means clustering
kmeans_obj = KMeans(n_clusters=args.num_bins)
kmeans_obj.fit(df_snpvar[['SNPVAR']])
assert kmeans_obj.cluster_centers_.shape[0] == args.num_bins
#Make sure that clusters are contiguous
bins_order = np.argsort(kmeans_obj.cluster_centers_[:,0])
for bin_i, cluster_label in enumerate(bins_order[:-1]):
next_cluster_label = bins_order[bin_i+1]
assert df_snpvar.loc[kmeans_obj.labels_==cluster_label, 'SNPVAR'].max() <= df_snpvar.loc[kmeans_obj.labels_==next_cluster_label, 'SNPVAR'].min()
#define bin_sizes
bin_sizes = np.bincount(kmeans_obj.labels_)[bins_order]
#define df_bins
df_bins = self.create_df_bins(bin_sizes, df_snpvar, df_snpvar_sorted=None)
return df_bins
def partition_snps_to_bins(self, args, use_ridge):
#if skip_ckmedian was specified, run regular K-means
if args.skip_Ckmedian:
self.df_bins = self.partition_snps_Kmeans(args, use_ridge=use_ridge)
else:
self.df_bins = self.partition_snps_Ckmedian(args, use_ridge=use_ridge)
def save_bins_to_disk(self, args):
logging.info('Saving SNP-bins to disk')
for chr_num in tqdm(range(1,23)):
#save bins file to disk
df_bins_chr = self.df_bins.query('CHR==%d'%(chr_num))
bins_chr_file = get_file_name(args, 'bins', chr_num, verify_exists=False)
df_bins_chr.to_parquet(bins_chr_file, index=False)
#save M files to disk
M_chr_file = get_file_name(args, 'M', chr_num, verify_exists=False)
M_chr = df_bins_chr.drop(columns=SNP_COLUMNS).sum(axis=0).values
np.savetxt(M_chr_file, M_chr.reshape((1, M_chr.shape[0])), fmt='%i')
def save_snpvar_to_disk(self, args, use_ridge, constrain_range):
if constrain_range:
logging.info('Saving constrained SNP variances to disk')
else:
logging.info('Saving SNP variances to disk')
#determine which df_snpvar to use
if use_ridge: df_snpvar = self.df_snpvar_ridge
else: df_snpvar = self.df_snpvar
#constrain the ratio between the largest and smallest snp-var
if constrain_range:
df_snpvar = df_snpvar.copy()
h2_total = df_snpvar['SNPVAR'].sum()
min_snpvar = df_snpvar['SNPVAR'].max() / args.q
df_snpvar.loc[df_snpvar['SNPVAR'] < min_snpvar, 'SNPVAR'] = min_snpvar
df_snpvar['SNPVAR'] *= h2_total / df_snpvar['SNPVAR'].sum()
assert np.isclose(df_snpvar['SNPVAR'].sum(), h2_total)
#merge snpvar with sumstats
try:
df_sumstats = | pd.read_parquet(args.sumstats) | pandas.read_parquet |
import logging
from configparser import SectionProxy
from pathlib import Path
from typing import List
import pandas as pd
from ch_bin.core.features.coverage import parse_coverages
from ch_bin.core.features.kmer_count import count_kmers
from ch_bin.core.features.preprocess import (
filter_short_contigs,
get_contig_lengths,
split_contigs,
)
from ch_bin.core.features.scm_gene import identify_marker_genomes
logger = logging.getLogger(__name__)
def create_dataset(
contig_fasta: Path,
coverage_file: Path,
operating_dir: Path,
kmer_ks: List[int],
kmer_counter_tool: str = "kmer_counter",
short_contig_threshold: int = 1000,
coverage_thresh: float = 0.4,
select_percentile: float = 0.95,
seed_contig_split_len: int = 10000,
) -> Path:
"""
Create a dataset using the given input and configuration.
:param contig_fasta: Contig file to use for kmer counting.
:param coverage_file: Coverage file containing abundance information.
:param operating_dir: Directory to write temp files to.
:param kmer_ks: list of k values of the kmers to count.
:param kmer_counter_tool: kmer counter tool to use. (kmer_counter/seq2vec)
:param short_contig_threshold: Threshold to filter the short contigs.
:param coverage_thresh: Threshold for a hit to be considered for the seed frequency distribution.
:param select_percentile: Percentile to use for selecting the number of seeds.
For example, 0.5 will take the median number of seeds.
:param seed_contig_split_len: Length to split the seed contigs.
:return: Path of merged dataset with initial bins marked.
"""
filtered_fasta = operating_dir / "filtered-contigs.fasta"
split_fasta = operating_dir / "split-contigs.fasta"
output_dataset_csv = operating_dir / "features.csv"
kmers_operation_dir = operating_dir / "kmers"
scm_operation_dir = operating_dir / "scm"
operating_dir.mkdir(parents=True, exist_ok=True)
kmers_operation_dir.mkdir(parents=True, exist_ok=True)
scm_operation_dir.mkdir(parents=True, exist_ok=True)
# 01. Calculate coverages
logger.info(">> Calculating coverages...")
df_coverages = parse_coverages(coverage_file)
# 02. Remove short contigs
logger.info(">> Removing contigs shorter than %s bp.", short_contig_threshold)
contig_lengths = get_contig_lengths(contig_fasta)
removed_contigs = filter_short_contigs(contig_fasta, filtered_fasta, threshold=short_contig_threshold)
logger.info("Removed %s (of %s) short contigs.", len(removed_contigs), len(contig_lengths))
# 03. Perform single-copy marker gene analysis
logger.info(">> Performing single-copy marker gene analysis...")
seed_clusters = identify_marker_genomes(
filtered_fasta,
contig_lengths,
scm_operation_dir,
coverage_thresh=coverage_thresh,
select_percentile=select_percentile,
)
logger.info("Found %s seeds.", len(seed_clusters))
# 04. Identify seed contigs and split them
logger.info(">> Splitting all contigs to contain %s bp.", seed_contig_split_len)
sub_contigs = split_contigs(filtered_fasta, split_fasta, seed_clusters, split_len=seed_contig_split_len)
logger.info("Found %s contigs after splitting.", len(sub_contigs))
# 05. Calculate normalized kmer frequencies
logger.info(">> Calculating normalized kmer frequencies using %s ...", kmer_counter_tool)
df_kmer_freq = None
for i, kmer_k in enumerate(kmer_ks):
df_curr = count_kmers(split_fasta, kmers_operation_dir, k=kmer_k, tool=kmer_counter_tool)
if df_kmer_freq is None:
df_kmer_freq = df_curr
continue
df_kmer_freq = df_kmer_freq.merge(
df_curr, left_on="CONTIG_NAME", right_on="CONTIG_NAME", suffixes=(f"x_{i}", f"y_{i}")
)
assert df_kmer_freq is not None, "No k-mer k values provided"
# 06. Create a dataset with the initial cluster information
logger.info(">> Creating a dataset with the initial cluster information...")
indexed_seed_clusters = zip(seed_clusters, range(len(seed_clusters)))
df_seed_clusters = | pd.DataFrame.from_records(indexed_seed_clusters, columns=["PARENT_NAME", "CLUSTER"]) | pandas.DataFrame.from_records |
# import Ipynb_importer
import pandas as pd
from .public_fun import *
# 全局变量
class glv:
def _init():
global _global_dict
_global_dict = {}
def set_value(key,value):
_global_dict[key] = value
def get_value(key,defValue=None):
try:
return _global_dict[key]
except KeyError:
return defValue
## fun_01to06
class fun_01to06(object):
def __init__(self, data):
self.cf = [2, 1, 1, 17, 1, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"起始符",
"命令标识",
"应答标志",
"唯一识别码",
"数据单元加密方式",
"数据单元长度"
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"起始符":hex2str(self.oj["起始符"]),
"命令标识":dict_list_replace('02', self.oj['命令标识']),
"应答标志":dict_list_replace('03', self.oj['应答标志']),
"唯一识别码":hex2str(self.oj["唯一识别码"]),
"数据单元加密方式":dict_list_replace('05', self.oj['数据单元加密方式']),
"数据单元长度":hex2dec(self.oj["数据单元长度"]),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
self.mo = self.oj["命令标识"]
glv.set_value('data_f', self.next)
glv.set_value('data_mo', self.mo)
glv.set_value('data_01to07', self.o)
print('fun_01to06 done!')
## fun_07
class fun_07:
def __init__(self, data):
self.mo = glv.get_value("data_mo")
if self.mo == '01':
self.o = fun_07_01(glv.get_value('data_f'))
elif self.mo == '02' or self.mo == '03':
self.o = fun_07_02(glv.get_value('data_f'))
elif self.mo == '04':
self.o = fun_07_04(glv.get_value('data_f'))
elif self.mo == '05':
self.o = fun_07_05(glv.get_value('data_f'))
elif self.mo == '06':
self.o = fun_07_06(glv.get_value('data_f'))
else :
print('命令标识:',self.mo,'有误')
self.c = fun_07_cursor(glv.get_value('data_f'))
self.oj = dict(self.o.oj, **self.c.oj)
self.oj2 = {'数据单元':self.oj}
self.ol = pd.merge(self.o.ol, self.c.ol, left_index=True, right_index=True)
self.pj = dict(self.o.pj, **self.c.pj)
self.pj2 = {'数据单元':self.pj}
self.pl = pd.merge(self.o.pl, self.c.pl, left_index=True, right_index=True)
print('fun_07 done!')
## fun_07_01
class fun_07_01(object):
def __init__(self, data):
self.cf = [6, 2, 20, 1, 1]
self.cf_a = hexlist2(self.cf)
self.n = hex2dec(data[self.cf_a[3]:self.cf_a[4]])
self.m = hex2dec(data[self.cf_a[4]:self.cf_a[5]])
self.cf.append(self.n*self.m)
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"数据采集时间",
"登入流水号",
"ICCID",
"可充电储能子系统数",
"可充电储能系统编码长度",
"可充电储能系统编码",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.oj2 = {'车辆登入': self.oj}
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"数据采集时间":get_datetime(self.oj['数据采集时间']),
"登入流水号":hex2dec(self.oj['登入流水号']),
"ICCID":hex2str(self.oj['ICCID']),
"可充电储能子系统数":hex2dec(self.oj['可充电储能子系统数']),
"可充电储能系统编码长度":hex2dec(self.oj['可充电储能系统编码长度']),
"可充电储能系统编码":fun_07_01.fun_07_01_06(self.oj['可充电储能系统编码'], self.oj['可充电储能子系统数'], self.oj['可充电储能系统编码长度']),
}
self.pj2 = {'车辆登入': self.pj}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_01', self.o)
print('fun_07_01 done!')
def fun_07_01_06(data, n, m):
if m=='00':
return "NA"
else :
n = hex2dec(n)
m = hex2dec(m) * 2
output = []
for i in range(n):
output_unit = hex2str(data[i * m: i* m +m])
output.append(output_unit)
return output
## fun_07_04
class fun_07_04(object):
def __init__(self, data):
self.cf = [6, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"登出时间",
"登出流水号",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"登出时间":get_datetime(self.oj['登出时间']),
"登出流水号":hex2dec(self.oj['登出流水号']),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_04', self.o)
print('fun_07_04 done!')
## fun_07_05
class fun_07_05(object):
def __init__(self, data):
self.cf = [6, 2, 12, 20, 1]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"平台登入时间",
"登入流水号",
"平台用户名",
"平台密码",
"加密规则",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = | pd.DataFrame([self.oj]) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import statistics
from datetime import datetime, timedelta
def _list_build(done_list, total_list, deliveries):
_case = done_list[-1]
_case_list = []
while _case < total_list[-1]:
_case += deliveries
if _case <= total_list[-1]:
_case_list.append(_case)
else:
if _case_list:
_case_list.append(
_case_list[-1]+(total_list[-1] - _case_list[-1]))
else:
_case_list.append(_case+(total_list[-1] - _case))
return _case_list
def forecasting(df):
done_list = df['done'].tolist()
date_list = df['date'].tolist()
total_list = df['total'].tolist()
deliveries = []
if set(done_list) == {0}:
return
deliveries = [done_list[i] - done_list[i-1]
for i in range(1, len(done_list))]
deliveries.append(done_list[0])
if len(deliveries) == 0:
return
_pencentil50 = int(np.percentile(deliveries, 50))
_pencentil75 = int(np.percentile(deliveries, 75))
deliveries = list(set(filter(lambda a: a != 0, deliveries)))
deliveries.sort()
if len(deliveries) == 0:
return
df_best = pd.DataFrame({'date': [date_list[-1]], 'best': [done_list[-1]]})
df_worst = pd.DataFrame(
{'date': [date_list[-1]], 'worst': [done_list[-1]]})
df_percentile_seventy_five = pd.DataFrame(
{'date': [date_list[-1]], 'seventy_five': [done_list[-1]]})
df_percentile_fifty = pd.DataFrame(
{'date': [date_list[-1]], 'fifty': [done_list[-1]]})
_best_list = _list_build(done_list, total_list, deliveries[-1])
_worst_list = _list_build(done_list, total_list, deliveries[0])
_percentile_seventy_five_list = _list_build(done_list, total_list, _pencentil75)
_percentile_fifty_list = _list_build(done_list, total_list, _pencentil50)
if len(date_list) > 1:
_cycle_delta = datetime.strptime(date_list[1], '%d/%m/%Y') - datetime.strptime(date_list[0], '%d/%m/%Y')
else:
_cycle_delta = datetime.strptime(date_list[0], '%d/%m/%Y')
datetime_object = datetime.strptime(date_list[-1], '%d/%m/%Y')
_datetime_object = datetime_object
for b in _best_list:
if len(date_list) > 1:
_datetime_object = _datetime_object + timedelta(days=_cycle_delta.days)
else:
_datetime_object = _datetime_object + timedelta(days=7)
df_best = df_best.append(pd.DataFrame(
{"date": [_datetime_object.strftime("%d/%m/%Y")], "best": [b], }), ignore_index=True)
_datetime_object = datetime_object
for w in _worst_list:
_datetime_object = _datetime_object + timedelta(days=7)
df_worst = df_worst.append(pd.DataFrame(
{"date": [_datetime_object.strftime("%d/%m/%Y")], "worst": [w], }), ignore_index=True)
_datetime_object = datetime_object
for p in _percentile_seventy_five_list:
if len(date_list) > 1:
_datetime_object = _datetime_object + timedelta(days=_cycle_delta.days)
else:
_datetime_object = _datetime_object + timedelta(days=7)
df_percentile_seventy_five = df_percentile_seventy_five.append(pd.DataFrame(
{"date": [_datetime_object.strftime("%d/%m/%Y")], "seventy_five": [p], }), ignore_index=True)
_datetime_object = datetime_object
for pf in _percentile_fifty_list:
if len(date_list) > 1:
_datetime_object = _datetime_object + timedelta(days=_cycle_delta.days)
else:
_datetime_object = _datetime_object + timedelta(days=7)
df_percentile_fifty = df_percentile_fifty.append(pd.DataFrame(
{"date": [_datetime_object.strftime("%d/%m/%Y")], "fifty": [pf], }), ignore_index=True)
df_bw = | pd.merge(df_worst, df_best, how='outer', on='date') | pandas.merge |
# Created Date: 12/09/2018
# Modified Date:
#
# Implements the Early Warning Alert Algorithm of Fire Crisis Classification module
# based on the forecasting weather data from FMI. It calculates the Fire Weather Index
# of Canadian Rating System.
# Also, it calculates the Fire Overall Crisis Level (PFRCL_Predicted Fire Crisis Level)
# based on FWI over the 9 days period
#
#----------------------------------------------------------------------------------------------------------
# Inputs: a) ftp files from EFFIS
#
# Outputs: TOP104_METRIC_REPORT which contains the ....
#
# Early Warning Alert Algorithm from Crisis Classification
#----------------------------------------------------------------------------------------------------------
#
import json, time, re
import os, errno, sys
import glob, gzip, pickle, shutil, tempfile, re, tarfile
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from bus.bus_producer import BusProducer
from pathlib import Path
from datetime import datetime, timedelta
from collections import OrderedDict
from CRCL.FireCRisisCLassification.Topic104_Metric_Report import Top104_Metric_Report
from CRCL.FireCRisisCLassification.topic104Fire import topic104Fire
from CRCL.FireCRisisCLassification.Auxiliary_functions import Forest_Fire_Weather_Index, Fire_Overall_Crisis_Level
from CRCL.FireCRisisCLassification.Auxiliary_functions import open_netcdf, get_nc_file_contents, geo_idx, get_ftp, calc_Index
from CRCL.FireCRisisCLassification.Create_Queries_FirePilot import extract_forecasts_grid, extract_gribs
from CRCL.FireCRisisCLassification.parse_XML_dict import parse_XML_dict
from ftplib import FTP
from scipy.interpolate import griddata
from scipy.interpolate import Rbf
from scipy import interpolate
from netCDF4 import Dataset, num2date
def CrisisClassificationFire_PreEmerg():
#-----------------------------------------------------------------
ver = 'Ver11_2nd_Period'
# Create a directory to store the output files and TOPICS
#root_path = Path.cwd()
# Create a path
current_dirs_parent = os.getcwd()
root_path = current_dirs_parent + "/" + "CRCL/FireCRisisCLassification" + "/"
now = datetime.now()
directory = root_path + "TOPICS" + ver + "_" + str(now.year) + "_" + str(now.month) + "_" + str(now.day)
os.makedirs(directory, exist_ok=True)
# Start Timing Step 1
start_step1 = time.time()
# Store the time steps
time_duration_step = []
print("\n STEP 1: Fetch data from the ftp files from EFFIS \n")
#-----------------------------------------------------------------------------------
# STEP 1: Fetch data from the ftp files from EFFIS
#
#
#parameters for the get_ftp
url='dissemination.ecmwf.int'
Username='fire'
Password='<PASSWORD>'
# Points of Interest
points = [{'Name': 'Sueca', 'lat': 39.253509, 'long': -0.310381},
{'Name': 'Sollana', 'lat': 39.303946, 'long': -0.379010},
{'Name': 'Silla1', 'lat': 39.340604, 'long': -0.395129},
{'Name': 'Silla2', 'lat': 39.364153, 'long': -0.371332},
{'Name': 'Catarroja', 'lat': 39.371835, 'long': -0.350579},
{'Name':'<NAME>', 'lat':39.355179, 'long':-0.320472},
{'Name':'<NAME>', 'lat':39.386909, 'long': -0.331496}
]
# Center of the points of interest
N = float(len(points))
avglat = 0
avgln = 0
for p in points:
avglat = avglat + p['lat']
avgln = avgln + p['long']
center_points = [ round(avglat/N,5), round(avgln/N,5) ]
file_type = '*fwi.nc.gz'
fieldNames = ['fwi'] # ['danger_risk', 'fwi']
# fieldNames = ['danger_risk','bui','ffmc','dc', 'dmc','isi', 'dsr', 'fwi']
ncep_data = dict()
ncep_data['date_time'] = list()
# get the file name we dll from the ftp
ftp_dict = get_ftp(url, Username, Password)
# PATH variable include / at the end...
path = ftp_dict['PATH'] + str(ftp_dict['Date']) + '_fwi/fc/'
iter = [0] * len(fieldNames)
# Create data frame for all the points and FWI estimations
FWI = pd.DataFrame()
for pnt in range(len(points)):
datetime_x = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
days_x = 1
tempFWI = pd.DataFrame()
fwi_date = []
fwi_val = pd.DataFrame(columns=['FWI_lin', 'FWI_near', 'FWI_cubic', 'FWI_max', 'FWI_min', 'FWI_std', 'FWI_mean'])
for file in sorted(glob.glob(os.path.join(path, file_type))):
print('Working on: ' + file)
namelist = [name for name in fieldNames if re.search(name, file)]
name = namelist[0]
dataFile = open_netcdf(file)
lons, lats, date_time, fieldvalues = get_nc_file_contents(dataFile, name)
# Interpolate the FWI (output: linear, nearest, cubic)
LAT_V = points[pnt]['lat']
LON_V = points[pnt]['long']
# no_pnt: number of points to use for interpolation around to a point of interest per coordinate
no_pnt = 3
z_int = calc_Index(lons, lats, fieldvalues, LAT_V, LON_V, no_pnt)
temp_z = pd.DataFrame(z_int).transpose()
# remove cubic intepolation if exists
if temp_z.shape[0] > 3:
temp_z.drop(temp_z.columns[2], axis=1)
temp_z.columns = fwi_val.columns
#fwi_date.append(date_time[0].isoformat())
fwi_date.append(datetime_x.isoformat())
datetime_x = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=days_x)
days_x += 1
fwi_val = pd.concat([fwi_val, temp_z], axis=0, ignore_index=True)
# create the file structure
# timestamp = file.split('_')[2]
# my_file = Path('./' + name + ".txt")
# if my_file.is_file() and iter[fieldNames.index(name)] != 0:
# thefile = open(my_file, "a")
# else:
# thefile = open(my_file, "w")
#
# thefile.write(timestamp + ";" + str(temp_z) + '\n')
# thefile.close()
iter[fieldNames.index(name)] += 1
#-----------------------------------------------------
# Step 1.1: Create the whole dataframe FWI
num_rows = fwi_val.shape[0]
tempFWI['Name'] = pd.concat([pd.DataFrame([points[pnt]['Name']], columns=['Name']) for i in range(num_rows)],
ignore_index=True)
tempFWI['Lat'] = [points[pnt]['lat']] * num_rows
tempFWI['Long'] = [points[pnt]['long']] * num_rows
tempFWI['Date'] = fwi_date
tempFWI = pd.concat([tempFWI, fwi_val], axis=1, ignore_index=True)
# print("\n *************** iter = ", pnt)
# print("tempFWI ", tempFWI.shape ,"\n")
# print(tempFWI)
FWI = pd.concat([FWI, tempFWI], axis=0, ignore_index=True)
# Give column names to DataFrame
FWI.columns=['Name', 'Lat', 'Long', 'Date', 'FWI_lin', 'FWI_near','FWI_cubic','FWI_max', 'FWI_min', 'FWI_std', 'FWI_mean']
#print(FWI)
# Store FWI dataframe to excel file
fwixls = pd.ExcelWriter(directory + "/" + "FWI_data.xlsx")
FWI.to_excel(fwixls, 'Sheet1', index=False)
fwixls.save()
# End Timing Step 1
end_step1 = time.time()
time_duration_step.append( end_step1 - start_step1 )
print("\n ------------------------------------ \n")
print(" STEP 2: Estimate the Fire Danger ")
#----------------------------------------------------------------------------------------------
# STEP 2: Estimate the Fire Danger based on the WFI forecasting values per day for 9 days ahead
# for each point.
#
# choose interpolation method:
# - 'lin' for linear
# - 'near' for nearest neighbor
# - 'max' for maximum FWI value for all the grid points which are nearby the point of interest
# - 'cubic' for cubic interpolation method
#
# interp_method = 'near'
# interp_method = 'max'
# Start Timing Step 2
start_step2 = time.time()
interp_method = 'cubic'
df = Forest_Fire_Weather_Index(FWI, interp_method)
# Store df dataframe to excel file
dfxls = pd.ExcelWriter(directory + "/" + "DataFrame_FWI_DangerIndex.xlsx")
df.to_excel(dfxls, 'Sheet1', index=False)
dfxls.save()
# Keep only the WFI values from current date/time
cur_date = datetime.utcnow().replace(microsecond=0).isoformat().split("T")
df = df[df['Date'] >= cur_date[0]].reset_index()
# End Timing Step 2
end_step2 = time.time()
time_duration_step.append( end_step2 - start_step2 )
print("\n ------------------------------------ \n")
print(" STEP 3: Calculate Fire Overall Crisis Level ")
#----------------------------------------------------------------------------------------------
# STEP 3: Calculate Fire Overall Crisis Level per day over all the points
#
# Start Timing Step 3
start_step3 = time.time()
unq_dates = df['Date'].unique()
# FOCL per day
FOCL_list = []
categories = ['Very Low Danger', 'Low Danger', 'Moderate Danger', 'High Danger', 'Very High Danger', 'Extreme Danger']
for iter_date in range(len(unq_dates)):
CountFWI = []
ds_date = df[df['Date'] == unq_dates[iter_date]]
for i in range(len(categories)):
temp_df = pd.DataFrame(ds_date[0:df.shape[0]])
temp_df = pd.DataFrame(temp_df[temp_df['Fire_Danger'] == categories[i]])
item = {'Note': categories[i], 'Count': temp_df.shape[0]}
CountFWI.append(item)
focl = Fire_Overall_Crisis_Level(CountFWI)
focl.update({'Date': unq_dates[iter_date], 'Position': center_points})
FOCL_list.append(focl)
# Find the 1st observation which its FWI value exceeds Moderate category per point
# and also find the maximum FWI per point at the 9 days period
df_1st = pd.DataFrame()
df_max = | pd.DataFrame() | pandas.DataFrame |
import json
import datetime
import numpy as np
import pandas as pd
from pandas import json_normalize
import sqlalchemy as sq
import requests
from oanda.oanda import Account # oanda_v20_platform.
import os.path
import logging
from utils.fileops import get_abs_path
# TODO add updated to the database and have a check to update each day
class MarketData(Account):
"""Creates a sqlite database of current market information - for use by the trading strategies.
DB Browser https://sqlitebrowser.org/ can be used for easy viewing and filtering.
Focused on daily data it incudes for every tradable instrument in a table with:
The Last 60 days of data
Yesterdays Volume, Open, High, Low, and Close
The 55 day Vol, O, H, L, C
The 20 day Vol, O, H, L, C
The 10 day Vol, O, H, L, C
True Range for each day - a volatility measure that captures gaps
N the 20 day average True Range - like ATR(20)
And a summary table of market data (called marketdata) required for trading effectively,
which includes the following information:
Trading costs such as financing rates and the days they are applied.
Pip positions (decimal points) for each instrument
Margin rates
Max and Min Trailing stop distances
Maximum order sizes
The average spread
The volatility (as N)
The spread percentage of N - enabling the selection of a trading range where trade costs are minimised
e.g. if spread is 20 and stop loss (SP) and take profit (TP) are 100 your trading edge has
to be able to overcome that ~20% cost to have any chance of succeeding - some of the instruments
with high spread % N are very hard (impossible) to trade profitably without a crystall ball.
The N per 100X spread provides a quick way to get the target trading range where the spread cost will
be ~1% e.g. US30_USD currently has a Nper100Spread of 1.92 and an N of 380 so if TP and SP are set to
380/1.92=198 pips you will only lose ~1% in spread cost and with the daily range at 380 you should
hit one of the targets in a day or so. Compared to say USD_JPY which currently has a N of 0.60 and
a Nper100Spread of 0.4 so if spread cost is kept to ~1% it will be a move of 1.5 (0.6/0.4) more like
3-4 days before a target will be hit. This column can be sorted to get a top 10 of instruments that
are efficeint to trade.
The asset class and base currency
Args:
db_path str, default='data/marketdata.db':
The path to the database from the directory where this class is being run.
"""
def __init__(self, db_path=get_abs_path(['oanda_v20_platform','data', 'marketdata.db']), **kwargs):
super().__init__(**kwargs)
self.logger = logging.getLogger(__name__)
# setup connection to the database
self.db_path=db_path
self.engine = sq.create_engine(f'sqlite:///{self.db_path}')
# does the db exist if not create it by connecting
if not os.path.isfile(self.db_path):
conn = self.engine.connect()
conn.execute("commit")
conn.close()
self.logger.info(f"Empty MarketData database created at: {self.db_path}")
# get todays date
self.today = datetime.datetime.now().strftime('%Y-%m-%d')
try: # do we need to update marketdata?
sql = """SELECT DISTINCT(Updated) FROM marketdata;"""
data_date= pd.read_sql_query(sql, con=self.engine)
except: # only an empty db exists - build db
self.instruments = self.get_instruments()
self.build_db()
self.logger.info("Market data added to the database")
# is marketdata out of date?
if data_date.loc[0].item() != self.today:
self.instruments = self.get_instruments()
self.build_db()
self.logger.info("Market data updated in the database")
else: # get the marketdata
df = pd.read_sql_query(sql="""SELECT name, type, marginRate, N, avgSpread,
"financing.longRate", "financing.shortRate",
"Spread % N"
FROM marketdata """,
con=self.engine)
self.marketdata = df[['name', 'type', 'marginRate', 'N', 'avgSpread',
'financing.longRate', 'financing.shortRate',
'Spread % N']].sort_values(by='Spread % N')
def get_core_assets(self):
pass
self.core = pd.read_sql_query(sql="""SELECT DISTINCT Base Currency, Asset FROM marketdata""", con=self.engine)
self.core_list = self.core['Instrument'].to_list()
def build_db(self):
# add data to the instruments
for i in self.instruments['instruments']:
ix = i['name']
self.logger.info(f"Collecting market data for {ix}")
# add the spread data for each instrument
i['avgSpread'] = self.avg_spread(self.spreads(ix))
# get the price data
df = self.make_dataframe(self.get_daily_candles(ix))
i['volume'] = df.iloc[0, 0]
i['open'] = df.iloc[0, 1]
i['high'] = df.iloc[0, 2]
i['low'] = df.iloc[0, 3]
i['close'] = df.iloc[0, 4]
i['True Range'] = df.iloc[0, 5]
i['N'] = df.iloc[0, 6]
i['55DayHigh'] = df.iloc[0, 7]
i['20DayHigh'] = df.iloc[0, 8]
i['10DayHigh'] = df.iloc[0, 9]
i['55DayLow'] = df.iloc[0, 10]
i['20DayLow'] = df.iloc[0, 11]
i['10DayLow'] = df.iloc[0, 12]
tags = pd.DataFrame()
for n, i in enumerate(self.instruments['instruments']):
x = i['tags']
for l in x:
tags.loc[n, 'Asset Class'] = l['name']
fDayWeek = pd.DataFrame()
for n, i in enumerate(self.instruments['instruments']):
x = i['financing']['financingDaysOfWeek']
for d in x:
fDayWeek.loc[n, d['dayOfWeek'] + '-financing'] = d['daysCharged']
tags = tags.merge(fDayWeek, left_index=True, right_index=True)
df = json_normalize(self.instruments['instruments'])
df.drop(['tags', 'financing.financingDaysOfWeek'], inplace=True, axis=1)
df = df.merge(tags, left_index=True, right_index=True)
df['Spread % N'] = round(((df['avgSpread'] * 10.00**df['pipLocation']) / df['N'])*100, 2)
df['Nper100spread'] = df['N'] / ((df['avgSpread'] * 10.00**df['pipLocation']) * 100)
df['Base Currency'] = df.apply(lambda x: self.base(x), axis=1)
df['Asset'] = df.apply(lambda x: self.asset(x), axis=1)
df['Updated'] = self.today
df.to_sql('marketdata', con=self.engine, if_exists='replace')
def base(self, x):
return x['name'].split('_')[1]
def asset(self, x):
return x['name'].split('_')[0]
def get_instruments(self, params=None):
"""Get instruments and there associated static data.
By default gets the core instruments stored in a csv. These core
instruments are the unique available instruments.
Returns:
json: contains data that describes the available instruments
"""
url = self.base_url + '/v3/accounts/' + self.account + '/instruments'
r = requests.get(url, headers=self.headers)
self.logger.debug(f"Get Instruments returned {r} status code")
data = r.json()
return data
def avg_spread(self, spreads_json):
"""Calculate the average spread from the json returned by spreads
Args:
spreads_json: json produced by spreads function
Returns:
float: average of the average spreads
"""
spreads = []
for li in spreads_json['avg']:
spreads.append(li[1])
return np.mean(spreads)
def spreads(self, instrument, period=86400):
"""Returns a json with timestamps for every 15min
with the min, max and average spread.
Args:
instrument: str, required, e.g. "EUR_USD"
period: int, time period in seconds e.g. 86400 for day
Returns:
json: { "max": [[1520028000, 6], .....],
"avg": [[1520028000, 3.01822], ......],
"min": [[1520028000, 1.7], ......]
}
"""
params = {
"instrument": instrument,
"period": period
}
url = self.base_url + '/labs/v1/spreads/'
r = requests.get(url, headers=self.headers, params=params)
self.logger.debug(f"Spreads function returned {r} status code")
data = r.json()
return data
def get_daily_candles(self, instrument):
"""Request the daily candle data from the API
get 60 candles from yesterday
Args:
instrument: string describing the instrument in API
Returns:
json: candle data
"""
yesterday = (datetime.datetime.now() - pd.DateOffset(days=1)).strftime("%Y-%m-%d")
last_candle = yesterday + 'T22:00:00.000000000Z'
params = {
"to": last_candle,
"count": 60,
"granularity": "D",
# "includeFirst": True,
}
url = self.base_url + f'/v3/instruments/{instrument}/candles/'
r = requests.get(url, headers=self.headers, params=params)
self.logger.debug(f"Get daily candles returned {r} status code")
data = r.json()
return data
def make_dataframe(self, candles_data):
"""Take a json of candle data -
convert to a dataframe, calculate volatility,
max and min prices
Args:
candles_data ([json]): takes the json returned from get_candles
Returns:
sends data to sql table
pandas df: the last line of data
"""
df = json_normalize(candles_data.get('candles'))
df.rename(columns={'mid.c': 'close', 'mid.h': 'high',
'mid.l': 'low', 'mid.o': 'open'},
inplace=True)
df.set_index('time', inplace=True)
# the API returns strings these need to be converted to floats
df.volume = pd.to_numeric(df.volume)
df.close = pd.to_numeric(df.close)
df.high = pd.to_numeric(df.high)
df.low = | pd.to_numeric(df.low) | pandas.to_numeric |
from pathlib import Path
from typing import Optional, Union, List, Tuple, Any, Dict
import pandas as pd
from sklearn.metrics import log_loss, precision_recall_fscore_support
from tabulate import tabulate
from python.handwritten_baseline.pipeline.model.scripts.evaluation_utils import run_conll_evaluation
from python.handwritten_baseline import PREDICTION, LABEL, INSTANCE, IDX_A_DOC, IDX_A_MENTION, IDX_B_DOC, IDX_B_MENTION
from python.handwritten_baseline.pipeline.model.data_prep.mention_pair_generator import MentionPairGenerator
from python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input import \
convert_X_and_y_to_internal_pipeline_input_fmt, PAIR_PREDICTION_RANDOM_SEED, create_gold_clustering
from python.util.util import get_date_based_subdirectory
def obtain_final_coref_metrics(gold_clusters: pd.Series,
system_clusters: pd.Series,
data_split: str,
coref_metrics: Optional[Union[str, List[str]]],
serialization_dir: Optional[Path] = None) -> pd.DataFrame:
"""
Runs the CoNLL evaluation script for CDCR, once with metadoc True and once False.
:param gold_clusters:
:param system_clusters:
:param data_split:
:param coref_metrics:
:param serialization_dir: if given, CoNLL files and a metric overview will be written to this directory
:return: the metrics in a dataframe
"""
all_metrics = []
for meta_doc in [True, False]:
meta_doc_descr = "cross_doc" if meta_doc else "within_doc"
if serialization_dir is not None:
sub_serialization_dir = serialization_dir / meta_doc_descr
sub_serialization_dir.mkdir(exist_ok=True)
else:
sub_serialization_dir = None
metrics = run_conll_evaluation(gold_clusters,
system_clusters,
single_meta_document=meta_doc,
metrics=coref_metrics,
output_dir=sub_serialization_dir)
metrics = metrics.unstack(level="measure")
# write metric overview to file if desired
if sub_serialization_dir is not None:
filename = "_".join([data_split, meta_doc_descr]) + ".txt"
with (sub_serialization_dir / filename).open("w") as f:
f.write(tabulate(metrics, headers="keys"))
metrics["meta-doc"] = meta_doc
all_metrics.append(metrics)
all_metrics = | pd.concat(all_metrics) | pandas.concat |
import random
import timeit
import string
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Categorical, date_range, read_csv
from pandas.compat import PY2
from pandas.compat import cStringIO as StringIO
from ..pandas_vb_common import setup, BaseIO # noqa
class ToCSV(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
params = ['wide', 'long', 'mixed']
param_names = ['kind']
def setup(self, kind):
wide_frame = DataFrame(np.random.randn(3000, 30))
long_frame = DataFrame({'A': np.arange(50000),
'B': np.arange(50000) + 1.,
'C': np.arange(50000) + 2.,
'D': np.arange(50000) + 3.})
mixed_frame = DataFrame({'float': np.random.randn(5000),
'int': np.random.randn(5000).astype(int),
'bool': (np.arange(5000) % 2) == 0,
'datetime': date_range('2001',
freq='s',
periods=5000),
'object': ['foo'] * 5000})
mixed_frame.loc[30:500, 'float'] = np.nan
data = {'wide': wide_frame,
'long': long_frame,
'mixed': mixed_frame}
self.df = data[kind]
def time_frame(self, kind):
self.df.to_csv(self.fname)
class ToCSVDatetime(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
def setup(self):
rng = date_range('1/1/2000', periods=1000)
self.data = DataFrame(rng, index=rng)
def time_frame_date_formatting(self):
self.data.to_csv(self.fname, date_format='%Y%m%d')
class ReadCSVDInferDatetimeFormat(object):
goal_time = 0.2
params = ([True, False], ['custom', 'iso8601', 'ymd'])
param_names = ['infer_datetime_format', 'format']
def setup(self, infer_datetime_format, format):
rng = date_range('1/1/2000', periods=1000)
formats = {'custom': '%m/%d/%Y %H:%M:%S.%f',
'iso8601': '%Y-%m-%d %H:%M:%S',
'ymd': '%Y%m%d'}
dt_format = formats[format]
self.data = StringIO('\n'.join(rng.strftime(dt_format).tolist()))
def time_read_csv(self, infer_datetime_format, format):
read_csv(self.data, header=None, names=['foo'], parse_dates=['foo'],
infer_datetime_format=infer_datetime_format)
class ReadCSVSkipRows(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
params = [None, 10000]
param_names = ['skiprows']
def setup(self, skiprows):
N = 20000
index = tm.makeStringIndex(N)
df = DataFrame({'float1': np.random.randn(N),
'float2': np.random.randn(N),
'string1': ['foo'] * N,
'bool1': [True] * N,
'int1': np.random.randint(0, N, size=N)},
index=index)
df.to_csv(self.fname)
def time_skipprows(self, skiprows):
read_csv(self.fname, skiprows=skiprows)
class ReadUint64Integers(object):
goal_time = 0.2
def setup(self):
self.na_values = [2**63 + 500]
arr = np.arange(10000).astype('uint64') + 2**63
self.data1 = StringIO('\n'.join(arr.astype(str).tolist()))
arr = arr.astype(object)
arr[500] = -1
self.data2 = StringIO('\n'.join(arr.astype(str).tolist()))
def time_read_uint64(self):
read_csv(self.data1, header=None, names=['foo'])
def time_read_uint64_neg_values(self):
read_csv(self.data2, header=None, names=['foo'])
def time_read_uint64_na_values(self):
read_csv(self.data1, header=None, names=['foo'],
na_values=self.na_values)
class S3(object):
# Make sure that we can read part of a file from S3 without
# needing to download the entire thing. Use the timeit.default_timer
# to measure wall time instead of CPU time -- we want to see
# how long it takes to download the data.
timer = timeit.default_timer
params = ([None, "gzip", "bz2"], ["python", "c"])
param_names = ["compression", "engine"]
def setup(self, compression, engine):
if compression == "bz2" and engine == "c" and PY2:
# The Python 2 C parser can't read bz2 from open files.
raise NotImplementedError
try:
import s3fs
except ImportError:
# Skip these benchmarks if `boto` is not installed.
raise NotImplementedError
ext = ""
if compression == "gzip":
ext = ".gz"
elif compression == "bz2":
ext = ".bz2"
self.big_fname = "s3://pandas-test/large_random.csv" + ext
def time_read_csv_10_rows(self, compression, engine):
# Read a small number of rows from a huge (100,000 x 50) table.
read_csv(self.big_fname, nrows=10, compression=compression,
engine=engine)
class ReadCSVThousands(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
params = ([',', '|'], [None, ','])
param_names = ['sep', 'thousands']
def setup(self, sep, thousands):
N = 10000
K = 8
data = np.random.randn(N, K) * np.random.randint(100, 10000, (N, K))
df = DataFrame(data)
if thousands is not None:
fmt = ':{}'.format(thousands)
fmt = '{' + fmt + '}'
df = df.applymap(lambda x: fmt.format(x))
df.to_csv(self.fname, sep=sep)
def time_thousands(self, sep, thousands):
read_csv(self.fname, sep=sep, thousands=thousands)
class ReadCSVComment(object):
goal_time = 0.2
def setup(self):
data = ['A,B,C'] + (['1,2,3 # comment'] * 100000)
self.s_data = StringIO('\n'.join(data))
def time_comment(self):
read_csv(self.s_data, comment='#', header=None, names=list('abc'))
class ReadCSVFloatPrecision(object):
goal_time = 0.2
params = ([',', ';'], ['.', '_'], [None, 'high', 'round_trip'])
param_names = ['sep', 'decimal', 'float_precision']
def setup(self, sep, decimal, float_precision):
floats = [''.join(random.choice(string.digits) for _ in range(28))
for _ in range(15)]
rows = sep.join(['0{}'.format(decimal) + '{}'] * 3) + '\n'
data = rows * 5
data = data.format(*floats) * 200 # 1000 x 3 strings csv
self.s_data = StringIO(data)
def time_read_csv(self, sep, decimal, float_precision):
read_csv(self.s_data, sep=sep, header=None, names=list('abc'),
float_precision=float_precision)
def time_read_csv_python_engine(self, sep, decimal, float_precision):
read_csv(self.s_data, sep=sep, header=None, engine='python',
float_precision=None, names=list('abc'))
class ReadCSVCategorical(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
def setup(self):
N = 100000
group1 = ['aaaaaaaa', 'bbbbbbb', 'cccccccc', 'dddddddd', 'eeeeeeee']
df = DataFrame(np.random.choice(group1, (N, 3)), columns=list('abc'))
df.to_csv(self.fname, index=False)
def time_convert_post(self):
| read_csv(self.fname) | pandas.read_csv |
import random
import pandas as pd
import pytest
from ..wrangling import *
######################
# BROADCASTING
######################
@pytest.fixture()
def right_df():
return pd.DataFrame(
{
'col1': [100, 200, 50],
'col2': [1, 2, 3]
},
index=pd.Index(['g', 'b', 'z'])
)
@pytest.fixture()
def left_df():
return pd.DataFrame({
'some_val': [10, 9, 8, 7, 6],
'fk': ['z', 'g', 'g', 'b', 't'],
'grp': ['r', 'g', 'r', 'g', 'r']
})
@pytest.fixture()
def right_df2(right_df):
df = | pd.concat([right_df, right_df * -1]) | pandas.concat |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.0
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown]
# # Imports
# %%
import os
import pandas as pd
import seaborn as sns
import statsmodels.formula.api as smapi
import statsmodels.tsa.stattools as smtools
import core.config.config_ as cconconf
import core.plotting as coplotti
import helpers.hs3 as hs3
import im_v2.ccxt.universe.universe as imvccunun
import research_amp.cc.statistics as ramccsta
# %% [markdown]
# # Config
# %%
def get_cmtask323_config() -> cconconf.Config:
"""
Get task323-specific config.
"""
config = cconconf.Config()
# Load parameters.
config.add_subconfig("load")
config["load"]["aws_profile"] = "am"
config["load"]["data_dir"] = os.path.join(hs3.get_path(), "data")
# Data parameters.
config.add_subconfig("data")
config["data"]["data_type"] = "OHLCV"
config["data"]["universe_version"] = "v03"
config["data"]["vendor"] = "CCXT"
# Column names.
config.add_subconfig("column_names")
config["column_names"]["volume"] = "volume"
config["column_names"]["currency_pair"] = "currency_pair"
config["column_names"]["exchange"] = "exchange_id"
config["column_names"]["close"] = "close"
return config
config = get_cmtask323_config()
print(config)
# %% [markdown]
# # Functions
# %%
def compute_volatility_for_each_coin(data: pd.DataFrame, freq: str, span: int):
"""
Load and transform each (exchange-coin) dataframe to compute 18-period ema
volatility.
Parameters: initial DataFrame from the universe, resampling frequency
"""
data["date"] = data.index
# TODO(Max): Try out our resampe_df() for resampling.
resample_close = data.groupby(
["currency_pair", "exchange_id", pd.Grouper(key="date", freq=freq)]
)["close"].last()
vix_df = resample_close.pct_change().transform(
lambda x: x.ewm(span=span, adjust=False).std()
)
vix_df = vix_df.reset_index()
return vix_df
def get_daily_close(data: pd.DataFrame, freq: str):
"""
Load and transform each (exchange-coin) dataframe to compute volatility for
the whole period.
Parameters: initial DataFrame from the universe, resampling frequency
"""
data["date"] = data.index
resample_close = data.groupby(
["currency_pair", "exchange_id", pd.Grouper(key="date", freq=freq)]
)["close"].last()
resample_close = resample_close.reset_index()
return resample_close
def get_df_with_coin_price_volatility(data: pd.DataFrame, display_plot: bool):
"""
Unify volatility values for each coin and plot the graph.
Parameters: DataFrame with computed volatility, boolean value to plot the graph
"""
vix_df = data.groupby(
["currency_pair", pd.Grouper(key="date", freq=frequency)]
)["close"].mean()
vix_df = vix_df.to_frame()
vix_df.columns = ["ema_volatility"]
if display_plot:
sns.set(rc={"figure.figsize": (15, 8)})
sns.lineplot(
data=vix_df, x="date", y="ema_volatility", hue="currency_pair"
).set(title=f"EMA of Volatility for each coin")
return vix_df
def get_overall_returns_volatility(data: pd.DataFrame, display_plot: bool):
"""
Unify volatility values for each coin for the whole period and plot the
barplot.
Parameters: DataFrame with computed volatility, boolean value to plot the graph
"""
close_df = daily_close.groupby(
["currency_pair", pd.Grouper(key="date", freq=frequency)]
)["close"].mean()
rets_df = close_df.groupby(["currency_pair"]).pct_change()
std_df = rets_df.groupby(["currency_pair"]).std()
if display_plot:
coplotti.plot_barplot(
std_df.sort_values(ascending=False),
title="Volatility per coin for the whole period (1-day basis, log-scaled)",
figsize=[15, 7],
yscale="log",
)
return std_df
def perform_adf_test(df_daily: pd.DataFrame):
"""
Perform ADF test to check the stationarity of volatility values
Parameters: Daily DataFrame with computed volatility
"""
final_result = []
coin_list = df_daily.reset_index()["currency_pair"].unique()
for coin in coin_list:
result = pd.DataFrame()
df = df_daily.loc[[coin]]
df = df[df["ema_volatility"].notna()].copy()
X = df["ema_volatility"].values
test_result = smtools.adfuller(X)
result.loc[f"{coin}", "ADF Statistic"] = test_result[0]
result.loc[f"{coin}", "p-value"] = test_result[1]
final_result.append(result)
final_result = pd.concat(final_result)
final_result["is_unit_root_and_non-stationary (5% sign. level)"] = (
final_result["p-value"] > 0.05
)
return final_result
def get_df_with_volume_and_volatility(data: pd.DataFrame, freq: str, span: int):
"""
Load and transform each (exchange-coin) dataframe with volumes and close
prices (to compute 18-period ema volatility).
Parameters: initial DataFrame from the universe, resampling frequency
"""
data["date"] = data.index
close = data.groupby(
["currency_pair", "exchange_id", pd.Grouper(key="date", freq=freq)]
)["close"].last()
volume = data.groupby(
["currency_pair", "exchange_id", pd.Grouper(key="date", freq=freq)]
)["volume"].sum()
close_volume = pd.concat([close, volume], axis=1)
close_volume["ema_volatility"] = (
close_volume["close"]
.pct_change()
.transform(lambda x: x.ewm(span=span, adjust=False).std())
)
vix_volume = close_volume.reset_index()
return vix_volume
def run_regressions(df: pd.DataFrame, lag_volume: bool):
"""
Run OLS regression of volatility to volume (with intercept) for daily
values.
Parameters: price-volatility DataFrame, bool value for lagging volume variable
"""
volatility = df.groupby(
["currency_pair", pd.Grouper(key="date", freq=frequency)]
)["ema_volatility"].mean()
volume = df.groupby(
["currency_pair", | pd.Grouper(key="date", freq=frequency) | pandas.Grouper |
import pandas as pd
import sys
import utils
import cate_encoding
import config
def gen_user_stat_feat():
rows = None
tr = | pd.read_csv(config.data+'sample_train.csv',nrows=rows) | pandas.read_csv |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for schemas."""
# pytype: skip-file
from __future__ import absolute_import
import typing
import unittest
import future.tests.base # pylint: disable=unused-import
import numpy as np
# patches unittest.testcase to be python3 compatible
import pandas as pd
from parameterized import parameterized
from past.builtins import unicode
import apache_beam as beam
from apache_beam.coders import RowCoder
from apache_beam.coders.typecoders import registry as coders_registry
from apache_beam.dataframe import schemas
from apache_beam.dataframe import transforms
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
Simple = typing.NamedTuple(
'Simple', [('name', unicode), ('id', int), ('height', float)])
coders_registry.register_coder(Simple, RowCoder)
Animal = typing.NamedTuple(
'Animal', [('animal', unicode), ('max_speed', typing.Optional[float])])
coders_registry.register_coder(Animal, RowCoder)
def matches_df(expected):
def check_df_pcoll_equal(actual):
actual = pd.concat(actual)
sorted_actual = actual.sort_values(by=list(actual.columns)).reset_index(
drop=True)
sorted_expected = expected.sort_values(
by=list(expected.columns)).reset_index(drop=True)
if not sorted_actual.equals(sorted_expected):
raise AssertionError(
'Dataframes not equal: \n\nActual:\n%s\n\nExpected:\n%s' %
(sorted_actual, sorted_expected))
return check_df_pcoll_equal
# Test data for all supported types that can be easily tested.
# Excludes bytes because it's difficult to create a series and dataframe bytes
# dtype. For example:
# pd.Series([b'abc'], dtype=bytes).dtype != 'S'
# pd.Series([b'abc'], dtype=bytes).astype(bytes).dtype == 'S'
COLUMNS = [
([375, 24, 0, 10, 16], np.int32, 'i32'),
([375, 24, 0, 10, 16], np.int64, 'i64'),
([375, 24, None, 10, 16], pd.Int32Dtype(), 'i32_nullable'),
([375, 24, None, 10, 16], pd.Int64Dtype(), 'i64_nullable'),
([375., 24., None, 10., 16.], np.float64, 'f64'),
([375., 24., None, 10., 16.], np.float32, 'f32'),
([True, False, True, True, False], np.bool, 'bool'),
(['Falcon', 'Ostrich', None, 3.14, 0], np.object, 'any'),
([True, False, True, None, False], pd.BooleanDtype(), 'bool_nullable'),
(['Falcon', 'Ostrich', None, 'Aardvark', 'Elephant'],
pd.StringDtype(),
'strdtype'),
] # type: typing.List[typing.Tuple[typing.List[typing.Any], typing.Any, str]]
NICE_TYPES_DF = pd.DataFrame(columns=[name for _, _, name in COLUMNS])
for arr, dtype, name in COLUMNS:
NICE_TYPES_DF[name] = | pd.Series(arr, dtype=dtype, name=name) | pandas.Series |
import timeboard as tb
from timeboard.interval import Interval, _VoidInterval
from timeboard.workshift import Workshift
from timeboard.exceptions import (OutOfBoundsError, PartialOutOfBoundsError,
VoidIntervalError)
from timeboard.timeboard import _Location, OOB_LEFT, OOB_RIGHT, LOC_WITHIN
import datetime
import pandas as pd
import numpy as np
import pytest
def tb_12_days():
return tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0])
# 31 01 02 03 04 05 06 07 08 09 10 11 12
# 0 1 0 0 1 0 0 1 0 0 1 0 0
class TestIntervalLocatorFromReference(object):
def test_interval_locator_default(self):
clnd = tb_12_days()
assert clnd._get_interval_locs_from_reference(
None, False, False) == [_Location(0, LOC_WITHIN),
_Location(12, LOC_WITHIN)]
def test_interval_locator_with_two_ts(self):
clnd = tb_12_days()
assert clnd._get_interval_locs_from_reference(
('02 Jan 2017 15:00', '08 Jan 2017 15:00'), False, False) == [
_Location(2, LOC_WITHIN), _Location(8, LOC_WITHIN)]
# reverse is ok; it is taken care later in 'get_interval'
assert clnd._get_interval_locs_from_reference(
('08 Jan 2017 15:00', '02 Jan 2017 15:00'), False, False) == [
_Location(8, LOC_WITHIN), _Location(2, LOC_WITHIN)]
def test_interval_locator_with_with_excessive_item(self):
clnd = tb_12_days()
assert clnd._get_interval_locs_from_reference(
('02 Jan 2017 15:00','08 Jan 2017 15:00','something'), False,
False) == [_Location(2, LOC_WITHIN), _Location(8, LOC_WITHIN)]
def test_interval_locator_with_two_pd_ts(self):
clnd = tb_12_days()
assert clnd._get_interval_locs_from_reference(
(pd.Timestamp('02 Jan 2017 15:00'),
pd.Timestamp('08 Jan 2017 15:00')),
False, False) == [
_Location(2, LOC_WITHIN), _Location(8, LOC_WITHIN)]
def test_interval_locator_with_two_datettime_ts(self):
clnd = tb_12_days()
assert clnd._get_interval_locs_from_reference(
(datetime.datetime(2017, 1, 2, 15, 0, 0),
datetime.datetime(2017, 1, 8, 15, 0, 0)),
False, False) == [
_Location(2, LOC_WITHIN), _Location(8, LOC_WITHIN)]
def test_interval_locator_with_OOB_ts(self):
clnd = tb_12_days()
# only one end of the interval is OOB
assert clnd._get_interval_locs_from_reference(
('02 Jan 2017 15:00', '13 Jan 2017 15:00'), False, False) == [
_Location(2, LOC_WITHIN), _Location(None, OOB_RIGHT)]
assert clnd._get_interval_locs_from_reference(
('30 Dec 2016 15:00', '08 Jan 2017 15:00'), False, False) == [
_Location(None, OOB_LEFT), _Location(8, LOC_WITHIN)]
# the interval spans over the timeboard
assert clnd._get_interval_locs_from_reference(
('30 Dec 2016 15:00', '13 Jan 2017 15:00'), False, False) == [
_Location(None, OOB_LEFT), _Location(None, OOB_RIGHT)]
assert clnd._get_interval_locs_from_reference(
('13 Jan 2017 15:00', '30 Dec 2016 15:00'), False, False) == [
_Location(None, OOB_RIGHT), _Location(None, OOB_LEFT)]
# the interval is completely outside the timeboard
assert clnd._get_interval_locs_from_reference(
('25 Dec 2016 15:00', '30 Dec 2016 15:00'), False, False) == [
_Location(None, OOB_LEFT), _Location(None, OOB_LEFT)]
assert clnd._get_interval_locs_from_reference(
('30 Dec 2016 15:00', '25 Dec 2016 15:00'), False, False) == [
_Location(None, OOB_LEFT), _Location(None, OOB_LEFT)]
assert clnd._get_interval_locs_from_reference(
('13 Jan 2017 15:00', '15 Jan 2017 15:00'), False, False) == [
_Location(None, OOB_RIGHT), _Location(None, OOB_RIGHT)]
assert clnd._get_interval_locs_from_reference(
('15 Jan 2017 15:00', '13 Jan 2017 15:00'), False, False) == [
_Location(None, OOB_RIGHT), _Location(None, OOB_RIGHT)]
def test_interval_locator_from_pd_periods(self):
clnd = tb_12_days()
# if we could not directly Timestamp() a reference, we try to call its
# `to_timestamp` method which would return reference's start time
# First day of Jan is inside clnd
assert clnd._get_interval_locs_from_reference(
(pd.Period('02 Jan 2017', freq='M'), '11 Jan 2017 15:00'),
False, False) == [
_Location(1, LOC_WITHIN), _Location(11, LOC_WITHIN)]
# While 31 Dec is within clnd, the first day of Dec is outside
assert clnd._get_interval_locs_from_reference(
(pd.Period('31 Dec 2016', freq='M'), '11 Jan 2017 15:00'),
False, False) == [
_Location(None, OOB_LEFT), _Location(11, LOC_WITHIN)]
# freq=W begins weeks on Mon which is 02 Jan 2017
assert clnd._get_interval_locs_from_reference(
(pd.Period('05 Jan 2017', freq='W'), '11 Jan 2017 15:00'),
False, False) == [
_Location(2, LOC_WITHIN), _Location(11, LOC_WITHIN)]
# freq=W-MON ends weeks on Mondays, and 02 Jan is Monday,
# but this week begins on Tue 27 Dec 2016 which is outside the timeboard
assert clnd._get_interval_locs_from_reference(
(pd.Period('02 Jan 2017', freq='W-MON'), '11 Jan 2017 15:00'),
False, False) == [
_Location(None, OOB_LEFT), _Location(11, LOC_WITHIN)]
def test_interval_locator_with_bad_ts(self):
clnd = tb_12_days()
with pytest.raises(ValueError):
clnd._get_interval_locs_from_reference(
('bad_timestamp', '08 Jan 2017 15:00'), False, False)
with pytest.raises(ValueError):
clnd._get_interval_locs_from_reference(
('02 Jan 2017 15:00', 'bad_timestamp'), False, False)
def test_interval_locator_with_singletons(self):
clnd = tb_12_days()
with pytest.raises(TypeError):
clnd._get_interval_locs_from_reference(('08 Jan 2017 15:00',),
False, False)
with pytest.raises(TypeError):
clnd._get_interval_locs_from_reference('08 Jan 2017 15:00',
False, False)
with pytest.raises(TypeError):
clnd._get_interval_locs_from_reference(
pd.Timestamp('08 Jan 2017 15:00'), False, False)
class TestIntervalStripLocs(object):
def test_interval_strip_locs(self):
clnd = tb_12_days()
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(8, 'whatever')], False, False) \
== [_Location(2,'anything'),_Location(8, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(8, 'whatever')], True, False) \
== [_Location(3,'anything'),_Location(8, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(8, 'whatever')], False, True) \
== [_Location(2,'anything'),_Location(7, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(8, 'whatever')], True, True) \
== [_Location(3,'anything'),_Location(7, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(4, 'whatever')], True, True) \
== [_Location(3,'anything'),_Location(3, 'whatever')]
def test_interval_strip_locs_single_unit(self):
clnd = tb_12_days()
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(2, 'whatever')], False, False) \
== [_Location(2,'anything'),_Location(2, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(2, 'whatever')], True, False) \
== [_Location(3,'anything'),_Location(2, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(2, 'whatever')], False, True) \
== [_Location(2,'anything'),_Location(1, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(2, 'whatever')], True, True) \
== [_Location(3,'anything'),_Location(1, 'whatever')]
def test_interval_strip_locs_corner_cases(self):
clnd = tb_12_days()
assert clnd._strip_interval_locs(
[_Location(0, 'anything'), _Location(0, 'whatever')], True, True) \
== [_Location(1, 'anything'), _Location(-1, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(-4, 'anything'), _Location(-2, 'whatever')], True, True) \
== [_Location(-3, 'anything'), _Location(-3, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(None,'anything'),_Location(2, 'whatever')], False, False) \
== [_Location(None,'anything'),_Location(2, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(None,'anything'),_Location(2, 'whatever')], True, False) \
== [_Location(None,'anything'),_Location(2, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(None,'anything'),_Location(2, 'whatever')], False, True) \
== [_Location(None,'anything'),_Location(1, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(None, 'whatever')], True, True) \
== [_Location(3,'anything'),_Location(None, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(None,'anything'),_Location(None, 'whatever')], True, True) \
== [_Location(None,'anything'),_Location(None, 'whatever')]
def test_interval_strip_locs_bad_locs(self):
# in '_strip_interval_locs' we do not care about validity of 'locs'
# type and value; other parts of 'get_interval' should care about this
assert True
def test_get_interval_with_bad_closed(self):
clnd = tb_12_days()
with pytest.raises(ValueError):
clnd.get_interval(closed='010')
with pytest.raises(ValueError):
clnd.get_interval(closed=True)
class TestIntervalConstructorWithTS(object):
def test_interval_constructor_with_two_ts(self):
clnd = tb_12_days()
ivl = clnd.get_interval(('02 Jan 2017 15:00', '08 Jan 2017 15:00'))
assert ivl.start_time == datetime.datetime(2017, 1, 2, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 8, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 9, 0, 0, 0)
assert ivl._loc == (2,8)
assert len(ivl) == 7
ivlx = clnd(('02 Jan 2017 15:00', '08 Jan 2017 15:00'))
assert ivlx._loc == ivl._loc
def test_interval_constructor_with_none_ts(self):
clnd = tb_12_days()
ivl = clnd.get_interval((None, '08 Jan 2017 15:00'))
assert ivl._loc == (0,8)
ivl = clnd.get_interval((np.nan, '08 Jan 2017 15:00'))
assert ivl._loc == (0,8)
ivlx = clnd((None, '08 Jan 2017 15:00'))
assert ivlx._loc == ivl._loc
ivl = clnd.get_interval(('02 Jan 2017 15:00', None))
assert ivl._loc == (2,12)
ivl = clnd.get_interval(('02 Jan 2017 15:00', pd.NaT))
assert ivl._loc == (2,12)
ivl = clnd(('02 Jan 2017 15:00', pd.NaT))
assert ivl._loc == (2,12)
ivl = clnd.get_interval((None, None))
assert ivl._loc == (0,12)
ivl = clnd.get_interval((np.nan, None))
assert ivl._loc == (0,12)
ivl = clnd((pd.NaT, np.nan))
assert ivl._loc == (0,12)
def test_interval_iterator(self):
clnd = tb_12_days()
ivl = clnd.get_interval(('02 Jan 2017 15:00', '08 Jan 2017 15:00'))
wslist1 = []
for ws in ivl:
wslist1.append(ws)
wslist2 = list(ivl)
assert len(wslist1) == 7
assert len(wslist2) == 7
for i in range(7):
assert isinstance(wslist1[i], Workshift)
assert isinstance(wslist2[i], Workshift)
assert wslist1[i]._loc == i+2
assert wslist1[i]._loc == i+2
def test_interval_constructor_with_two_ts_open_ended(self):
clnd = tb_12_days()
ivl = clnd.get_interval(('02 Jan 2017 15:00', '08 Jan 2017 15:00'),
closed='11')
assert ivl._loc == (2,8)
assert len(ivl) == 7
ivlx = clnd(('02 Jan 2017 15:00', '08 Jan 2017 15:00'), closed='11')
assert ivlx._loc == ivl._loc
ivl = clnd.get_interval(('02 Jan 2017 15:00', '08 Jan 2017 15:00'),
closed='01')
assert ivl._loc == (3,8)
assert len(ivl) == 6
ivl = clnd.get_interval(('02 Jan 2017 15:00', '08 Jan 2017 15:00'),
closed='10')
assert ivl._loc == (2,7)
assert len(ivl) == 6
ivl = clnd.get_interval(('02 Jan 2017 15:00', '08 Jan 2017 15:00'),
closed='00')
assert ivl._loc == (3,7)
assert len(ivl) == 5
ivl = clnd.get_interval(('02 Jan 2017 15:00', '03 Jan 2017 15:00'),
closed='01')
assert ivl._loc == (3,3)
assert len(ivl) == 1
ivl = clnd.get_interval(('02 Jan 2017 15:00', '03 Jan 2017 15:00'),
closed='10')
assert ivl._loc == (2,2)
assert len(ivl) == 1
def test_interval_constructor_with_closed_leads_to_void(self):
clnd = tb_12_days()
ivl = clnd.get_interval(('02 Jan 2017 15:00', '02 Jan 2017 15:00'))
assert ivl._loc == (2,2)
assert len(ivl) == 1
with pytest.raises(VoidIntervalError):
clnd.get_interval(('02 Jan 2017 15:00', '02 Jan 2017 15:00'),
closed='01')
with pytest.raises(VoidIntervalError):
clnd(('02 Jan 2017 15:00', '02 Jan 2017 15:00'), closed='01')
with pytest.raises(VoidIntervalError):
clnd.get_interval(('02 Jan 2017 15:00', '02 Jan 2017 15:00'),
closed='10')
with pytest.raises(VoidIntervalError):
clnd.get_interval(('02 Jan 2017 15:00', '02 Jan 2017 15:00'),
closed='00')
with pytest.raises(VoidIntervalError):
clnd.get_interval(('02 Jan 2017 15:00', '03 Jan 2017 15:00'),
closed='00')
def test_interval_constructor_with_OOB_ts(self):
clnd = tb_12_days()
# only one end of the interval is OOB
with pytest.raises(PartialOutOfBoundsError):
ivl = clnd.get_interval(('02 Jan 2017 15:00', '13 Jan 2017 15:00'))
with pytest.raises(PartialOutOfBoundsError):
clnd.get_interval(('02 Jan 2017 15:00', '13 Jan 2017 15:00'),
clip_period=False)
with pytest.raises(PartialOutOfBoundsError):
clnd(('02 Jan 2017 15:00', '13 Jan 2017 15:00'),
clip_period=False)
with pytest.raises(PartialOutOfBoundsError):
ivl = clnd.get_interval(('30 Dec 2016 15:00', '08 Jan 2017 15:00'))
with pytest.raises(PartialOutOfBoundsError):
clnd.get_interval(('30 Dec 2016 15:00', '08 Jan 2017 15:00'),
clip_period=False)
# the interval spans over the timeboard
with pytest.raises(PartialOutOfBoundsError):
ivl = clnd.get_interval(('30 Dec 2016 15:00', '13 Jan 2017 15:00'))
with pytest.raises(PartialOutOfBoundsError):
clnd.get_interval(('30 Dec 2016 15:00', '13 Jan 2017 15:00'),
clip_period=False)
with pytest.raises(VoidIntervalError):
clnd.get_interval(('13 Jan 2017 15:00', '30 Dec 2016 15:00'))
# the interval is completely outside the timeboard
with pytest.raises(OutOfBoundsError):
clnd.get_interval(('25 Dec 2016 15:00', '30 Dec 2016 15:00'))
# OOBError is ok, since we cannot clip a complete outsider anyway
with pytest.raises(OutOfBoundsError):
clnd.get_interval(('30 Dec 2016 15:00', '25 Dec 2016 15:00'))
with pytest.raises(OutOfBoundsError):
clnd.get_interval(('13 Jan 2017 15:00', '15 Jan 2017 15:00'))
# OOBError is ok, since we cannot clip a complete outsider anyway
with pytest.raises(OutOfBoundsError):
clnd.get_interval(('15 Jan 2017 15:00', '13 Jan 2017 15:00'))
def test_interval_constructor_with_same_ts(self):
clnd = tb_12_days()
ivl = clnd.get_interval(('02 Jan 2017 15:00', '02 Jan 2017 15:00'))
assert ivl.start_time == datetime.datetime(2017, 1, 2, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 2, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 3, 0, 0, 0)
assert ivl._loc == (2,2)
assert len(ivl) == 1
def test_interval_constructor_reverse_ts_to_same_BU(self):
clnd = tb_12_days()
ivl = clnd.get_interval(('02 Jan 2017 15:00', '02 Jan 2017 10:00'))
assert ivl.start_time == datetime.datetime(2017, 1, 2, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 2, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 3, 0, 0, 0)
assert ivl._loc == (2,2)
assert len(ivl) == 1
def test_interval_constructor_reverse_ts(self):
clnd = tb_12_days()
with pytest.raises(VoidIntervalError):
clnd.get_interval(('08 Jan 2017 15:00', '02 Jan 2017 15:00'))
with pytest.raises(VoidIntervalError):
clnd(('08 Jan 2017 15:00', '02 Jan 2017 15:00'))
def test_interval_constructor_two_pd_periods_as_ts(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='31 Mar 2017',
layout=[0, 1, 0])
ivl = clnd.get_interval((pd.Period('05 Jan 2017 15:00', freq='M'),
pd.Period('19 Feb 2017 15:00', freq='M')))
assert ivl.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 2, 1, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 2, 2, 0, 0, 0)
assert ivl._loc == (1,32)
assert len(ivl) == 32
ivlx = clnd((pd.Period('05 Jan 2017 15:00', freq='M'),
pd.Period('19 Feb 2017 15:00', freq='M')))
assert ivlx._loc == ivl._loc
class TestIntervalConstructorDefault(object):
def test_interval_constructor_default(self):
clnd = tb_12_days()
ivl = clnd.get_interval()
assert ivl.start_time == datetime.datetime(2016, 12, 31, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
assert ivl._loc == (0,12)
assert len(ivl) == 13
def test_interval_constructor_default_open_ended(self):
clnd = tb_12_days()
ivl = clnd.get_interval(closed='00')
assert ivl.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 11, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 12, 0, 0, 0)
assert ivl._loc == (1,11)
assert len(ivl) == 11
def test_interval_constructor_default_closed_leads_to_void(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='01 Jan 2017',
layout=[1])
with pytest.raises(VoidIntervalError):
ivl = clnd.get_interval(closed='01')
with pytest.raises(VoidIntervalError):
ivl = clnd.get_interval(closed='10')
with pytest.raises(VoidIntervalError):
ivl = clnd.get_interval(closed='00')
class TestIntervalConstructorFromPeriod(object):
def test_interval_constructor_with_period(self):
clnd = tb_12_days()
ivl = clnd.get_interval('02 Jan 2017 15:00', period='W')
assert ivl.start_time == datetime.datetime(2017, 1, 2, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 8, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 9, 0, 0, 0)
assert ivl._loc == (2,8)
assert len(ivl) == 7
ivlx = clnd('02 Jan 2017 15:00', period='W')
assert ivlx._loc == ivl._loc
def test_interval_constructor_with_OOB_period(self):
clnd = tb_12_days()
#period is defined by good ts but extends beyond the left bound of clnd
ivl = clnd.get_interval('01 Jan 2017 15:00', period='W')
assert ivl._loc == (0, 1)
with pytest.raises(PartialOutOfBoundsError):
clnd.get_interval('01 Jan 2017 15:00', period='W',
clip_period=False)
with pytest.raises(PartialOutOfBoundsError):
clnd('01 Jan 2017 15:00', period='W',
clip_period=False)
#same period defined by outside ts
ivl = clnd.get_interval('26 Dec 2016 15:00', period='W')
assert ivl._loc == (0, 1)
#period is defined by good ts but extends beyond the right bound of clnd
ivl = clnd.get_interval('10 Jan 2017 15:00', period='W')
assert ivl._loc == (9, 12)
with pytest.raises(PartialOutOfBoundsError):
clnd.get_interval('10 Jan 2017 15:00', period='W',
clip_period=False)
#same period defined by outside ts
ivl = clnd.get_interval('14 Jan 2017 15:00', period='W')
assert ivl._loc == (9, 12)
#period spans over clnd (a year ending on 31 March)
ivl = clnd.get_interval('10 Mar 2017 15:00', period='A-MAR')
assert ivl._loc == (0, 12)
with pytest.raises(PartialOutOfBoundsError):
clnd.get_interval('10 Mar 2017 15:00', period='A-MAR',
clip_period=False)
#period is completely outside clnd
with pytest.raises(OutOfBoundsError):
clnd.get_interval('18 Jan 2017 15:00', period='W')
def test_interval_constructor_with_bad_period(self):
clnd = tb_12_days()
with pytest.raises(ValueError):
clnd.get_interval('02 Jan 2017 15:00', period='bad_period')
with pytest.raises(ValueError):
clnd('02 Jan 2017 15:00', period='bad_period')
with pytest.raises(ValueError):
clnd.get_interval('bad_timestamp', period='W')
def test_interval_constructor_from_pd_period(self):
clnd = tb_12_days()
ivl = clnd.get_interval(pd.Period('05 Jan 2017 15:00', freq='W'))
assert ivl.start_time == datetime.datetime(2017, 1, 2, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 8, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 9, 0, 0, 0)
assert ivl._loc == (2, 8)
assert len(ivl) == 7
# if we call timeboard instance directly, it cannot figure that we
# want a period, as only one argument is given and it can be converted
# to a timestamp
ws = clnd(pd.Period('05 Jan 2017 15:00', freq='W'))
assert ws._loc == 2
def test_interval_constructor_from_pd_period_OOB(self):
clnd = tb_12_days()
# period defined by good ts but extends beyond the reight bound of clnd
ivl = clnd.get_interval(pd.Period('10 Jan 2017 15:00', freq='W'))
assert ivl._loc == (9, 12)
with pytest.raises(PartialOutOfBoundsError):
clnd.get_interval(pd.Period('10 Jan 2017 15:00', freq='W'),
clip_period=False)
# period defined by good ts but extends beyond the left bound of clnd
ivl = clnd.get_interval(pd.Period('01 Jan 2017 15:00', freq='W'))
assert ivl._loc == (0, 1)
with pytest.raises(PartialOutOfBoundsError):
clnd.get_interval(pd.Period('01 Jan 2017 15:00', freq='W'),
clip_period=False)
# period overlapping clnd
ivl = clnd.get_interval(pd.Period('08 Mar 2017 15:00', freq='A-MAR'))
assert ivl._loc == (0, 12)
with pytest.raises(PartialOutOfBoundsError):
clnd.get_interval(pd.Period('08 Mar 2017 15:00', freq='A-MAR'),
clip_period=False)
# period completely outside clnd
with pytest.raises(OutOfBoundsError):
clnd.get_interval(pd.Period('25 Jan 2017 15:00', freq='W'))
def test_interval_constructor_period_smaller_than_bu(self):
clnd = tb.Timeboard(base_unit_freq='H',
start='04 Oct 2017', end='04 Oct 2017 23:59',
layout=[0, 1],
)
clnd2 = tb.Timeboard(base_unit_freq='H',
start='04 Oct 2017', end='04 Oct 2017 23:59',
layout=[0, 1],
workshift_ref='end'
)
# no ws reference time falls within this period:
with pytest.raises(VoidIntervalError):
clnd.get_interval('04 Oct 2017 01:15', period='T')
with pytest.raises(VoidIntervalError):
clnd2.get_interval('04 Oct 2017 01:15', period='T')
# reference time of clnd.ws 1 (01:00) falls within this period:
ivl = clnd.get_interval('04 Oct 2017 01:00', period='T')
assert ivl._loc == (1, 1)
# but not within this
with pytest.raises(VoidIntervalError):
clnd.get_interval('04 Oct 2017 01:59', period='T')
# vice versa for clnd 2
with pytest.raises(VoidIntervalError):
clnd2.get_interval('04 Oct 2017 01:00', period='T')
ivl = clnd2.get_interval('04 Oct 2017 01:59', period='T')
assert ivl._loc == (1, 1)
def test_interval_constructor_period_straddles_2_ws(self):
shifts = tb.Organizer(marker='90T', structure=[0, 1, 0, 2])
clnd = tb.Timeboard(base_unit_freq='T',
start='04 Oct 2017', end='04 Oct 2017 23:59',
layout=shifts,
)
# period straddles ws 0 and 1
# but only ref time of ws 1 falls into the period
ivl = clnd.get_interval('04 Oct 2017 01:00', period='H')
assert ivl._loc == (1, 1)
def test_interval_constructor_period_start_aligned_end_inside_ws(self):
shifts = tb.Organizer(marker='90T', structure=[0, 1, 0, 2])
clnd = tb.Timeboard(base_unit_freq='T',
start='04 Oct 2017', end='04 Oct 2017 23:59',
layout=shifts,
)
# period (00:00 - 00:59) is inside ws 0 (00:00 - 01:29)
# ref time of ws 0 falls into the period
ivl = clnd.get_interval('04 Oct 2017 00:00', period='H')
assert ivl._loc == (0, 0)
# now ref time is end time and ws 0 ref time is outside the period
clnd = tb.Timeboard(base_unit_freq='T',
start='04 Oct 2017', end='04 Oct 2017 23:59',
layout=shifts,
workshift_ref='end'
)
with pytest.raises(VoidIntervalError):
clnd.get_interval('04 Oct 2017 00:00', period='H')
def test_interval_constructor_period_begin_inside_ws_end_aligned(self):
shifts = tb.Organizer(marker='90T', structure=[0, 1, 0, 2])
clnd = tb.Timeboard(base_unit_freq='T',
start='04 Oct 2017', end='04 Oct 2017 23:59',
layout=shifts,
)
# period (02:00 - 02:59) is inside ws 1 (01:30 - 02:59)
# ref time of ws 1 is outside the period
with pytest.raises(VoidIntervalError):
clnd.get_interval('04 Oct 2017 02:00', period='H')
# now ref time is end time and ws 1 ref time is within the period
clnd = tb.Timeboard(base_unit_freq='T',
start='04 Oct 2017', end='04 Oct 2017 23:59',
layout=shifts,
workshift_ref='end'
)
ivl = clnd.get_interval('04 Oct 2017 02:00', period='H')
assert ivl._loc == (1, 1)
def test_interval_constructor_period_entirely_inside_ws(self):
shifts = tb.Organizer(marker='3H', structure=[0, 1, 0, 2])
clnd = tb.Timeboard(base_unit_freq='T',
start='04 Oct 2017', end='04 Oct 2017 23:59',
layout=shifts,
)
# period (04:00 - 04:59) is inside workshift (03:00 - 05:59)
# and does not includes workshift's start or end times.
# No matter if the ref time is 'start' or 'end',
# it is outside the period
with pytest.raises(VoidIntervalError):
clnd.get_interval('04 Oct 2017 04:00', period='H')
clnd = tb.Timeboard(base_unit_freq='T',
start='04 Oct 2017', end='04 Oct 2017 23:59',
layout=shifts,
workshift_ref='end'
)
with pytest.raises(VoidIntervalError):
clnd.get_interval('04 Oct 2017 04:00', period='H')
# if we supported workshift_ref being somewhere in the middle of
# workshift, an interval could be constructed
class TestIntervalConstructorWithLength(object):
def test_interval_constructor_with_length(self):
clnd = tb_12_days()
ivl = clnd.get_interval('02 Jan 2017 15:00', length=7)
assert ivl.start_time == datetime.datetime(2017, 1, 2, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 8, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 9, 0, 0, 0)
assert ivl._loc == (2,8)
assert len(ivl) == 7
ivlx = clnd('02 Jan 2017 15:00', length=7)
assert ivlx._loc == ivl._loc
def test_interval_constructor_with_negative_length(self):
clnd = tb_12_days()
ivl = clnd.get_interval('08 Jan 2017 15:00', length=-7)
assert ivl.start_time == datetime.datetime(2017, 1, 2, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 8, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 9, 0, 0, 0)
assert ivl._loc == (2,8)
assert len(ivl) == 7
def test_interval_constructor_with_length_one(self):
clnd = tb_12_days()
ivl = clnd.get_interval('02 Jan 2017 15:00', length=1)
assert ivl._loc == (2,2)
assert len(ivl) == 1
ivl = clnd.get_interval('02 Jan 2017 15:00', length=-1)
assert ivl._loc == (2,2)
assert len(ivl) == 1
def test_interval_constructor_with_zero_length(self):
# same treatment as interval with reverse timestamps
clnd = tb_12_days()
with pytest.raises(VoidIntervalError):
clnd.get_interval('08 Jan 2017 15:00', length=0)
with pytest.raises(VoidIntervalError):
clnd('08 Jan 2017 15:00', length=0)
def test_interval_constructor_with_length_OOB(self):
clnd = tb_12_days()
# May be build interval from the portion falling inside clnd?
# NO. You either get an expected result or an exception, not
# some other interval you did not ask for
#
# starts inside clnd, ends OOB
# this is PartialOutOfBoundsError because we can clip the interval
# at timeboard's bound and may not care about the outside
with pytest.raises(PartialOutOfBoundsError):
clnd.get_interval('02 Jan 2017 15:00', length=20)
with pytest.raises(PartialOutOfBoundsError):
clnd('02 Jan 2017 15:00', length=20)
# we cannot start an interval OOB because we cannot the outside
# is not structured into workshifts, hence we cannot count them.
# So this is NOT PartialOutOfBoundsError
with pytest.raises(OutOfBoundsError):
clnd.get_interval('30 Dec 2016 15:00', length=10)
# starts and ends OOB, spans over clnd
with pytest.raises(OutOfBoundsError):
clnd.get_interval('30 Dec 2016 15:00', length=20)
# completely outside clnd
with pytest.raises(OutOfBoundsError):
clnd.get_interval('20 Jan 2017 15:00', length=10)
def test_interval_constructor_with_bad_length(self):
clnd = tb_12_days()
with pytest.raises(TypeError):
clnd.get_interval('02 Jan 2017 15:00', length=5.5)
with pytest.raises(TypeError):
clnd('02 Jan 2017 15:00', length=5.5)
with pytest.raises(TypeError):
clnd.get_interval('02 Jan 2017 15:00', length='x')
with pytest.raises(ValueError):
clnd.get_interval('bad_timestamp', length=5)
class TestIntervalConstructorBadArgs(object):
def test_interval_constructor_bad_arg_combinations(self):
clnd = tb_12_days()
with pytest.raises(TypeError):
clnd.get_interval('01 Jan 2017')
with pytest.raises(TypeError):
clnd.get_interval(('01 Jan 2017',))
with pytest.raises(TypeError):
clnd.get_interval('01 Jan 2017', '05 Jan 2017')
with pytest.raises(TypeError):
clnd.get_interval(('01 Jan 2017',), length=1)
with pytest.raises(TypeError):
clnd.get_interval(('anyhting', 'anything'), length=1)
with pytest.raises(TypeError):
clnd.get_interval(('02 Jan 2017',), period='W')
with pytest.raises(TypeError):
clnd.get_interval(('anyhting', 'anything'), period='W')
with pytest.raises(TypeError):
clnd.get_interval('anyhting', length=1, period='W')
with pytest.raises(TypeError):
clnd.get_interval(('anyhting', 'anything'), length=1, period='W')
with pytest.raises(TypeError):
clnd.get_interval(length=1, period='W')
def test_interval_constructor_bad_arg_combinations_2(self):
clnd = tb_12_days()
with pytest.raises(TypeError):
clnd(('01 Jan 2017',))
with pytest.raises(TypeError):
clnd('01 Jan 2017', '05 Jan 2017')
with pytest.raises(TypeError):
clnd(('01 Jan 2017',), length=1)
with pytest.raises(TypeError):
clnd(('anyhting', 'anything'), length=1)
with pytest.raises(TypeError):
clnd(('02 Jan 2017',), period='W')
with pytest.raises(TypeError):
clnd(('anyhting', 'anything'), period='W')
with pytest.raises(TypeError):
clnd('anyhting', length=1, period='W')
with pytest.raises(TypeError):
clnd(('anyhting', 'anything'), length=1, period='W')
with pytest.raises(TypeError):
clnd(length=1, period='W')
class TestIntervalConstructorDirect(object):
def test_interval_direct_with_locs(self):
clnd = tb_12_days()
ivl = Interval(clnd, (2, 8), clnd.default_schedule)
assert ivl.start_time == datetime.datetime(2017, 1, 2, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 8, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 9, 0, 0, 0)
assert ivl._loc == (2,8)
assert len(ivl) == 7
def test_interval_direct_with_ws(self):
clnd = tb_12_days()
ivl = Interval(clnd,
(Workshift(clnd, 2), Workshift(clnd, 8)),
clnd.default_schedule)
assert ivl.start_time == datetime.datetime(2017, 1, 2, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 8, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 9, 0, 0, 0)
assert ivl._loc == (2,8)
assert len(ivl) == 7
def test_interval_direct_schedules(self):
clnd = tb_12_days()
my_schedule = clnd.add_schedule('my_schedule', lambda x: True)
ivl = Interval(clnd, (2, 8))
assert ivl.schedule.name == clnd.default_schedule.name
ivl = Interval(clnd, (2, 8), my_schedule)
assert ivl.schedule.name == 'my_schedule'
def test_interval_direct_mixed_args(self):
clnd = tb_12_days()
ivl = Interval(clnd, (2, clnd('08 Jan 2017')),
clnd.default_schedule)
assert ivl.start_time == datetime.datetime(2017, 1, 2, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 8, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 9, 0, 0, 0)
assert ivl._loc == (2,8)
assert len(ivl) == 7
def test_interval_direct_same_locs(self):
clnd = tb_12_days()
ivl = Interval(clnd, (2, 2), clnd.default_schedule)
assert ivl.start_time == datetime.datetime(2017, 1, 2, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 2, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 3, 0, 0, 0)
assert ivl._loc == (2,2)
assert len(ivl) == 1
def test_interval_direct_reverse_locs(self):
clnd = tb_12_days()
with pytest.raises(VoidIntervalError):
Interval(clnd, (8, 2), clnd.default_schedule)
def test_interval_direct_OOB_locs(self):
clnd = tb_12_days()
with pytest.raises(OutOfBoundsError):
Interval(clnd, (-1, 2), clnd.default_schedule)
with pytest.raises(OutOfBoundsError):
Interval(clnd, (8, 13), clnd.default_schedule)
with pytest.raises(OutOfBoundsError):
Interval(clnd, (-1, 13), clnd.default_schedule)
with pytest.raises(OutOfBoundsError):
Interval(clnd, (13, 25), clnd.default_schedule)
def test_interval_direct_bad_args(self):
clnd = tb_12_days()
with pytest.raises(AttributeError):
Interval('not a clnd', (2, 8), clnd.default_schedule)
with pytest.raises(TypeError):
Interval(clnd, (2, 8.5), clnd.default_schedule)
with pytest.raises(TypeError):
Interval(clnd, (2, '08 Jan 2017'), clnd.default_schedule)
with pytest.raises(IndexError):
Interval(clnd, (2,), clnd.default_schedule)
with pytest.raises(TypeError):
Interval(clnd, 'not a tuple', clnd.default_schedule)
# 'on_duty' is _Schedule.name but _Schedule is expected
with pytest.raises(TypeError):
_VoidInterval(clnd, (8, 2), 'on_duty')
class TestIntervalIteration(object):
def test_ivl_as_generator(self):
clnd = tb_12_days()
ivl = Interval(clnd, (1, 4))
ws_locs=[]
ws_sdl_is_ok=[]
for ws in ivl:
ws_locs.append(ws._loc)
ws_sdl_is_ok.append(ws.schedule.name == clnd.default_schedule.name)
assert ws_locs == [1, 2, 3, 4]
assert all(ws_sdl_is_ok)
def test_ivl_as_generator_change_schedule(self):
clnd = tb_12_days()
my_schedule = clnd.add_schedule('my_schedule', selector=lambda x:x>1)
ivl = Interval(clnd, (1, 4), schedule=my_schedule)
ws_locs=[]
ws_sdl_is_ok=[]
for ws in ivl:
ws_locs.append(ws._loc)
ws_sdl_is_ok.append(ws.schedule.name == my_schedule.name)
assert ws_locs == [1, 2, 3, 4]
assert all(ws_sdl_is_ok)
def test_ivl_workshift_generator(self):
clnd = tb_12_days()
ivl = Interval(clnd, (1, 4))
ws_locs=[]
for ws in ivl.workshifts():
ws_locs.append(ws._loc)
assert ws_locs == [1, 4]
ws_locs=[]
for ws in ivl.workshifts(duty='off'):
ws_locs.append(ws._loc)
assert ws_locs == [2, 3]
ws_locs=[]
for ws in ivl.workshifts(duty='any'):
ws_locs.append(ws._loc)
assert ws_locs == [1, 2, 3, 4]
def test_ivl_workshift_generator_no_such_duty(self):
clnd = tb_12_days()
ivl = Interval(clnd, (2, 3))
assert list(ivl.workshifts()) == []
all_on = clnd.add_schedule('all_on', lambda x: True)
ivl = Interval(clnd, (2, 3), schedule=all_on)
assert list(ivl.workshifts(duty='off')) == []
def test_workshift_generator_change_schedule(self):
clnd = tb_12_days()
all_on = clnd.add_schedule('all_on', lambda x: True)
ivl = Interval(clnd, (1, 4))
ws_locs=[]
ws_sdl_is_ok=[]
for ws in ivl.workshifts():
ws_locs.append(ws._loc)
ws_sdl_is_ok.append(ws.schedule.name == clnd.default_schedule.name)
assert ws_locs == [1, 4]
assert all(ws_sdl_is_ok)
ws_locs=[]
ws_sdl_is_ok=[]
for ws in ivl.workshifts(schedule=all_on):
ws_locs.append(ws._loc)
ws_sdl_is_ok.append(ws.schedule.name == all_on.name)
assert ws_locs == [1, 2, 3, 4]
assert all(ws_sdl_is_ok)
class TestIntervalToDataFrame(object):
def test_ivl_to_dataframe(self):
clnd = tb_12_days()
clnd.add_schedule('my_schedule', lambda x: True)
ivl = Interval(clnd, (2, 8))
clnd_df = clnd.to_dataframe(2, 8)
ivl_df = ivl.to_dataframe()
assert len(ivl_df) == len(ivl) == len(clnd_df)
assert list(ivl_df.columns) == list(clnd_df.columns)
assert 'my_schedule' in list(ivl_df.columns)
class TestVoidInterval(object):
def test_void_interval_with_locs(self):
clnd = tb_12_days()
ivl = _VoidInterval(clnd, (8, 2), clnd.default_schedule)
assert pd.isnull(ivl.start_time)
assert pd.isnull(ivl.end_time)
assert ivl._loc == (8,2)
assert len(ivl) == 0
def test_void_interval_with_ws(self):
clnd = tb_12_days()
ivl = _VoidInterval(clnd,
(Workshift(clnd, 8), Workshift(clnd, 2)),
clnd.default_schedule)
assert pd.isnull(ivl.start_time)
assert pd.isnull(ivl.end_time)
assert ivl._loc == (8,2)
assert len(ivl) == 0
def test_void_interval_mixed_args(self):
clnd = tb_12_days()
ivl = _VoidInterval(clnd, (Workshift(clnd, 8), 2),
clnd.default_schedule)
assert | pd.isnull(ivl.start_time) | pandas.isnull |
# -*- coding: utf-8 -*-
import pandas as pd
def data_preprocessing(books, ratings):
# change column names
books = books.drop(['Image-URL-S', 'Image-URL-M'], axis=1)
books.columns = ['ISBN', 'title', 'author', 'year', 'publisher', 'imageUrl']
# change incorrect data points
books.loc[books.year == 'DK Publishing Inc',:]
books.loc[books.ISBN == '0789466953','year'] = 2000
books.loc[books.ISBN == '0789466953','author'] = "<NAME>"
books.loc[books.ISBN == '0789466953','publisher'] = "DK Publishing Inc"
books.loc[books.ISBN == '0789466953','title'] = "DK Readers: Creating the X-Men, How Comic Books Come to Life (Level 4: Proficient Readers)"
books.loc[books.ISBN == '078946697X','year'] = 2000
books.loc[books.ISBN == '078946697X','author'] = "<NAME>"
books.loc[books.ISBN == '078946697X','publisher'] = "DK Publishing Inc"
books.loc[books.ISBN == '078946697X','title'] = "DK Readers: Creating the X-Men, How It All Began (Level 4: Proficient Readers)"
books.loc[books.ISBN == '2070426769','year'] = 2003
books.loc[books.ISBN == '2070426769','author'] = "<NAME>?©zio"
books.loc[books.ISBN == '2070426769','publisher'] = "Gallimard"
books.loc[books.ISBN == '2070426769','title'] = "Peuple du ciel, suivi de 'Les Bergers"
# change column data type
books.year= | pd.to_numeric(books.year, errors='coerce') | pandas.to_numeric |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
class TestRollingTS:
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
).set_index("A")
self.ragged = DataFrame({"B": range(5)})
self.ragged.index = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
def test_doc_string(self):
df = DataFrame(
{"B": [0, 1, 2, np.nan, 4]},
index=[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
],
)
df
df.rolling("2s").sum()
def test_invalid_window_non_int(self):
# not a valid freq
msg = "passed window foobar is not compatible with a datetimelike index"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="foobar")
# not a datetimelike index
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
self.regular.reset_index().rolling(window="foobar")
@pytest.mark.parametrize("freq", ["2MS", offsets.MonthBegin(2)])
def test_invalid_window_nonfixed(self, freq):
# non-fixed freqs
msg = "\\<2 \\* MonthBegins\\> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("freq", ["1D", offsets.Day(2), "2ms"])
def test_valid_window(self, freq):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("minp", [1.0, "foo", np.array([1, 2, 3])])
def test_invalid_minp(self, minp):
# non-integer min_periods
msg = (
r"local variable 'minp' referenced before assignment|"
"min_periods must be an integer"
)
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="1D", min_periods=minp)
def test_invalid_center_datetimelike(self):
# center is not implemented
msg = "center is not implemented for datetimelike and offset based windows"
with pytest.raises(NotImplementedError, match=msg):
self.regular.rolling(window="1D", center=True)
def test_on(self):
df = self.regular
# not a valid column
msg = (
r"invalid on specified as foobar, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling(window="2s", on="foobar")
# column is valid
df = df.copy()
df["C"] = date_range("20130101", periods=len(df))
df.rolling(window="2d", on="C").sum()
# invalid columns
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
df.rolling(window="2d", on="B")
# ok even though on non-selected
df.rolling(window="2d", on="C").B.sum()
def test_monotonic_on(self):
# on/index must be monotonic
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
assert df.A.is_monotonic
df.rolling("2s", on="A").sum()
df = df.set_index("A")
assert df.index.is_monotonic
df.rolling("2s").sum()
def test_non_monotonic_on(self):
# GH 19248
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
df = df.set_index("A")
non_monotonic_index = df.index.to_list()
non_monotonic_index[0] = non_monotonic_index[3]
df.index = non_monotonic_index
assert not df.index.is_monotonic
msg = "index must be monotonic"
with pytest.raises(ValueError, match=msg):
df.rolling("2s").sum()
df = df.reset_index()
msg = (
r"invalid on specified as A, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling("2s", on="A").sum()
def test_frame_on(self):
df = DataFrame(
{"B": range(5), "C": date_range("20130101 09:00:00", periods=5, freq="3s")}
)
df["A"] = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
# we are doing simulating using 'on'
expected = df.set_index("A").rolling("2s").B.sum().reset_index(drop=True)
result = df.rolling("2s", on="A").B.sum()
tm.assert_series_equal(result, expected)
# test as a frame
# we should be ignoring the 'on' as an aggregation column
# note that the expected is setting, computing, and resetting
# so the columns need to be switched compared
# to the actual result where they are ordered as in the
# original
expected = (
df.set_index("A").rolling("2s")[["B"]].sum().reset_index()[["B", "A"]]
)
result = df.rolling("2s", on="A")[["B"]].sum()
tm.assert_frame_equal(result, expected)
def test_frame_on2(self):
# using multiple aggregation columns
df = DataFrame(
{
"A": [0, 1, 2, 3, 4],
"B": [0, 1, 2, np.nan, 4],
"C": Index(
[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
),
},
columns=["A", "C", "B"],
)
expected1 = DataFrame(
{"A": [0.0, 1, 3, 3, 7], "B": [0, 1, 3, np.nan, 4], "C": df["C"]},
columns=["A", "C", "B"],
)
result = df.rolling("2s", on="C").sum()
expected = expected1
tm.assert_frame_equal(result, expected)
expected = Series([0, 1, 3, np.nan, 4], name="B")
result = df.rolling("2s", on="C").B.sum()
tm.assert_series_equal(result, expected)
expected = expected1[["A", "B", "C"]]
result = df.rolling("2s", on="C")[["A", "B", "C"]].sum()
tm.assert_frame_equal(result, expected)
def test_basic_regular(self):
df = self.regular.copy()
df.index = date_range("20130101", periods=5, freq="D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="1D").sum()
tm.assert_frame_equal(result, expected)
df.index = date_range("20130101", periods=5, freq="2D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1).sum()
result = df.rolling(window="2D").sum()
tm.assert_frame_equal(result, expected)
def test_min_periods(self):
# compare for min_periods
df = self.regular
# these slightly different
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s", min_periods=1).sum()
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from hypothesis import given, settings, strategies as st
import cudf
from cudf.tests import utils
repr_categories = utils.NUMERIC_TYPES + ["str", "category", "datetime64[ns]"]
@pytest.mark.parametrize("dtype", repr_categories)
@pytest.mark.parametrize("nrows", [0, 5, 10])
def test_null_series(nrows, dtype):
size = 5
mask = utils.random_bitmask(size)
data = cudf.Series(np.random.randint(1, 9, size))
column = data.set_mask(mask)
sr = cudf.Series(column).astype(dtype)
ps = sr.to_pandas()
pd.options.display.max_rows = int(nrows)
psrepr = ps.__repr__()
psrepr = psrepr.replace("NaN", "<NA>")
psrepr = psrepr.replace("NaT", "<NA>")
psrepr = psrepr.replace("None", "<NA>")
if (
dtype.startswith("int")
or dtype.startswith("uint")
or dtype.startswith("long")
):
psrepr = psrepr.replace(
str(sr._column.default_na_value()) + "\n", "<NA>\n"
)
print(psrepr)
print(sr)
assert psrepr.split() == sr.__repr__().split()
dtype_categories = [
"float32",
"float64",
"datetime64[ns]",
"str",
"category",
]
@pytest.mark.parametrize("ncols", [1, 2, 3, 4, 5, 10])
def test_null_dataframe(ncols):
size = 20
gdf = cudf.DataFrame()
for idx, dtype in enumerate(dtype_categories):
mask = utils.random_bitmask(size)
data = cudf.Series(np.random.randint(0, 128, size))
column = data.set_mask(mask)
sr = cudf.Series(column).astype(dtype)
gdf[dtype] = sr
pdf = gdf.to_pandas()
pd.options.display.max_columns = int(ncols)
pdfrepr = pdf.__repr__()
pdfrepr = pdfrepr.replace("NaN", "<NA>")
pdfrepr = pdfrepr.replace("NaT", "<NA>")
pdfrepr = pdfrepr.replace("None", "<NA>")
print(pdf)
print(gdf)
assert pdfrepr.split() == gdf.__repr__().split()
@pytest.mark.parametrize("dtype", repr_categories)
@pytest.mark.parametrize("nrows", [0, 1, 2, 9, 10, 11, 19, 20, 21])
def test_full_series(nrows, dtype):
size = 20
ps = pd.Series(np.random.randint(0, 100, size)).astype(dtype)
sr = cudf.from_pandas(ps)
pd.options.display.max_rows = int(nrows)
assert ps.__repr__() == sr.__repr__()
@pytest.mark.parametrize("dtype", repr_categories)
@pytest.mark.parametrize("nrows", [0, 1, 2, 9, 20 / 2, 11, 20 - 1, 20, 20 + 1])
@pytest.mark.parametrize("ncols", [0, 1, 2, 9, 20 / 2, 11, 20 - 1, 20, 20 + 1])
def test_full_dataframe_20(dtype, nrows, ncols):
size = 20
pdf = pd.DataFrame(
{idx: np.random.randint(0, 100, size) for idx in range(size)}
).astype(dtype)
gdf = cudf.from_pandas(pdf)
ncols, nrows = gdf._repr_pandas025_formatting(ncols, nrows, dtype)
pd.options.display.max_rows = int(nrows)
pd.options.display.max_columns = int(ncols)
assert pdf.__repr__() == gdf.__repr__()
assert pdf._repr_html_() == gdf._repr_html_()
assert pdf._repr_latex_() == gdf._repr_latex_()
@pytest.mark.parametrize("dtype", repr_categories)
@pytest.mark.parametrize("nrows", [9, 21 / 2, 11, 21 - 1])
@pytest.mark.parametrize("ncols", [9, 21 / 2, 11, 21 - 1])
def test_full_dataframe_21(dtype, nrows, ncols):
size = 21
pdf = pd.DataFrame(
{idx: np.random.randint(0, 100, size) for idx in range(size)}
).astype(dtype)
gdf = cudf.from_pandas(pdf)
pd.options.display.max_rows = int(nrows)
pd.options.display.max_columns = int(ncols)
assert pdf.__repr__() == gdf.__repr__()
@given(
st.lists(
st.integers(-9223372036854775808, 9223372036854775807),
min_size=1,
max_size=10000,
)
)
@settings(deadline=None)
def test_integer_dataframe(x):
gdf = cudf.DataFrame({"x": x})
pdf = gdf.to_pandas()
pd.options.display.max_columns = 1
assert gdf.__repr__() == pdf.__repr__()
assert gdf.T.__repr__() == pdf.T.__repr__()
@given(
st.lists(
st.integers(-9223372036854775808, 9223372036854775807), max_size=10000
)
)
@settings(deadline=None)
def test_integer_series(x):
sr = cudf.Series(x)
ps = pd.Series(x)
print(sr)
print(ps)
assert sr.__repr__() == ps.__repr__()
@given(st.lists(st.floats()))
@settings(deadline=None)
def test_float_dataframe(x):
gdf = cudf.DataFrame({"x": cudf.Series(x, nan_as_null=False)})
pdf = gdf.to_pandas()
assert gdf.__repr__() == pdf.__repr__()
@given(st.lists(st.floats()))
@settings(deadline=None)
def test_float_series(x):
sr = cudf.Series(x, nan_as_null=False)
ps = pd.Series(x)
assert sr.__repr__() == ps.__repr__()
@pytest.fixture
def mixed_pdf():
pdf = pd.DataFrame()
pdf["Integer"] = np.array([2345, 11987, 9027, 9027])
pdf["Date"] = np.array(
["18/04/1995", "14/07/1994", "07/06/2006", "16/09/2005"]
)
pdf["Float"] = np.array([9.001, 8.343, 6, 2.781])
pdf["Integer2"] = np.array([2345, 106, 2088, 789277])
pdf["Category"] = np.array(["M", "F", "F", "F"])
pdf["String"] = np.array(["Alpha", "Beta", "Gamma", "Delta"])
pdf["Boolean"] = np.array([True, False, True, False])
return pdf
@pytest.fixture
def mixed_gdf(mixed_pdf):
return cudf.from_pandas(mixed_pdf)
def test_mixed_dataframe(mixed_pdf, mixed_gdf):
assert mixed_gdf.__repr__() == mixed_pdf.__repr__()
def test_mixed_series(mixed_pdf, mixed_gdf):
for col in mixed_gdf.columns:
assert mixed_gdf[col].__repr__() == mixed_pdf[col].__repr__()
def test_MI():
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 4, 10),
"b": np.random.randint(0, 4, 10),
"c": np.random.randint(0, 4, 10),
}
)
levels = [["a", "b", "c", "d"], ["w", "x", "y", "z"], ["m", "n"]]
codes = cudf.DataFrame(
{
"a": [0, 0, 0, 0, 1, 1, 2, 2, 3, 3],
"b": [0, 1, 2, 3, 0, 1, 2, 3, 0, 1],
"c": [0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
}
)
pd.options.display.max_rows = 999
pd.options.display.max_columns = 0
gdf = gdf.set_index(cudf.MultiIndex(levels=levels, codes=codes))
pdf = gdf.to_pandas()
gdfT = gdf.T
pdfT = pdf.T
assert gdf.__repr__() == pdf.__repr__()
assert gdfT.__repr__() == pdfT.__repr__()
@pytest.mark.parametrize("nrows", [0, 1, 3, 5, 10])
@pytest.mark.parametrize("ncols", [0, 1, 2, 3])
def test_groupby_MI(nrows, ncols):
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
pdf = gdf.to_pandas()
gdg = gdf.groupby(["a", "b"]).count()
pdg = pdf.groupby(["a", "b"]).count()
pd.options.display.max_rows = nrows
pd.options.display.max_columns = ncols
assert gdg.__repr__() == pdg.__repr__()
assert gdg.T.__repr__() == pdg.T.__repr__()
@pytest.mark.parametrize("dtype", utils.NUMERIC_TYPES)
@pytest.mark.parametrize("length", [0, 1, 10, 100, 1000])
def test_generic_index(length, dtype):
psr = pd.Series(
range(length),
index=np.random.randint(0, high=100, size=length).astype(dtype),
)
gsr = cudf.Series.from_pandas(psr)
assert psr.index.__repr__() == gsr.index.__repr__()
@pytest.mark.parametrize(
"index,expected_repr",
[
(
cudf.Index([1, 2, 3, None]),
"Int64Index([1, 2, 3, <NA>], dtype='int64')",
),
(
cudf.Index([None, 2.2, 3.324342, None]),
"Float64Index([<NA>, 2.2, 3.324342, <NA>], dtype='float64')",
),
(
cudf.Index([None, None, None], name="hello"),
"Float64Index([<NA>, <NA>, <NA>], dtype='float64', name='hello')",
),
(
cudf.Index([None], name="hello"),
"Float64Index([<NA>], dtype='float64', name='hello')",
),
(
cudf.Index([None], dtype="int8", name="hello"),
"Int8Index([<NA>], dtype='int8', name='hello')",
),
(
cudf.Index([None] * 50, dtype="object"),
"StringIndex([None None None None None None None None "
"None None None None None None\n None None None None None None "
"None None None None None None None None\n None None None None "
"None None None None None None None None None None\n None None "
"None None None None None None], dtype='object')",
),
(
cudf.Index([None] * 20, dtype="uint32"),
"UInt32Index([<NA>, <NA>, <NA>, <NA>, <NA>, <NA>, <NA>, <NA>, "
"<NA>,\n <NA>, <NA>, <NA>, <NA>, <NA>, <NA>, <NA>, <NA>, "
"<NA>,\n <NA>, <NA>],\n dtype='uint32')",
),
(
cudf.Index(
[None, 111, 22, 33, None, 23, 34, 2343, None], dtype="int16"
),
"Int16Index([<NA>, 111, 22, 33, <NA>, 23, 34, 2343, <NA>], "
"dtype='int16')",
),
(
cudf.Index([1, 2, 3, None], dtype="category"),
"CategoricalIndex([1, 2, 3, <NA>], categories=[1, 2, 3], "
"ordered=False, dtype='category')",
),
(
cudf.Index([None, None], dtype="category"),
"CategoricalIndex([<NA>, <NA>], categories=[], ordered=False, "
"dtype='category')",
),
(
cudf.Index(np.array([10, 20, 30, None], dtype="datetime64[ns]")),
"DatetimeIndex([1970-01-01 00:00:00.000000010, "
"1970-01-01 00:00:00.000000020,"
"\n 1970-01-01 00:00:00.000000030, <NA>],\n "
"dtype='datetime64[ns]')",
),
(
cudf.Index(np.array([10, 20, 30, None], dtype="datetime64[s]")),
"DatetimeIndex([1970-01-01 00:00:10, "
"1970-01-01 00:00:20, 1970-01-01 00:00:30,\n"
" <NA>],\n dtype='datetime64[s]')",
),
(
cudf.Index(np.array([10, 20, 30, None], dtype="datetime64[us]")),
"DatetimeIndex([1970-01-01 00:00:00.000010, "
"1970-01-01 00:00:00.000020,\n "
"1970-01-01 00:00:00.000030, <NA>],\n "
"dtype='datetime64[us]')",
),
(
cudf.Index(np.array([10, 20, 30, None], dtype="datetime64[ms]")),
"DatetimeIndex([1970-01-01 00:00:00.010, "
"1970-01-01 00:00:00.020,\n "
"1970-01-01 00:00:00.030, <NA>],\n "
"dtype='datetime64[ms]')",
),
(
cudf.Index(np.array([None] * 10, dtype="datetime64[ms]")),
"DatetimeIndex([<NA>, <NA>, <NA>, <NA>, <NA>, <NA>, <NA>, <NA>, "
"<NA>,\n <NA>],\n dtype='datetime64[ms]')",
),
],
)
def test_generic_index_null(index, expected_repr):
actual_repr = index.__repr__()
assert expected_repr == actual_repr
@pytest.mark.parametrize(
"df,pandas_special_case",
[
( | pd.DataFrame({"a": [1, 2, 3]}, index=[10, 20, None]) | pandas.DataFrame |
"""
Usually we need to run collection_processor before the main app to process the whole collection of texts
and collect the data we need from the corpus
"""
import pandas as pd
import spacy
import toml
from collections import Counter
from multiprocessing import Pool
import time
from nltk.stem.snowball import SnowballStemmer
nlp = spacy.load("en_core_web_sm")
configs = toml.load("configs.toml")
collection_path = configs['Collection']['path']
num_of_rows = None
def load_abstracts():
cols_for_metadata = ["title", "journal", "authors", "abstract"]
metadata_df = pd.read_csv(collection_path, usecols=cols_for_metadata, nrows=num_of_rows)
# get subset with abstract included in the metadata.csv
df_abstract = metadata_df.dropna(subset=["abstract"])
return df_abstract
def process_single_abstract(text):
"""
Process a single abstract using spacy and Counter
return: Dataframe of counted words
"""
doc = nlp(text)
# eliminate number and punctuation and lemmatize
words = [tk.lemma_ for tk in doc if tk.is_alpha]
# count freq using python Counter
counter = Counter(words)
counter = [(w, freq) for w, freq in counter.items()]
counter_df = pd.DataFrame(counter, columns=['word', 'freq'])
return counter_df
def stemming_single_abstract(text):
sstemer = SnowballStemmer(language='english')
doc = nlp(text)
stemmed = [sstemer.stem(tk.text) for tk in doc if tk.is_alpha]
counter = Counter(stemmed)
counter = [(w, freq) for w, freq in counter.items()]
counter_df = pd.DataFrame(counter, columns=['word', 'freq'])
return counter_df
def main():
begin = time.time()
df = load_abstracts()
loading_time = time.time() - begin
print("Finished Load Abstracts")
print("Loading metadata.csv takes: {}".format(loading_time))
abs_list = df['abstract'].tolist()
with Pool() as p:
results = p.map(process_single_abstract, abs_list)
print("{} abstracts in metadata.csv".format(len(abs_list)))
print("Processed All Abstracts")
proc_time = time.time() - loading_time
print("Process time: {}".format(proc_time))
result = | pd.concat(results) | pandas.concat |
import pandas as pd
import json
import numpy as np
import os
def getState(temp=None,press=None,fluid="water",state="SLSV",tables="Wiley",preferTemp=False,debug=False,generic=False,Sat=-1.0e-5,qual=1.0):
# Evaluate input for errors
if temp == None and press == None:
raise ValueError("Either temperature (temp) or pressure (press) must be defined.")
if state not in ["SLSV","SHV","CL"]:
raise ValueError("State '"+state+"' not recognized.")
if state != "SLSV" and (temp == None or press == None):
raise ValueError("Both temperature (temp) and pressure (press) must be defined for given state.")
# Parse input
if temp != None and press == None:
preferTemp = True
if state == "SLSV":
state = "sat_liq"
pressORtemp = "press"
if preferTemp:
pressORtemp = "temp"
state += "_"+pressORtemp
if temp != None:
temp = float(temp)
if press != None:
press = float(press)
prefix = os.path.abspath(__file__).replace("catt.py","")+"data_tables/"
# Get corresponding data table(s)
data_filename = "_".join([fluid,state,tables,"metric"])
data = None
if "sat_liq" in state:
data = pd.read_csv(prefix+data_filename+".csv")
else:
with open(prefix+data_filename+".json") as f:
data = json.load(f)
# Construct return
rtn = pd.DataFrame()
# Get pressure entry for SHV and CL
if state in ["CL","SHV"]:
if str(float(press)) not in data.keys():
return "You're gonna wanna interpolate!"
else:
data = data[str(float(press))]
for key in data.keys():
for i in range(len(data[key])):
if data[key][i] == "Sat.":
data[key][i] == -1.0e-5
continue
data[key][i] = float(data[key][i])
d2 = {'press':press}
d2.update(data)
data = d2
data = pd.DataFrame(data=data)
if debug:
print(data)
# Get specific state
characteristic = press
charKey = 'press'
if state in ['CL','SHV'] or preferTemp:
characteristic = temp
charKey = 'temp'
if debug:
print('')
print(characteristic,charKey)
print('')
print(data[charKey])
for d in data[charKey]:
print(type(d))
chars = list(data[charKey])
if 'Sat.' in chars:
chars[chars.index('Sat.')] = Sat
chars = np.array(chars)
upper = chars[chars > characteristic].min()
lower = chars[chars <= characteristic].max()
if lower == -1.0e-5:
raise ValueError('Must provide saturation value.')
if lower == characteristic:
rtn = data[data[charKey]==characteristic]
else:
upper_index = list(chars).index(upper)
lower_index = list(chars).index(lower)
state = {}
for key in data.keys():
state[key] = (data[key][upper_index]-data[key][lower_index])/(upper-lower) * (temp-lower) + data[key][lower_index]
rtn = pd.DataFrame(data=state,index=[0])
if state not in ['CL','SHV'] and not generic:
keys = ['temp','press','<KEY>']
dat = [rtn['temp'],rtn['press']]
v = rtn['v_f']*1.0e-3 + qual*(rtn['v_g']-rtn['v_f']*1.0e-3)
u = rtn['u_f'] + qual*(rtn['u_g']-rtn['u_f'])
h = rtn['h_f'] + qual*(rtn['h_g']-rtn['h_f'])
s = rtn['s_f'] + qual*(rtn['s_g']-rtn['s_f'])
dat += [v,u,h,s]
state = {key:datum for key,datum in zip(keys,dat)}
rtn = | pd.DataFrame(data=state) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
更新数据范围列表,用于下载数据时减少重复下载
日线数据路径是 IF/IF1804.h5
分钟数据路径是 IF1804/IF1804_20180226.h5
已经退市的合约,只要做过一次,后面就没有必要再做了
但数据还是有问题,不活跃的合约,最后几天完全没有行情了
"""
import os
import sys
import pandas as pd
from datetime import datetime
from kquant_data.utils.xdatetime import yyyyMMddHHmm_2_datetime, yyyyMMdd_2_datetime
from kquant_data.config import __CONFIG_H5_FUT_SECTOR_DIR__
from kquant_data.future.symbol import wind_code_2_InstrumentID
from kquant_data.xio.csv import read_datetime_dataframe
from kquant_data.utils.xdatetime import tic, toc
# 解决Python 3.6的pandas不支持中文路径的问题
print(sys.getfilesystemencoding()) # 查看修改前的
try:
sys._enablelegacywindowsfsencoding() # 修改
print(sys.getfilesystemencoding()) # 查看修改后的
except:
pass
path_ipo_last_trade = os.path.join(__CONFIG_H5_FUT_SECTOR_DIR__, 'ipo_last_trade_trading.csv')
def get_in_file_day(file_path, wind_code, df):
print(file_path)
# 需要
df_h5 = pd.read_hdf(file_path)
df_h5.dropna(axis=0, how='all', thresh=3, inplace=True)
if df_h5.empty:
return df
first = yyyyMMddHHmm_2_datetime(df_h5['DateTime'].iloc[0])
last = yyyyMMddHHmm_2_datetime(df_h5['DateTime'].iloc[-1])
try:
df.loc[wind_code]['first'] = min(first, df.loc[wind_code]['first'])
df.loc[wind_code]['last'] = max(last, df.loc[wind_code]['last'])
except:
df.loc[wind_code] = None
df.loc[wind_code]['first'] = first
df.loc[wind_code]['last'] = last
return df
def process_one_file(filename, dirpath, df):
shotname, extension = os.path.splitext(filename)
if extension != '.h5':
return 'continue', df
try:
wind_code, date = shotname.split('_')
except:
wind_code = shotname
path_h5 = os.path.join(dirpath, filename)
# 文件
fsize = os.path.getsize(path_h5)
if fsize == 2608:
return 'continue', df
print(path_h5)
# 需要
df_h5 = pd.read_hdf(path_h5)
df_h5.dropna(axis=0, how='all', thresh=3, inplace=True)
if df_h5.empty:
return 'continue', df
first = yyyyMMddHHmm_2_datetime(df_h5['DateTime'].iloc[0])
last = yyyyMMddHHmm_2_datetime(df_h5['DateTime'].iloc[-1])
try:
df.loc[wind_code]['first'] = min(first, df.loc[wind_code]['first'])
df.loc[wind_code]['last'] = max(last, df.loc[wind_code]['last'])
except:
df.loc[wind_code] = None
df.loc[wind_code]['first'] = first
df.loc[wind_code]['last'] = last
return 'break', df
def get_in_dir_min(dir_path, df, load_first):
for _dirpath, _dirnames, _filenames in os.walk(dir_path):
length = len(_filenames)
curr_pos = 0
# 正序得到开始时间
if load_first:
for i in range(length):
curr_pos = i
filename = _filenames[i]
control, df = process_one_file(filename, _dirpath, df)
if control == 'continue':
continue
elif control == 'break':
break
# 逆序得到结束时间
# for i in range(length - 1, -1, -1):
for i in reversed(range(curr_pos + 1, length)):
# print(i)
filename = _filenames[i]
control, df = process_one_file(filename, _dirpath, df)
if control == 'continue':
continue
elif control == 'break':
break
return df
def get_first_last_min(root_path, date_table, df):
"""
遍历文件夹,得到合约的数据开始时间与结束时间
兼容文件夹下都是同一合约,分钟数据
兼容文件夹下都是同产品,日数据
:param root_path:
:return:
"""
for dirpath, dirnames, filenames in os.walk(root_path):
for dirname in dirnames:
# if dirname != 'PM1801':
# continue
dirpath_dirname = os.path.join(dirpath, dirname)
load_first = False
try:
row = date_table.loc[dirname]
# 最后一天如何判断数据已经完成?
if row['lasttrade_date'] + pd.Timedelta('14.5h') < row['last']:
print(row['last'])
continue
except:
load_first = True
df = get_in_dir_min(dirpath_dirname, df, load_first)
path = os.path.join(root_path, 'first_last.csv')
df.index.name = 'product'
df.to_csv(path)
return df
def get_first_last_day(root_path, date_table, df):
"""
遍历文件夹,得到合约的数据开始时间与结束时间
兼容文件夹下都是同一合约,分钟数据
兼容文件夹下都是同产品,日数据
:param root_path:
:return:
"""
for dirpath, dirnames, filenames in os.walk(root_path):
for dirname in dirnames:
dirpath_dirname = os.path.join(dirpath, dirname)
for _dirpath, _dirnames, _filenames in os.walk(dirpath_dirname):
for filename in _filenames:
dirpath_filename = os.path.join(_dirpath, filename)
shotname, extension = os.path.splitext(filename)
if extension != '.h5':
continue
try:
row = date_table.loc[shotname]
# 最后一天如何判断数据已经完成?
if row['lasttrade_date'] == row['last']:
print(row['last'])
continue
except:
pass
df = get_in_file_day(dirpath_filename, shotname, df)
path = os.path.join(root_path, 'first_last.csv')
df.index.name = 'product'
df.to_csv(path)
return df
def CZCE_3to4(x):
if x['exchange'] == 'CZC':
x['InstrumentID'] = '%s%s%s' % (x['InstrumentID'][0:2], str(x['lasttrade_date'])[2], x['InstrumentID'][-3:])
return x
def load_ipo_last_trade_trading():
df_csv = pd.read_csv(path_ipo_last_trade, encoding='utf-8-sig')
df = wind_code_2_InstrumentID(df_csv, 'wind_code')
df = df.apply(CZCE_3to4, axis=1)
df = df.set_index(['InstrumentID'])
return df
if __name__ == '__main__':
ipo_last_trade = load_ipo_last_trade_trading()
ipo_last_trade['ipo_date'] = ipo_last_trade['ipo_date'].apply(lambda x: yyyyMMdd_2_datetime(x))
ipo_last_trade['lasttrade_date'] = ipo_last_trade['lasttrade_date'].apply(lambda x: yyyyMMdd_2_datetime(x))
ipo_last_trade['ipo_date'] = pd.to_datetime(ipo_last_trade['ipo_date'])
ipo_last_trade['lasttrade_date'] = | pd.to_datetime(ipo_last_trade['lasttrade_date']) | pandas.to_datetime |
from DSAE import Discriminative_SAE
import DSAE.Pre_process as Pre_process
import DSAE.To_full as To_full
import DSAE.Dropout as Dropout
import pandas as pd
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error
from scipy.stats import pearsonr
from sklearn.metrics.pairwise import cosine_similarity
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--input_true', type=str, default='data/test.csv')
parser.add_argument('--input_raw', type=str, default='None')
parser.add_argument('--outputdir', type=str, default='data')
parser.add_argument('--dim1', type=int, default=600)
parser.add_argument('--dim2', type=int, default=256)
parser.add_argument('--epoch1', type=int, default=3000)
parser.add_argument('--epoch2', type=int, default=1000)
parser.add_argument('--learning_rate', type=float, default=4e-3)
parser.add_argument('--batch', type=int, default=64)
parser.add_argument('--print_step', type=int, default=200)
args = parser.parse_args()
def main(data1, data2, outdir):
######################## Read Data ########################
data_T = pd.read_csv(data1, index_col=0)
if(data2 == 'None'):
data_raw = Dropout.main(data_T, outdir)
else:
data_raw = | pd.read_csv(data2, index_col=0) | pandas.read_csv |
import os
import json
import click
import pandas as pd
import sqlalchemy as alchemy
from data_layer import Redshift as SQL
from typing import Optional, Dict, List, Tuple
from enum import Enum
class EntityType(Enum):
ad = 'ad'
adset = 'adset'
campaign = 'campaign'
@classmethod
def from_tag_data(cls, tags: pd.DataFrame) -> any:
return cls.ad if 'ad_id' in tags.columns else cls.adset if 'adset_tag' in tags.columns else cls.campaign
@property
def identifier_columns(self) -> Optional[Dict[str, any]]:
return {
'channel': alchemy.VARCHAR(127),
f'{self.value}_id': alchemy.VARCHAR(127),
}
@property
def columns(self) -> Optional[Dict[str, any]]:
return {
'company_identifier': alchemy.VARCHAR(127),
'app': alchemy.VARCHAR(127),
**self.identifier_columns,
f'{self.value}_tag': alchemy.VARCHAR(255),
f'{self.value}_subtag': alchemy.VARCHAR(255),
}
@property
def id_column_names(self) -> List[str]:
return [f'{self.value}_id']
@property
def tag_column_names(self) -> List[str]:
return [f'{self.value}_tag', f'{self.value}_subtag']
@property
def update_column_names(self) -> List[str]:
return self.tag_column_names + ['upload_group']
@property
def table_name(self) -> str:
if self is EntityType.ad:
return 'tag_ads'
elif self is EntityType.adset:
return 'tag_adsets'
elif self is EntityType.campaign:
return 'tag_campaigns'
@property
def upload_table_name(self) -> str:
return f'upload_{self.table_name}'
@property
def restore_table_name(self) -> str:
return f'restore_{self.table_name}'
def convert_id_columns(df: pd.DataFrame, col_names: List[str]):
for name in col_names:
df[name] = df[name].apply(lambda id: json.loads(id) if type(id) is str else None)
df.drop(df.index[df[name].isna()], inplace=True)
def strip_empty_tags(df: pd.DataFrame, col_names: List[str], verbose: bool=False):
if not col_names:
return
df[col_names] = df[col_names].fillna(value='')
for name in col_names:
df[name] = df[name].apply(lambda s: s.strip())
stripped_df = df
for name in col_names:
stripped_df = stripped_df[stripped_df[name] == '']
if not stripped_df.empty and verbose:
print(f'Found {len(stripped_df)} empty tag rows')
def write_output(df: pd.DataFrame, file_name: str, description: str):
dirname = os.path.dirname(__file__)
path = os.path.join(dirname, 'output', file_name)
df.to_csv(path, index=False)
print(f'{len(df)} {description} written to {path}')
def drop_duplicates(df: pd.DataFrame, original_df: pd.DataFrame, entity: EntityType, output_prefix: str, interactive: bool=False):
starting_rows = len(df)
df.drop_duplicates(subset=list(entity.identifier_columns.keys()) + entity.tag_column_names, inplace=True)
dropped_rows = starting_rows - len(df)
if dropped_rows > 0 and interactive:
print(f'Dropped {dropped_rows} duplicate rows with identical tags.')
duplicated_series = df.duplicated(subset=entity.identifier_columns.keys(), keep=False)
duplicate_rows = | pd.DataFrame(df[duplicated_series.values]) | pandas.DataFrame |
#!/usr/bin/env python
"""
Script to georeference Nikon D800 images using a GPX track.
Default arguments (filepaths) may have to be edited in the main() function.
REQUIREMENT: Needs to be run on Linux right now and have exiftool installed.
"""
import datetime
import os
import subprocess
import pandas as pd
import gpxpy
# What suffix to relate raw images to
RAW_SUFFIX = "NEF"
OTHER_SUFFIXES = ["jpg", "JPG", "jpeg", "tiff", "tif"]
def check_if_valid_filename(filename: str):
"""Check if a filename corresponds to an image file."""
if len(filename.split(".")) < 2: # Equals true if file does not have a suffix, e.g.: "file", as opposed to "file.jpg"
return False
suffix = filename.split(".")[-1]
# Check if the suffix is in the list of valid suffixes
if suffix not in [RAW_SUFFIX] + OTHER_SUFFIXES:
return False
return True # If it is valid
def get_cam_times(directory: str) -> pd.Series:
"""Get the EXIF capture time from each raw image in a directory as a Pandas Series."""
# Create an empty Pandas series with datetime as its data type
# Index is image filename, date taken is data
cam_times = pd.Series(dtype="datetime64[ns]")
files = os.listdir(directory) # Get list of all files in the directory
for i, file in enumerate(files): # Loop over all files
# Check if valid image file
if not check_if_valid_filename(file):
continue
# For every 50 images, print a progress update (process may take a while)
if i % 50 == 0 and i != 0:
print(f"File {i} / {len(files)}")
# Get the full path ("image_dir/image.jpg")
full_path = os.path.join(directory, file)
# Use exiftool in a shell environment, filter out the "Create Date" and take the last entry (there are duplicates of the same one)
exiftool_output = subprocess.check_output(f"exiftool {full_path} | grep 'Create Date' | tail -1", shell=True)
# Do some string magic to extract only the date and time from the output
date = exiftool_output.decode("utf-8").split(" : ")[1].strip()
# Convert to a DateTime object and add to the series
cam_times[file] = pd.to_datetime(date, format="%Y:%m:%d %H:%M:%S.%f")
return cam_times
def get_time_diff(photo_sync_directory: str, gps_time_file: str) -> datetime.datetime:
"""Get the time difference between the GPS time and the camera's internal time by comparing photographs of waypoints."""
# Create empty Pandas Dataframe
times = pd.DataFrame(columns=["cam", "gps"], dtype="datetime64")
# Get the times from the camera and add them to the dataframe
cam_times = get_cam_times(photo_sync_directory)
times.loc[:, "cam"] = cam_times
# Open the GPS time file and add the times to the dataframe
# It is structured as: *picture filename*,*equivalent gps time*
with open(gps_time_file) as file:
for line in file.readlines():
cam, gps_time = line.split(",")
times.loc[cam, "gps"] = pd.to_datetime(gps_time, format="%Y-%m-%d %H:%M:%S")
# Get the time differences
# Type correction (.astype) may not be neccessary anymore.
times["diff"] = times["cam"] - times["gps"].astype("datetime64")
# Get the mean time offset
diff = times["diff"].mean()
# Round the diff to nearest 1/10th of a second
# The Nikon camera data is only shown to 1/10th of a second.
offset = round(diff.microseconds / 1e5) * int(1e5) - diff.microseconds
diff += | pd.Timedelta(microseconds=offset) | pandas.Timedelta |
import os
from functools import reduce
import pandas as pd
import numpy as np
from . import settings
def get_data(cryptocurrency, fillna=0):
crypto_path = os.path.join(settings.RESOURCES_DIR, cryptocurrency)
# Currency related data frames
price_df = _read_csv(os.path.join(crypto_path, 'price.csv'))
_lower_headers(price_df)
# price_df = _floaterize_prices(price_df)
price_df['date'] = pd.to_datetime(price_df['date'])
transactions_df = _read_csv(os.path.join(crypto_path, 'transactions.csv'))
_lower_headers(transactions_df)
transactions_df['date'] = | pd.to_datetime(transactions_df['date']) | pandas.to_datetime |
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from datetime import datetime
from io import StringIO
import os
import platform
from tempfile import TemporaryFile
from urllib.error import URLError
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup("utf-8")
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
def test_squeeze(all_parsers):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True)
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
def test_malformed(all_parsers):
# see gh-6607
parser = all_parsers
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#")
@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
parser = all_parsers
msg = "Expected 3 fields in line 6, saw 5"
reader = parser.read_csv(
StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
)
with pytest.raises(ParserError, match=msg):
reader.read(nrows)
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
],
columns=["A", "B", "C", "D", "E"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
]
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_wrong_num_columns(all_parsers):
# Too few columns.
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
parser = all_parsers
msg = "Expected 6 fields in line 3, saw 7"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"A,B\nTrue,1\nFalse,2\nTrue,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]),
DataFrame(
[[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
columns=["A", "B"],
),
),
(
"A,B\nTRUE,1\nFALSE,2\nTRUE,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nfoo,bar\nbar,foo",
dict(true_values=["foo"], false_values=["bar"]),
DataFrame([[True, False], [False, True]], columns=["A", "B"]),
),
],
)
def test_parse_bool(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_int_conversion(all_parsers):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2)
expected = DataFrame(
[
["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15],
["qux", 12, 13, 14, 15],
["foo2", 12, 13, 14, 15],
["bar2", 12, 13, 14, 15],
],
columns=["index", "A", "B", "C", "D"],
)
expected = expected.set_index("index")
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), chunksize=chunksize)
@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
# see gh-15755
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), expected)
def test_read_chunksize_and_nrows_changing_size(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
with pytest.raises(StopIteration, match=""):
reader.get_chunk(size=3)
def test_get_chunk_passed_chunksize(all_parsers):
parser = all_parsers
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
reader = parser.read_csv(StringIO(data), chunksize=2)
result = reader.get_chunk()
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)])
def test_read_chunksize_compat(all_parsers, kwargs):
# see gh-12185
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs)
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), result)
def test_read_chunksize_jagged_names(all_parsers):
# see gh-23509
parser = all_parsers
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4)
result = concat(reader)
tm.assert_frame_equal(result, expected)
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = dict(index_col=0)
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
parser = TextParser(data_list, chunksize=2, **kwargs)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_iterator(all_parsers):
# see gh-6607
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
expected = parser.read_csv(StringIO(data), **kwargs)
reader = parser.read_csv(StringIO(data), iterator=True, **kwargs)
first_chunk = reader.read(3)
tm.assert_frame_equal(first_chunk, expected[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, expected[3:])
def test_iterator2(all_parsers):
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result[0], expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
def test_reader_list_skiprows(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, skiprows=[1], **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[1:3])
def test_iterator_stop_on_chunksize(all_parsers):
# gh-3967: stopping iteration when chunksize is specified
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), chunksize=1)
result = list(reader)
assert len(result) == 3
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(concat(result), expected)
@pytest.mark.parametrize(
"kwargs", [dict(iterator=True, chunksize=1), dict(iterator=True), dict(chunksize=1)]
)
def test_iterator_skipfooter_errors(all_parsers, kwargs):
msg = "'skipfooter' not supported for 'iteration'"
parser = all_parsers
data = "a\n1\n2"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, **kwargs)
def test_nrows_skipfooter_errors(all_parsers):
msg = "'skipfooter' not supported with 'nrows'"
data = "a\n1\n2\n3\n4\n5\n6"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
""",
dict(index_col=0, names=["index", "A", "B", "C", "D"]),
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
columns=["A", "B", "C", "D"],
),
),
(
"""foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
""",
dict(index_col=[0, 1], names=["index1", "index2", "A", "B", "C", "D"]),
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
],
names=["index1", "index2"],
),
columns=["A", "B", "C", "D"],
),
),
],
)
def test_pass_names_with_index(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_no_level_names(all_parsers, index_col):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
headless_data = "\n".join(data.split("\n")[1:])
names = ["A", "B", "C", "D"]
parser = all_parsers
result = parser.read_csv(
StringIO(headless_data), index_col=index_col, header=None, names=names
)
expected = parser.read_csv(StringIO(data), index_col=index_col)
# No index names in headless data.
expected.index.names = [None] * 2
tm.assert_frame_equal(result, expected)
def test_multi_index_no_level_names_implicit(all_parsers):
parser = all_parsers
data = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
]
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected,header",
[
("a,b", DataFrame(columns=["a", "b"]), [0]),
(
"a,b\nc,d",
DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),
[0, 1],
),
],
)
@pytest.mark.parametrize("round_trip", [True, False])
def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
# see gh-14545
parser = all_parsers
data = expected.to_csv(index=False) if round_trip else data
result = parser.read_csv(StringIO(data), header=header)
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(all_parsers):
parser = all_parsers
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
result = parser.read_csv(StringIO(data), sep=" ")
expected = DataFrame(
[[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],
columns=["Unnamed: 0", "id", "c0", "c1", "c2"],
)
tm.assert_frame_equal(result, expected)
def test_read_csv_parse_simple_list(all_parsers):
parser = all_parsers
data = """foo
bar baz
qux foo
foo
bar"""
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])
tm.assert_frame_equal(result, expected)
@tm.network
def test_url(all_parsers, csv_dir_path):
# TODO: FTP testing
parser = all_parsers
kwargs = dict(sep="\t")
url = (
"https://raw.github.com/pandas-dev/pandas/master/"
"pandas/tests/io/parser/data/salaries.csv"
)
url_result = parser.read_csv(url, **kwargs)
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
tm.assert_frame_equal(url_result, local_result)
@pytest.mark.slow
def test_local_file(all_parsers, csv_dir_path):
parser = all_parsers
kwargs = dict(sep="\t")
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
url = "file://localhost/" + local_path
try:
url_result = parser.read_csv(url, **kwargs)
tm.assert_frame_equal(url_result, local_result)
except URLError:
# Fails on some systems.
pytest.skip("Failing on: " + " ".join(platform.uname()))
def test_path_path_lib(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_local_path(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv, lambda p: parser.read_csv(p, index_col=0)
)
tm.assert_frame_equal(df, result)
def test_nonexistent_path(all_parsers):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
# GH#29233 "File foo" instead of "File b'foo'"
parser = all_parsers
path = "{}.csv".format(tm.rands(10))
msg = f"File {path} does not exist" if parser.engine == "c" else r"\[Errno 2\]"
with pytest.raises(FileNotFoundError, match=msg) as e:
parser.read_csv(path)
filename = e.value.filename
assert path == filename
def test_missing_trailing_delimiters(all_parsers):
parser = all_parsers
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
columns=["A", "B", "C", "D"],
)
tm.assert_frame_equal(result, expected)
def test_skip_initial_space(all_parsers):
data = (
'"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
"1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
"314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
"70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
"0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
"-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
)
parser = all_parsers
result = parser.read_csv(
StringIO(data),
names=list(range(33)),
header=None,
na_values=["-9999.0"],
skipinitialspace=True,
)
expected = DataFrame(
[
[
"09-Apr-2012",
"01:10:18.300",
2456026.548822908,
12849,
1.00361,
1.12551,
330.65659,
355626618.16711,
73.48821,
314.11625,
1917.09447,
179.71425,
80.0,
240.0,
-350,
70.06056,
344.9837,
1,
1,
-0.689265,
-0.692787,
0.212036,
14.7674,
41.605,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
0,
12,
128,
]
]
)
tm.assert_frame_equal(result, expected)
def test_trailing_delimiters(all_parsers):
# see gh-2442
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=False)
expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(all_parsers):
# https://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
parser = all_parsers
result = parser.read_csv(
StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
)
assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie'
tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
def test_int64_min_issues(all_parsers):
# see gh-2599
parser = all_parsers
data = "A,B\n0,0\n0,"
result = parser.read_csv(StringIO(data))
expected = DataFrame({"A": [0, 0], "B": [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(all_parsers):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
{
"Numbers": [
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194,
]
}
)
tm.assert_frame_equal(result, expected)
def test_chunks_have_consistent_numerical_type(all_parsers):
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
# Coercions should work without warnings.
with tm.assert_produces_warning(None):
result = parser.read_csv(StringIO(data))
assert type(result.a[0]) is np.float64
assert result.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(all_parsers):
warning_type = None
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["a", "b"] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if parser.engine == "c" and parser.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = parser.read_csv(StringIO(data))
assert df.a.dtype == np.object
@pytest.mark.parametrize("sep", [" ", r"\s+"])
def test_integer_overflow_bug(all_parsers, sep):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
parser = all_parsers
result = parser.read_csv(StringIO(data), header=None, sep=sep)
expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])
tm.assert_frame_equal(result, expected)
def test_catch_too_many_names(all_parsers):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
parser = all_parsers
msg = (
"Too many columns specified: expected 4 and found 3"
if parser.engine == "c"
else "Number of passed names did not match "
"number of header fields in the file"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
def test_ignore_leading_whitespace(all_parsers):
# see gh-3374, gh-6607
parser = all_parsers
data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
result = parser.read_csv(StringIO(data), sep=r"\s+")
expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(all_parsers):
# see gh-10022
parser = all_parsers
data = "\n hello\nworld\n"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([" hello", "world"])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(all_parsers):
# see gh-10184
data = "x,y"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(columns=["y"], index=Index([], name="x"))
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index(all_parsers):
# see gh-10467
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=["x", "y"])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])
)
tm.assert_frame_equal(result, expected)
def test_empty_with_reversed_multi_index(all_parsers):
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])
)
tm.assert_frame_equal(result, expected)
def test_float_parser(all_parsers):
# see gh-9565
parser = all_parsers
data = "45e-1,4.5,45.,inf,-inf"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(",")]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(all_parsers):
# see gh-12215
df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]})
data = df.to_csv(index=False)
parser = all_parsers
for precision in parser.float_precision_choices:
df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision)
tm.assert_frame_equal(df_roundtrip, df)
@pytest.mark.parametrize("conv", [None, np.int64, np.uint64])
def test_int64_overflow(all_parsers, conv):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
parser = all_parsers
if conv is None:
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
"00013007854817840016671868",
"00013007854817840016749251",
"00013007854817840016754630",
"00013007854817840016781876",
"00013007854817840017028824",
"00013007854817840017963235",
"00013007854817840018860166",
],
columns=["ID"],
)
tm.assert_frame_equal(result, expected)
else:
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
msg = (
"(Python int too large to convert to C long)|"
"(long too big to convert)|"
"(int too big to convert)"
)
with pytest.raises(OverflowError, match=msg):
parser.read_csv(StringIO(data), converters={"ID": conv})
@pytest.mark.parametrize(
"val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min]
)
def test_int64_uint64_range(all_parsers, val):
# These numbers fall right inside the int64-uint64
# range, so they should be parsed as string.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([val])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1]
)
def test_outside_int64_uint64_range(all_parsers, val):
# These numbers fall just outside the int64-uint64
# range, so they should be parsed as string.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([str(val)])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("exp_data", [[str(-1), str(2 ** 63)], [str(2 ** 63), str(-1)]])
def test_numeric_range_too_wide(all_parsers, exp_data):
# No numerical dtype can hold both negative and uint64
# values, so they should be cast as string.
parser = all_parsers
data = "\n".join(exp_data)
expected = DataFrame(exp_data)
result = parser.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("iterator", [True, False])
def test_empty_with_nrows_chunksize(all_parsers, iterator):
# see gh-9535
parser = all_parsers
expected = DataFrame(columns=["foo", "bar"])
nrows = 10
data = StringIO("foo,bar\n")
if iterator:
result = next(iter(parser.read_csv(data, chunksize=nrows)))
else:
result = parser.read_csv(data, nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected,msg",
[
# gh-10728: WHITESPACE_LINE
(
"a,b,c\n4,5,6\n ",
dict(),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# gh-10548: EAT_LINE_COMMENT
(
"a,b,c\n4,5,6\n#comment",
dict(comment="#"),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_CRNL_NOP
(
"a,b,c\n4,5,6\n\r",
dict(),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_COMMENT
(
"a,b,c\n4,5,6#comment",
dict(comment="#"),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# SKIP_LINE
(
"a,b,c\n4,5,6\nskipme",
dict(skiprows=[2]),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_LINE_COMMENT
(
"a,b,c\n4,5,6\n#comment",
dict(comment="#", skip_blank_lines=False),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# IN_FIELD
(
"a,b,c\n4,5,6\n ",
dict(skip_blank_lines=False),
DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]),
None,
),
# EAT_CRNL
(
"a,b,c\n4,5,6\n\r",
dict(skip_blank_lines=False),
DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]),
None,
),
# ESCAPED_CHAR
(
"a,b,c\n4,5,6\n\\",
dict(escapechar="\\"),
None,
"(EOF following escape character)|(unexpected end of data)",
),
# ESCAPE_IN_QUOTED_FIELD
(
'a,b,c\n4,5,6\n"\\',
dict(escapechar="\\"),
None,
"(EOF inside string starting at row 2)|(unexpected end of data)",
),
# IN_QUOTED_FIELD
(
'a,b,c\n4,5,6\n"',
dict(escapechar="\\"),
None,
"(EOF inside string starting at row 2)|(unexpected end of data)",
),
],
ids=[
"whitespace-line",
"eat-line-comment",
"eat-crnl-nop",
"eat-comment",
"skip-line",
"eat-line-comment",
"in-field",
"eat-crnl",
"escaped-char",
"escape-in-quoted-field",
"in-quoted-field",
],
)
def test_eof_states(all_parsers, data, kwargs, expected, msg):
# see gh-10728, gh-10548
parser = all_parsers
if expected is None:
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
def test_uneven_lines_with_usecols(all_parsers, usecols):
# see gh-12203
parser = all_parsers
data = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10"""
if usecols is None:
# Make sure that an error is still raised
# when the "usecols" parameter is not provided.
msg = r"Expected \d+ fields in line \d+, saw \d+"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
else:
expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
# First, check to see that the response of parser when faced with no
# provided columns raises the correct error, with or without usecols.
("", dict(), None),
("", dict(usecols=["X"]), None),
(
",,",
dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]),
DataFrame(columns=["X"], index=[0], dtype=np.float64),
),
(
"",
dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]),
DataFrame(columns=["X"]),
),
],
)
def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
# see gh-12493
parser = all_parsers
if expected is None:
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,expected",
[
# gh-8661, gh-8679: this should ignore six lines, including
# lines with trailing whitespace and blank lines.
(
dict(
header=None,
delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True,
),
DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
),
# gh-8983: test skipping set of rows after a row with trailing spaces.
(
dict(
delim_whitespace=True, skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True
),
DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
),
],
)
def test_trailing_spaces(all_parsers, kwargs, expected):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
parser = all_parsers
result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
tm.assert_frame_equal(result, expected)
def test_raise_on_sep_with_delim_whitespace(all_parsers):
# see gh-6607
data = "a b c\n1 2 3"
parser = all_parsers
with pytest.raises(ValueError, match="you can only specify one"):
parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
@pytest.mark.parametrize("delim_whitespace", [True, False])
def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
# see gh-9710
parser = all_parsers
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({"MyColumn": list("abab")})
result = parser.read_csv(
StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"sep,skip_blank_lines,exp_data",
[
(",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(
",",
False,
[
[1.0, 2.0, 4.0],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5.0, np.nan, 10.0],
[np.nan, np.nan, np.nan],
[-70.0, 0.4, 1.0],
],
),
],
)
def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data):
parser = all_parsers
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
if sep == r"\s+":
data = data.replace(",", " ")
result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
expected = DataFrame(exp_data, columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_whitespace_lines(all_parsers):
parser = all_parsers
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected",
[
(
""" A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
""",
DataFrame(
[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
columns=["A", "B", "C", "D"],
index=["a", "b", "c"],
),
),
(
" a b c\n1 2 3 \n4 5 6\n 7 8 9",
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
),
],
)
def test_whitespace_regex_separator(all_parsers, data, expected):
# see gh-6607
parser = all_parsers
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_verbose_read(all_parsers, capsys):
parser = all_parsers
data = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
parser.read_csv(StringIO(data), verbose=True)
captured = capsys.readouterr()
if parser.engine == "c":
assert "Tokenization took:" in captured.out
assert "Parser memory cleanup took:" in captured.out
else: # Python engine
assert captured.out == "Filled 3 NA values in column a\n"
def test_verbose_read2(all_parsers, capsys):
parser = all_parsers
data = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
parser.read_csv(StringIO(data), verbose=True, index_col=0)
captured = capsys.readouterr()
# Engines are verbose in different ways.
if parser.engine == "c":
assert "Tokenization took:" in captured.out
assert "Parser memory cleanup took:" in captured.out
else: # Python engine
assert captured.out == "Filled 1 NA values in column a\n"
def test_iteration_open_handle(all_parsers):
parser = all_parsers
kwargs = dict(squeeze=True, header=None)
with tm.ensure_clean() as path:
with open(path, "w") as f:
f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
with open(path, "r") as f:
for line in f:
if "CCC" in line:
break
result = parser.read_csv(f, **kwargs)
expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,thousands,decimal",
[
(
"""A|B|C
1|2,334.01|5
10|13|10.
""",
",",
".",
),
(
"""A|B|C
1|2.334,01|5
10|13|10,
""",
".",
",",
),
],
)
def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal):
parser = all_parsers
expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})
result = parser.read_csv(
StringIO(data), sep="|", thousands=thousands, decimal=decimal
)
tm.assert_frame_equal(result, expected)
def test_euro_decimal_format(all_parsers):
parser = all_parsers
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
result = parser.read_csv(StringIO(data), sep=";", decimal=",")
expected = DataFrame(
[
[1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819],
[2, 121.12, 14897.76, "DEF", "uyt", 0.377320872],
[3, 878.158, 108013.434, "GHI", "rez", 2.735694704],
],
columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("na_filter", [True, False])
def test_inf_parsing(all_parsers, na_filter):
parser = all_parsers
data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
expected = DataFrame(
{"A": [float("inf"), float("-inf")] * 5},
index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"],
)
result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("na_filter", [True, False])
def test_infinity_parsing(all_parsers, na_filter):
parser = all_parsers
data = """\
,A
a,Infinity
b,-Infinity
c,+Infinity
"""
expected = DataFrame(
{"A": [float("infinity"), float("-infinity"), float("+infinity")]},
index=["a", "b", "c"],
)
result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5])
def test_raise_on_no_columns(all_parsers, nrows):
parser = all_parsers
data = "\n" * nrows
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data))
def test_memory_map(all_parsers, csv_dir_path):
mmap_file = os.path.join(csv_dir_path, "test_mmap.csv")
parser = all_parsers
expected = DataFrame(
{"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]}
)
result = parser.read_csv(mmap_file, memory_map=True)
tm.assert_frame_equal(result, expected)
def test_null_byte_char(all_parsers):
# see gh-2741
data = "\x00,foo"
names = ["a", "b"]
parser = all_parsers
if parser.engine == "c":
expected = DataFrame([[np.nan, "foo"]], columns=names)
out = parser.read_csv(StringIO(data), names=names)
tm.assert_frame_equal(out, expected)
else:
msg = "NULL byte detected"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), names=names)
def test_temporary_file(all_parsers):
# see gh-13398
parser = all_parsers
data = "0 0"
new_file = TemporaryFile("w+")
new_file.write(data)
new_file.flush()
new_file.seek(0)
result = parser.read_csv(new_file, sep=r"\s+", header=None)
new_file.close()
expected = DataFrame([[0, 0]])
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte(all_parsers):
# see gh-5500
parser = all_parsers
data = "a,b\n1\x1a,2"
expected = DataFrame([["1\x1a", 2]], columns=["a", "b"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte_to_file(all_parsers):
# see gh-16559
parser = all_parsers
data = b'c1,c2\r\n"test \x1a test", test\r\n'
expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])
path = "__{}__.csv".format(tm.rands(10))
with tm.ensure_clean(path) as path:
with open(path, "wb") as f:
f.write(data)
result = parser.read_csv(path)
tm.assert_frame_equal(result, expected)
def test_sub_character(all_parsers, csv_dir_path):
# see gh-16893
filename = os.path.join(csv_dir_path, "sub_char.csv")
expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])
parser = all_parsers
result = parser.read_csv(filename)
tm.assert_frame_equal(result, expected)
def test_file_handle_string_io(all_parsers):
# gh-14418
#
# Don't close user provided file handles.
parser = all_parsers
data = "a,b\n1,2"
fh = StringIO(data)
parser.read_csv(fh)
assert not fh.closed
def test_file_handles_with_open(all_parsers, csv1):
# gh-14418
#
# Don't close user provided file handles.
parser = all_parsers
for mode in ["r", "rb"]:
with open(csv1, mode) as f:
parser.read_csv(f)
assert not f.closed
def test_invalid_file_buffer_class(all_parsers):
# see gh-15337
class InvalidBuffer:
pass
parser = all_parsers
msg = "Invalid file path or buffer object type"
with pytest.raises(ValueError, match=msg):
parser.read_csv(InvalidBuffer())
def test_invalid_file_buffer_mock(all_parsers):
# see gh-15337
parser = all_parsers
msg = "Invalid file path or buffer object type"
class Foo:
pass
with pytest.raises(ValueError, match=msg):
parser.read_csv(Foo())
def test_valid_file_buffer_seems_invalid(all_parsers):
# gh-16135: we want to ensure that "tell" and "seek"
# aren't actually being used when we call `read_csv`
#
# Thus, while the object may look "invalid" (these
# methods are attributes of the `StringIO` class),
# it is still a valid file-object for our purposes.
class NoSeekTellBuffer(StringIO):
def tell(self):
raise AttributeError("No tell method")
def seek(self, pos, whence=0):
raise AttributeError("No seek method")
data = "a\n1"
parser = all_parsers
expected = DataFrame({"a": [1]})
result = parser.read_csv(NoSeekTellBuffer(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs",
[dict(), dict(error_bad_lines=True)], # Default is True. # Explicitly pass in.
)
@pytest.mark.parametrize(
"warn_kwargs", [dict(), dict(warn_bad_lines=True), dict(warn_bad_lines=False)]
)
def test_error_bad_lines(all_parsers, kwargs, warn_kwargs):
# see gh-15925
parser = all_parsers
kwargs.update(**warn_kwargs)
data = "a\n1\n1,2,3\n4\n5,6,7"
msg = "Expected 1 fields in line 3, saw 3"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
def test_warn_bad_lines(all_parsers, capsys):
# see gh-15925
parser = all_parsers
data = "a\n1\n1,2,3\n4\n5,6,7"
expected = DataFrame({"a": [1, 4]})
result = parser.read_csv(StringIO(data), error_bad_lines=False, warn_bad_lines=True)
tm.assert_frame_equal(result, expected)
captured = capsys.readouterr()
assert "Skipping line 3" in captured.err
assert "Skipping line 5" in captured.err
def test_suppress_error_output(all_parsers, capsys):
# see gh-15925
parser = all_parsers
data = "a\n1\n1,2,3\n4\n5,6,7"
expected = DataFrame({"a": [1, 4]})
result = parser.read_csv(
StringIO(data), error_bad_lines=False, warn_bad_lines=False
)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
"""NTCIR13-MedWeb"""
from typing import *
import pandas as pd
import numpy as np
import os
import sys
from pathlib import Path
import functools
from collections import Counter
from copy import deepcopy
import pickle
import click
from tqdm import tqdm
import sklearn
from sklearn import model_selection
from sklearn.model_selection import RandomizedSearchCV
from sklearn import preprocessing
from sklearn.feature_extraction import DictVectorizer
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier
from sklearn import metrics
from sklearn.externals import joblib
import sklearn.utils
import pyknp
from natto import MeCab, MeCabNode
import spacy
from features.basics import ngram_features
from krankenfinder.features.ja_semantics import SemanticFeatures
from krankenfinder.features import bow_juman, bow_spacy, rule_based
from krankenfinder.utils.normalize import normalize_neologd
from krankenfinder import postprocessor
import logging
import logging.config
# This logger will be overridden by logger defined in __main__
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
logging.captureWarnings(True)
logger.propagate = False
try:
MECAB_OPTS = "-F %m,%f[0],%f[1],%f[6] -d {}".format(os.environ['NEOLOGD'])
except KeyError:
MECAB_OPTS = "-F %m,%f[0],%f[1],%f[6] -d /usr/lib/mecab/dic/mecab-ipadic-neologd/"
LABELCOLS = ['Influenza', 'Diarrhea', 'Hayfever', 'Cough', 'Headache', 'Fever', 'Runnynose', 'Cold']
Model = Union[RandomForestClassifier, RandomizedSearchCV]
def load_dataset(corpus_path: Path) -> Optional[pd.DataFrame]:
"""Load dataset from given xlsx or csv files, as dataframe
Parameters
----------
corpus_path : pathlib.Path
dataset file's path
Returns
-------
loaded dataframe
"""
def get_sheetname(p: Path) -> str:
if p.name.startswith('ja_train'):
return 'ja_train'
elif p.name.startswith('ja_test'):
return 'ja_test'
elif p.name.startswith('en_train'):
return 'en_train'
elif p.name.startswith('en_test'):
return 'en_test'
else:
return ''
if str(corpus_path.suffix == '.xlsx'):
sheetname = get_sheetname(corpus_path)
raw = pd.read_excel(str(corpus_path), sheetname=sheetname)
else:
raise NotImplementedError('Only xlsx corpus is allowed.')
logger.info('Data loaded: {}'.format(str(corpus_path.name)))
return raw
def _parser_func_mecab_detailed(parser: MeCab) -> Callable[[str], List[Tuple[str, str]]]:
def parse_to_morphs(s: str) -> List[Tuple[str, str]]:
return [tuple(l.split('\t')) for l in parser.parse(normalize_neologd(s)).split('\n')]
return parse_to_morphs
def _get_lemma(node: MeCabNode) -> str:
"""Assuming format "<surface>,<pos>,<posd>,<lemma>" """
try:
return node.feature.split(',')[3]
except IndexError:
logger.error(node.feature)
return ''
def _pos_included(node: MeCabNode) -> bool:
exclude_pos = {'記号'}
exclude_posd = {'格助詞', '接続助詞'}
suf, pos, posd, lemma = node.feature.split(',')
if pos in exclude_pos:
return False
if posd in exclude_posd:
return False
return True
def _parser_func_mecab(parser: MeCab) -> Callable[[str], List[str]]:
def parse_to_surf(s: str) -> List[str]:
return [_get_lemma(node) for node in parser.parse(normalize_neologd(s), as_nodes=True)
if node.is_nor() and _pos_included(node)]
return parse_to_surf
def _binarize_pn(df: pd.DataFrame) -> pd.DataFrame:
for c in LABELCOLS:
# cast to float64 for later use with sklearn
df[c] = df[c].apply(lambda s: 1 if s == 'p' else 0).astype(np.float64)
return df
def _pp_ja(df: pd.DataFrame, userdict: str = None, jumanpp: bool = False) -> pd.DataFrame:
if not jumanpp: # use mecab-neologd by default
if userdict:
mecab = MeCab('{} -u {}'.format(MECAB_OPTS, userdict))
logger.info('Using customdict {}'.format(userdict))
else:
mecab = MeCab('{}'.format(MECAB_OPTS))
parser = _parser_func_mecab(mecab)
# df['raw'] = df['Tweet'].apply(normalize_neologd) # KNP fails to parse with some hankaku characters
else: # use jumanpp
parser = bow_juman.parser_func_jumanpp(lemmatize=True)
logger.info('Using Juman++ instead of MeCab')
tqdm.pandas()
df['words'] = df['Tweet'].progress_apply(parser)
df['raw'] = df['Tweet'].copy() # TODO: add some preprocess for KNP
return df
def _pp_en(df: pd.DataFrame) -> pd.DataFrame:
tokenizer_ = bow_spacy.parser_func_en_suf()
df['words'] = df['Tweet'].apply(tokenizer_)
# TODO: Implement normalization for English tweets if needed.
df['raw'] = df['Tweet'].copy()
return df
def _check_lang(df: pd.DataFrame) -> str:
return df['ID'].iloc[0][-2:]
def preprocess_df(df: pd.DataFrame, userdict: str = None, jumanpp: bool = False) -> pd.DataFrame:
"""Perform preprocessing for given dataframe,
including, binarizing p/n labels to 1/0, normalization (JP), adding column of tokenized sentence.
"""
df = _binarize_pn(df)
if _check_lang(df) == 'ja':
df = _pp_ja(df, userdict=userdict, jumanpp=jumanpp)
elif _check_lang(df) == 'en':
df = _pp_en(df)
else:
raise NotImplementedError('Not implemented for this language')
logger.info('Finished preprocessing')
return df
def train_test_split(df: pd.DataFrame, ratio: float = 0.8, random_seed: Optional[int] = None) \
-> Tuple[pd.DataFrame, pd.DataFrame]:
"""Perform train/test split """
split_id = int(len(df) * ratio)
_df = sklearn.utils.shuffle(df.copy(), random_state=random_seed)
train = _df.iloc[:split_id]
test = _df.iloc[split_id:]
return train, test
def add_surface_feature(df: pd.DataFrame) -> pd.DataFrame:
"""Add surface-bag-of-words column to given dataframe"""
def get_counts_of_words(l: List[str]) -> List[Tuple[str, int]]:
return list(Counter(l).items())
tqdm.pandas()
df['f_surface'] = df['words'].progress_apply(get_counts_of_words)
logger.info('Extracted word-surface features')
return df
def add_ngram_feature(df: pd.DataFrame, n: int = 3) -> pd.DataFrame:
ngram_func = functools.partial(ngram_features, n=n, padding=True)
df['f_{}gram'.format(n)] = df['words'].apply(ngram_func)
logger.info('Extracted {}gram features'.format(n))
return df
def add_semantic_feature(df: pd.DataFrame, verbose=False, logger=None, jumanpp=False) -> pd.DataFrame:
"""Add semantic feature column to given dataframe
.. note:: currently implemented only for Japanese tweets
"""
fe = SemanticFeatures(verbose=verbose, logger=logger, jumanpp=jumanpp)
logger.info('Started extracting semantic features')
tqdm.pandas()
def get_semantic_featuredict(s: str) -> List[Tuple[str, int]]:
_ = fe.pas_features(s) # type: Dict[str, int]
return list(_.items())
df['f_semantic'] = df['raw'].progress_apply(get_semantic_featuredict)
logger.info('{}: Extracted semantic features'.format(__name__))
return df
def add_rulebased_feature(df: pd.DataFrame) -> pd.DataFrame:
logger.info('Extracting Rule-based features')
tqdm.pandas()
df['f_rules'] = df['raw'].progress_apply(rule_based.rulebased_features_ja)
return df
def _merge_feature_columns(df: pd.DataFrame) -> pd.DataFrame:
"""Prerequisite: df must have 'features' column
WARNING: this function will modify dataframe inplace, better to copy df beforehand."""
feature_columns = sorted([cname for cname in df.columns if cname.startswith('f_')], reverse=True)
if not feature_columns:
return df
elif len(feature_columns) == 1:
cn = feature_columns.pop()
df['features'] = pd.Series(df['features'] + df[cn])
df.drop(cn, 1, inplace=True)
return df
else:
cn1 = feature_columns[0]
cn2 = feature_columns[1]
df['features'] = pd.Series(df['features'] + df[cn2] + df[cn1])
df.drop([cn1, cn2], 1, inplace=True)
_merge_feature_columns(df)
def feature_extraction(df: pd.DataFrame,
surface=True,
ngram_n: Union[Tuple[int], Tuple[int, int], None] = (3,),
rule_based=False,
semantic=True,
logger=None,
verbose=False) -> Tuple[np.array, pd.DataFrame]:
# use deepcopy instead of df.copy() because it doesn't recursively copy objects even when deep=True.
df = deepcopy(df)
if surface:
df = add_surface_feature(df)
if ngram_n:
for n in ngram_n:
df = add_ngram_feature(df, n=n)
if semantic:
lang = _check_lang(df)
if lang == 'ja':
df = add_semantic_feature(df, verbose=verbose, logger=logger, jumanpp=False)
if rule_based:
lang = _check_lang(df)
if lang == 'ja':
df = add_rulebased_feature(df)
df['features'] = np.empty((len(df['ID']), 0)).tolist()
_merge_feature_columns(df)
return df['features'].values, df
def get_labels(df: pd.DataFrame) -> np.array:
return df[LABELCOLS].values
def define_model(n_random_search: int = 100, n_jobs: int = None) -> Model:
# TODO: needs refinements
rf = RandomForestClassifier(random_state=None)
# Or Extremely Randomized Trees, but currently no big difference in terms of performance.
# rf = ExtraTreesClassifier(random_state=None)
_n_estimators = list(range(8, 128, 4))
_max_depth = list(range(8, 32, 1))
search_space = dict(
n_estimators=_n_estimators,
criterion=['gini', 'entropy'],
max_features=['auto', 'log2', 0.5, None],
max_depth=_max_depth
)
if n_jobs:
ncores = n_jobs
else:
ncores = joblib.cpu_count()
rfcv = model_selection.RandomizedSearchCV(estimator=rf,
param_distributions=search_space,
n_iter=n_random_search,
n_jobs=ncores,
cv=5,
verbose=1
)
return rfcv
def get_preds_and_probs(model: Model, X_test, use_postprocess: bool = False):
predictions = model.predict(X_test)
if use_postprocess:
predictions = postprocessor.apply_pp(predictions)
probabilities = model.predict_proba(X_test)
return predictions, probabilities
def evaluate_on_testset(model: Model, X_test, y_test, use_postprocess: bool = False) -> Tuple[str, np.array, np.array]:
predictions, probabilities = get_preds_and_probs(model, X_test, use_postprocess)
report = metrics.classification_report(y_test, predictions, target_names=LABELCOLS)
return report, predictions, probabilities
def _get_types(v: int) -> str:
"""Get string representations of error-types from integer-encoding"""
if v == 0:
return 'TN'
elif v == -1:
return 'FP'
elif v == 1:
return 'FN'
elif v == 2:
return 'TP'
else:
return 'NA'
def error_analysis(df_test: pd.DataFrame, predictions: np.array, model: Model) -> pd.DataFrame:
"""Get detailed information for analysing error cases."""
# Prefix for columns
P_G = 'gold_'
P_P = 'pred_'
P_C = 'code_'
_columns = ['ID', 'Tweet'] + LABELCOLS
rename_dic = {org: P_G + org for org in LABELCOLS}
df = df_test.loc[:, _columns].copy()
df = df.rename(columns=rename_dic)
# Add prediction columns to df
for c in range(predictions.shape[1]):
col = | pd.Series(predictions[:, c], index=df_test['ID'], dtype=np.float64) | pandas.Series |
import pandas as pd
import numpy as np
from post_extraction_analysis import post_analyse
def filter_and_combine(output_folder):
post_analyse(output_folder + '/results_muse.csv',
output_folder + '/results_muse_eval.csv')
post_analyse(output_folder + '/results_mutect2.csv',
output_folder + '/results_mutect2_eval.csv')
post_analyse(output_folder + '/results_varscan2.csv',
output_folder + '/results_varscan2_eval.csv')
post_analyse(output_folder + '/results_somaticsniper.csv',
output_folder + '/results_somaticsniper_eval.csv')
df_muse = pd.read_csv(output_folder + '/results_muse_eval.csv')
df_mutect2 = pd.read_csv(output_folder + '/results_mutect2_eval.csv')
df_varscan2 = pd.read_csv(output_folder + '/results_varscan2_eval.csv')
df_ss = | pd.read_csv(output_folder + '/results_somaticsniper_eval.csv') | pandas.read_csv |
#!/usr/bin/env python3
import unittest
import numpy as np
import numpy.testing as nptest
import pandas as pd
import pandas.testing as pdtest
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from datafold.dynfold.transform import (
TSCApplyLambdas,
TSCFeaturePreprocess,
TSCFiniteDifference,
TSCIdentity,
TSCPolynomialFeatures,
TSCPrincipalComponent,
TSCRadialBasis,
TSCTakensEmbedding,
TSCTransformerMixin,
)
from datafold.pcfold.kernels import *
from datafold.pcfold.timeseries.collection import TSCDataFrame, TSCException
def _all_tsc_transformers():
# only finds the ones that are importated (DMAP e.g. is not here)
print(TSCTransformerMixin.__subclasses__())
class TestTSCTransform(unittest.TestCase):
def _setUp_simple_df(self):
idx = pd.MultiIndex.from_arrays(
[[0, 0, 1, 1, 15, 15, 45, 45, 45], [0, 1, 0, 1, 0, 1, 17, 18, 19]]
)
col = ["A", "B"]
self.simple_df = pd.DataFrame(np.random.rand(9, 2), index=idx, columns=col)
def _setUp_takens_df(self):
idx = pd.MultiIndex.from_arrays(
[[0, 0, 1, 1, 15, 15, 45, 45, 45], [0, 1, 0, 1, 0, 1, 17, 18, 19]]
)
col = ["A", "B"]
# Requires non-random values
self.takens_df_short = pd.DataFrame(
np.arange(18).reshape([9, 2]), index=idx, columns=col
)
n_samples_timeseries = 100
idx = pd.MultiIndex.from_product(
[np.array([0, 1]), np.arange(n_samples_timeseries)]
)
self.takens_df_long = pd.DataFrame(
np.random.rand(n_samples_timeseries * 2, 2), index=idx, columns=col
)
def setUp(self) -> None:
self._setUp_simple_df()
self._setUp_takens_df()
def test_is_valid_sklearn_estimator(self):
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils.estimator_checks import check_estimator
TEST_ESTIMATORS = (
TSCIdentity(),
TSCPrincipalComponent(),
TSCFeaturePreprocess(sklearn_transformer=MinMaxScaler()),
TSCFeaturePreprocess(sklearn_transformer=StandardScaler()),
TSCPolynomialFeatures(),
)
for test_estimator in TEST_ESTIMATORS:
for estimator, check in check_estimator(test_estimator, generate_only=True):
try:
check(estimator)
except Exception as e:
print(estimator)
print(check)
raise e
def test_identity0(self):
tsc = TSCDataFrame(self.simple_df)
_id = TSCIdentity()
pdtest.assert_frame_equal(_id.fit_transform(tsc), tsc)
pdtest.assert_frame_equal(_id.inverse_transform(tsc), tsc)
def test_identity1(self):
tsc = TSCDataFrame(self.simple_df)
_id = TSCIdentity(include_const=True)
tsc_plus_const = tsc.copy(deep=True)
tsc_plus_const["const"] = 1
pdtest.assert_frame_equal(_id.fit_transform(tsc.copy()), tsc_plus_const)
pdtest.assert_frame_equal(_id.inverse_transform(tsc_plus_const), tsc)
def test_identity2(self):
data = np.random.rand(5, 5)
data_wo_const = TSCIdentity(include_const=False).fit_transform(data)
data_plus_const = TSCIdentity(include_const=True).fit_transform(data)
nptest.assert_equal(data, data_wo_const)
nptest.assert_equal(data_plus_const, np.column_stack([data, np.ones(5)]))
def test_identity3(self):
data = TSCDataFrame(self.simple_df)
data_wo_const = TSCIdentity(
include_const=False, rename_features=True
).fit_transform(data)
data_with_const = TSCIdentity(
include_const=True, rename_features=True
).fit_transform(data)
data = data.add_suffix("_id")
pdtest.assert_index_equal(data.columns, data_wo_const.columns)
data["const"] = 1
pdtest.assert_index_equal(data.columns, data_with_const.columns)
def test_scale_min_max(self):
tsc_df = TSCDataFrame(self.simple_df)
scale = TSCFeaturePreprocess.from_name("min-max")
scaled_tsc = scale.fit_transform(tsc_df)
# sanity check:
nptest.assert_allclose(scaled_tsc.min().to_numpy(), np.zeros(2), atol=1e-16)
nptest.assert_allclose(scaled_tsc.max().to_numpy(), np.ones(2), atol=1e-16)
# Undoing normalization must give original TSCDataFrame back
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(scaled_tsc))
def test_scale_standard(self):
tsc_df = TSCDataFrame(self.simple_df)
scale = TSCFeaturePreprocess.from_name("standard")
scaled_tsc = scale.fit_transform(tsc_df)
nptest.assert_array_equal(
scaled_tsc.to_numpy(),
StandardScaler(with_mean=True, with_std=True).fit_transform(
tsc_df.to_numpy()
),
)
# Undoing normalization must give original TSCDataFrame back
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(scaled_tsc))
def test_sklearn_scaler(self):
tsc_df = TSCDataFrame(self.simple_df)
from sklearn.preprocessing import (
MaxAbsScaler,
PowerTransformer,
QuantileTransformer,
RobustScaler,
)
# each tuple has the class and a dictionary with the init-options
scaler = [
(MaxAbsScaler, dict()),
(PowerTransformer, dict(method="yeo-johnson")),
(PowerTransformer, dict(method="box-cox")),
(
QuantileTransformer,
dict(n_quantiles=tsc_df.shape[0], output_distribution="uniform"),
),
(
QuantileTransformer,
dict(n_quantiles=tsc_df.shape[0], output_distribution="normal"),
),
(RobustScaler, dict()),
]
for cls, kwargs in scaler:
scale = TSCFeaturePreprocess(sklearn_transformer=cls(**kwargs))
tsc_transformed = scale.fit_transform(tsc_df)
# Check the underlying array equals:
nptest.assert_array_equal(
cls(**kwargs).fit_transform(tsc_df.to_numpy()),
tsc_transformed.to_numpy(),
)
# check inverse transform is equal the original TSCDataFrame:
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(tsc_transformed))
def test_polynomial_feature_transform01(self):
from sklearn.preprocessing import PolynomialFeatures
tsc = TSCDataFrame(self.simple_df)
for degree in [2, 3, 4]:
for include_bias in [True, False]:
actual = TSCPolynomialFeatures(
degree=degree, include_bias=include_bias, include_first_order=True
).fit_transform(tsc)
expected = PolynomialFeatures(
degree=degree, include_bias=include_bias
).fit_transform(tsc.to_numpy())
nptest.assert_array_equal(actual.to_numpy(), expected)
def test_polynomial_feature_transform02(self):
tsc = TSCDataFrame(self.simple_df)
for include_first_order in [True, False]:
poly = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=include_first_order
).fit(tsc)
actual = poly.transform(tsc)
expected = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=False
).fit_transform(tsc)
pdtest.assert_frame_equal(actual, expected)
def test_polynomial_feature_transform03(self):
tsc = TSCDataFrame(self.simple_df)
actual = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=False
).fit_transform(tsc)
pdtest.assert_index_equal(
actual.columns,
pd.Index(["1", "A^2", "A B", "B^2"], name="feature"),
)
actual = TSCPolynomialFeatures(
degree=2, include_bias=False, include_first_order=False
).fit_transform(tsc)
pdtest.assert_index_equal(
actual.columns,
pd.Index(["A^2", "A B", "B^2"], name="feature"),
)
def test_apply_lambda_transform01(self):
# use lambda identity function
tsc = TSCDataFrame(self.simple_df)
lambda_transform = TSCApplyLambdas(lambdas=[lambda x: x]).fit(tsc)
actual = lambda_transform.transform(tsc)
expected = tsc
expected.columns = pd.Index(
["A_lambda0", "B_lambda0"], name=TSCDataFrame.tsc_feature_col_name
)
pdtest.assert_frame_equal(actual, expected)
def test_apply_lambda_transform02(self):
# use numpy function
tsc = TSCDataFrame(self.simple_df)
lambda_transform = TSCApplyLambdas(lambdas=[np.square]).fit(tsc)
actual = lambda_transform.transform(tsc)
expected = tsc.apply(np.square, axis=0, raw=True)
expected.columns = pd.Index(
["A_lambda0", "B_lambda0"], name=TSCDataFrame.tsc_feature_col_name
)
| pdtest.assert_frame_equal(actual, expected) | pandas.testing.assert_frame_equal |
"""
Functions for classifying mutation presence/absence based on gene expression data.
Many of these functions are adapted from:
https://github.com/greenelab/BioBombe/blob/master/9.tcga-classify/scripts/tcga_util.py
"""
import contextlib
import warnings
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import (
roc_auc_score,
roc_curve,
precision_recall_curve,
average_precision_score
)
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import GridSearchCV
import pancancer_evaluation.config as cfg
import pancancer_evaluation.utilities.data_utilities as du
import pancancer_evaluation.utilities.tcga_utilities as tu
from pancancer_evaluation.exceptions import (
OneClassError,
NoTrainSamplesError,
NoTestSamplesError
)
def train_cross_cancer(data_model,
train_gene_or_identifier,
test_identifier,
shuffle_labels=False):
"""
Train a model for a given identifier (gene/cancer type combination).
Arguments
---------
data_model (TCGADataModel): class containing preprocessed train/test data
train_gene_or_identifier (str): gene or gene/cancer type combo to train on
shuffle_labels (bool): whether or not to shuffle labels (negative control)
"""
signal = 'shuffled' if shuffle_labels else 'signal'
try:
X_train_df, X_test_df = tu.preprocess_data(data_model.X_train_raw_df,
data_model.X_test_raw_df,
data_model.gene_features,
data_model.subset_mad_genes)
y_train_df, y_test_df = data_model.y_train_df, data_model.y_test_df
except ValueError:
if data_model.X_train_raw_df.shape[0] == 0:
raise NoTrainSamplesError(
'No train samples found for train identifier: {}'.format(
train_gene_or_identifier)
)
elif data_model.X_test_raw_df.shape[0] == 0:
raise NoTestSamplesError(
'No test samples found for test identifier: {}'.format(
test_identifier)
)
try:
# if labels are extremely imbalanced, scikit-learn GridSearchCV
# will throw warnings, then we'll hit a ValueError later on when
# training the model.
#
# so, we ignore the warnings here, then catch the error later on
# to allow the calling function to skip these cases without a
# bunch of warning spam.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
model_results = train_model(
X_train=X_train_df,
X_test=X_test_df,
y_train=y_train_df,
alphas=cfg.alphas,
l1_ratios=cfg.l1_ratios,
seed=data_model.seed,
n_folds=cfg.folds,
max_iter=cfg.max_iter
)
except ValueError:
raise OneClassError(
'Only one class present in train set for identifier: {}\n'.format(
train_gene_or_identifier)
)
# get coefficients
cv_pipeline = model_results[0]
coef_df = extract_coefficients(
cv_pipeline=cv_pipeline,
feature_names=X_train_df.columns,
signal=signal,
seed=data_model.seed
)
return model_results, coef_df
def evaluate_cross_cancer(data_model,
train_gene_or_identifier,
test_identifier,
model_results,
coef_df,
shuffle_labels=False,
train_pancancer=False):
"""
Evaluate a trained model for a given identifier (gene/cancer type combination).
Arguments
---------
data_model (TCGADataModel): class containing preprocessed train/test data
train_gene_or_identifier (str): gene or gene/cancer type combo to train on
shuffle_labels (bool): whether or not to shuffle labels (negative control)
train_pancancer (bool): whether or not to use pancancer data for training
"""
signal = 'shuffled' if shuffle_labels else 'signal'
(cv_pipeline,
y_pred_train_df,
_,
y_cv_df) = model_results
try:
X_train_df, X_test_df = tu.preprocess_data(data_model.X_train_raw_df,
data_model.X_test_raw_df,
data_model.gene_features,
data_model.subset_mad_genes)
y_train_df, y_test_df = data_model.y_train_df, data_model.y_test_df
except ValueError:
if data_model.X_train_raw_df.shape[0] == 0:
raise NoTrainSamplesError(
'No train samples found for train identifier: {}'.format(
train_gene_or_identifier)
)
elif data_model.X_test_raw_df.shape[0] == 0:
raise NoTestSamplesError(
'No test samples found for test identifier: {}'.format(
test_identifier)
)
y_pred_test_df = cv_pipeline.decision_function(X_test_df)
try:
# also ignore warnings here, same deal as above
with warnings.catch_warnings():
warnings.simplefilter("ignore")
metric_df, gene_auc_df, gene_aupr_df = get_metrics_cc(
y_train_df, y_test_df, y_cv_df, y_pred_train_df,
y_pred_test_df, train_gene_or_identifier, test_identifier,
signal, data_model.seed, train_pancancer=train_pancancer
)
except ValueError:
raise OneClassError(
'Only one class present in test set for train identifier: {}, '
'test identifier: {}\n'.format(train_gene_or_identifier, test_identifier)
)
results = {
'gene_metrics': metric_df,
'gene_auc': gene_auc_df,
'gene_aupr': gene_aupr_df,
'gene_coef': coef_df
}
return results
def run_cv_cancer_type(data_model,
gene,
cancer_type,
sample_info,
num_folds,
use_pancancer,
use_pancancer_only,
shuffle_labels,
use_coral=False,
coral_lambda=1.0,
coral_by_cancer_type=False,
cancer_types=None,
use_tca=False,
tca_params=None):
"""
Run cross-validation experiments for a given gene/cancer type combination,
then write them to files in the results directory. If the relevant files
already exist, skip this experiment.
Arguments
---------
data_model (TCGADataModel): class containing preprocessed train/test data
gene (str): gene to run experiments for
cancer_type (str): cancer type in TCGA to hold out
sample_info (pd.DataFrame): dataframe with TCGA sample information
num_folds (int): number of cross-validation folds to run
use_pancancer (bool): whether or not to use pancancer data
use_pancancer_only (bool): whether or not to use only pancancer data
shuffle_labels (bool): whether or not to shuffle labels (negative control)
TODO: what class variables does data_model need to have? should document
"""
results = {
'gene_metrics': [],
'gene_auc': [],
'gene_aupr': [],
'gene_coef': []
}
signal = 'shuffled' if shuffle_labels else 'signal'
for fold_no in range(num_folds):
try:
# if labels are extremely imbalanced, scikit-learn KFold used
# here will throw n_splits warnings, then we'll hit a ValueError
# later on when training the model.
#
# so, we ignore the warnings here, then catch the error later on
# to allow the calling function to skip these cases without a
# bunch of warning spam.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X_train_raw_df, X_test_raw_df = du.split_by_cancer_type(
data_model.X_df,
sample_info,
cancer_type,
num_folds=num_folds,
fold_no=fold_no,
use_pancancer=use_pancancer,
use_pancancer_only=use_pancancer_only,
seed=data_model.seed)
except ValueError:
raise NoTestSamplesError(
'No test samples found for cancer type: {}, '
'gene: {}\n'.format(cancer_type, gene)
)
if X_train_raw_df.shape[0] == 0:
# this might happen in pancancer only case
raise NoTrainSamplesError(
'No train samples found for cancer type: {}, '
'gene: {}\n'.format(cancer_type, gene)
)
y_train_df = data_model.y_df.reindex(X_train_raw_df.index)
y_test_df = data_model.y_df.reindex(X_test_raw_df.index)
if shuffle_labels:
# we set a temp seed here to make sure this shuffling order
# is the same for each gene between data types, otherwise
# it might be slightly different depending on the global state
with temp_seed(data_model.seed):
y_train_df.status = np.random.permutation(y_train_df.status.values)
y_test_df.status = np.random.permutation(y_test_df.status.values)
X_train_df, X_test_df = tu.preprocess_data(X_train_raw_df,
X_test_raw_df,
data_model.gene_features,
data_model.subset_mad_genes,
use_coral,
coral_lambda,
coral_by_cancer_type,
cancer_types,
use_tca,
tca_params)
try:
# also ignore warnings here, same deal as above
with warnings.catch_warnings():
warnings.simplefilter("ignore")
model_results = train_model(
X_train=X_train_df,
X_test=X_test_df,
y_train=y_train_df,
alphas=cfg.alphas,
l1_ratios=cfg.l1_ratios,
seed=data_model.seed,
n_folds=cfg.folds,
max_iter=cfg.max_iter
)
(cv_pipeline,
y_pred_train_df,
y_pred_test_df,
y_cv_df) = model_results
except ValueError:
raise OneClassError(
'Only one class present in test set for cancer type: {}, '
'gene: {}\n'.format(cancer_type, gene)
)
# get coefficients
coef_df = extract_coefficients(
cv_pipeline=cv_pipeline,
feature_names=X_train_df.columns,
signal=signal,
seed=data_model.seed
)
coef_df = coef_df.assign(gene=gene)
coef_df = coef_df.assign(fold=fold_no)
try:
# also ignore warnings here, same deal as above
with warnings.catch_warnings():
warnings.simplefilter("ignore")
metric_df, gene_auc_df, gene_aupr_df = get_metrics(
y_train_df, y_test_df, y_cv_df, y_pred_train_df,
y_pred_test_df, gene, cancer_type, signal,
data_model.seed, fold_no
)
except ValueError:
raise OneClassError(
'Only one class present in test set for cancer type: {}, '
'gene: {}\n'.format(cancer_type, gene)
)
results['gene_metrics'].append(metric_df)
results['gene_auc'].append(gene_auc_df)
results['gene_aupr'].append(gene_aupr_df)
results['gene_coef'].append(coef_df)
return results
def run_cv_stratified(data_model, gene, sample_info, num_folds, shuffle_labels):
"""
Run stratified cross-validation experiments for a given gene, then
write the results to files in the results directory. If the relevant
files already exist, skip this experiment.
Arguments
---------
data_model (TCGADataModel): class containing preprocessed train/test data
gene (str): gene to run experiments for
sample_info (pd.DataFrame): dataframe with TCGA sample information
num_folds (int): number of cross-validation folds to run
shuffle_labels (bool): whether or not to shuffle labels (negative control)
TODO: what class variables does data_model need to have? should document
"""
results = {
'gene_metrics': [],
'gene_auc': [],
'gene_aupr': [],
'gene_coef': []
}
signal = 'shuffled' if shuffle_labels else 'signal'
for fold_no in range(num_folds):
try:
# if labels are extremely imbalanced, scikit-learn KFold used
# here will throw n_splits warnings, then we'll hit a ValueError
# later on when training the model.
#
# so, we ignore the warnings here, then catch the error later on
# to allow the calling function to skip these cases without a
# bunch of warning spam.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X_train_raw_df, X_test_raw_df, _ = du.split_stratified(
data_model.X_df, sample_info, num_folds=num_folds,
fold_no=fold_no, seed=data_model.seed)
except ValueError:
raise NoTestSamplesError(
'No test samples found for gene: {}\n'.format(gene)
)
y_train_df = data_model.y_df.reindex(X_train_raw_df.index)
y_test_df = data_model.y_df.reindex(X_test_raw_df.index)
if shuffle_labels:
# we set a temp seed here to make sure this shuffling order
# is the same for each gene between data types, otherwise
# it might be slightly different depending on the global state
with temp_seed(data_model.seed):
y_train_df.status = np.random.permutation(y_train_df.status.values)
y_test_df.status = np.random.permutation(y_test_df.status.values)
X_train_df, X_test_df = tu.preprocess_data(X_train_raw_df, X_test_raw_df,
data_model.gene_features,
data_model.subset_mad_genes)
try:
# also ignore warnings here, same deal as above
with warnings.catch_warnings():
warnings.simplefilter("ignore")
model_results = train_model(
X_train=X_train_df,
X_test=X_test_df,
y_train=y_train_df,
alphas=cfg.alphas,
l1_ratios=cfg.l1_ratios,
seed=data_model.seed,
n_folds=cfg.folds,
max_iter=cfg.max_iter
)
(cv_pipeline,
y_pred_train_df,
y_pred_test_df,
y_cv_df) = model_results
except ValueError:
raise OneClassError(
'Only one class present in test set for gene: {}\n'.format(gene)
)
# TODO: separate below into another function (one returns raw results)
# get coefficients
coef_df = extract_coefficients(
cv_pipeline=cv_pipeline,
feature_names=X_train_df.columns,
signal=signal,
seed=data_model.seed
)
coef_df = coef_df.assign(gene=gene)
coef_df = coef_df.assign(fold=fold_no)
try:
# also ignore warnings here, same deal as above
with warnings.catch_warnings():
warnings.simplefilter("ignore")
metric_df, gene_auc_df, gene_aupr_df = get_metrics(
y_train_df, y_test_df, y_cv_df, y_pred_train_df,
y_pred_test_df, gene, 'N/A', signal, data_model.seed,
fold_no
)
except ValueError:
raise OneClassError(
'Only one class present in test set for gene: {}\n'.format(gene)
)
results['gene_metrics'].append(metric_df)
results['gene_auc'].append(gene_auc_df)
results['gene_aupr'].append(gene_aupr_df)
results['gene_coef'].append(coef_df)
return results
def train_model(X_train, X_test, y_train, alphas, l1_ratios, seed, n_folds=5, max_iter=1000):
"""
Build the logic and sklearn pipelines to train x matrix based on input y
Arguments
---------
X_train: pandas DataFrame of feature matrix for training data
X_test: pandas DataFrame of feature matrix for testing data
y_train: pandas DataFrame of processed y matrix (output from align_matrices())
alphas: list of alphas to perform cross validation over
l1_ratios: list of l1 mixing parameters to perform cross validation over
n_folds: int of how many folds of cross validation to perform
max_iter: the maximum number of iterations to test until convergence
Returns
------
The full pipeline sklearn object and y matrix predictions for training, testing,
and cross validation
"""
# Setup the classifier parameters
clf_parameters = {
"classify__loss": ["log"],
"classify__penalty": ["elasticnet"],
"classify__alpha": alphas,
"classify__l1_ratio": l1_ratios,
}
estimator = Pipeline(
steps=[
(
"classify",
SGDClassifier(
random_state=seed,
class_weight="balanced",
loss="log",
max_iter=max_iter,
tol=1e-3,
),
)
]
)
cv_pipeline = GridSearchCV(
estimator=estimator,
param_grid=clf_parameters,
n_jobs=-1,
cv=n_folds,
scoring='average_precision',
return_train_score=True,
# iid=False
)
# Fit the model
cv_pipeline.fit(X=X_train, y=y_train.status)
# Obtain cross validation results
y_cv = cross_val_predict(
cv_pipeline.best_estimator_,
X=X_train,
y=y_train.status,
cv=n_folds,
method="decision_function",
)
# Get all performance results
y_predict_train = cv_pipeline.decision_function(X_train)
y_predict_test = cv_pipeline.decision_function(X_test)
return cv_pipeline, y_predict_train, y_predict_test, y_cv
def extract_coefficients(cv_pipeline, feature_names, signal, seed):
"""
Pull out the coefficients from the trained classifiers
Arguments
---------
cv_pipeline: the trained sklearn cross validation pipeline
feature_names: the column names of the x matrix used to train model (features)
results: a results object output from `get_threshold_metrics`
signal: the signal of interest
seed: the seed used to compress the data
"""
final_pipeline = cv_pipeline.best_estimator_
final_classifier = final_pipeline.named_steps["classify"]
coef_df = pd.DataFrame.from_dict(
{"feature": feature_names, "weight": final_classifier.coef_[0]}
)
coef_df = (
coef_df.assign(abs=coef_df["weight"].abs())
.sort_values("abs", ascending=False)
.reset_index(drop=True)
.assign(signal=signal, seed=seed)
)
return coef_df
def get_threshold_metrics(y_true, y_pred, drop=False):
"""
Retrieve true/false positive rates and auroc/aupr for class predictions
Arguments
---------
y_true: an array of gold standard mutation status
y_pred: an array of predicted mutation status
drop: boolean if intermediate thresholds are dropped
Returns
-------
dict of AUROC, AUPR, pandas dataframes of ROC and PR data, and cancer-type
"""
roc_columns = ["fpr", "tpr", "threshold"]
pr_columns = ["precision", "recall", "threshold"]
roc_results = roc_curve(y_true, y_pred, drop_intermediate=drop)
roc_items = zip(roc_columns, roc_results)
roc_df = pd.DataFrame.from_dict(dict(roc_items))
prec, rec, thresh = precision_recall_curve(y_true, y_pred)
pr_df = pd.DataFrame.from_records([prec, rec]).T
pr_df = pd.concat([pr_df, pd.Series(thresh)], ignore_index=True, axis=1)
pr_df.columns = pr_columns
auroc = roc_auc_score(y_true, y_pred, average="weighted")
aupr = average_precision_score(y_true, y_pred, average="weighted")
return {"auroc": auroc, "aupr": aupr, "roc_df": roc_df, "pr_df": pr_df}
def get_metrics(y_train_df, y_test_df, y_cv_df, y_pred_train, y_pred_test,
gene, cancer_type, signal, seed, fold_no):
# get classification metric values
y_train_results = get_threshold_metrics(
y_train_df.status, y_pred_train, drop=False
)
y_test_results = get_threshold_metrics(
y_test_df.status, y_pred_test, drop=False
)
y_cv_results = get_threshold_metrics(
y_train_df.status, y_cv_df, drop=False
)
# summarize all results in dataframes
metric_cols = [
"auroc",
"aupr",
"gene",
"holdout_cancer_type",
"signal",
"seed",
"data_type",
"fold"
]
train_metrics_, train_roc_df, train_pr_df = summarize_results(
y_train_results, gene, cancer_type, signal,
seed, "train", fold_no
)
test_metrics_, test_roc_df, test_pr_df = summarize_results(
y_test_results, gene, cancer_type, signal,
seed, "test", fold_no
)
cv_metrics_, cv_roc_df, cv_pr_df = summarize_results(
y_cv_results, gene, cancer_type, signal,
seed, "cv", fold_no
)
# compile summary metrics
metrics_ = [train_metrics_, test_metrics_, cv_metrics_]
metric_df = pd.DataFrame(metrics_, columns=metric_cols)
gene_auc_df = pd.concat([train_roc_df, test_roc_df, cv_roc_df])
gene_aupr_df = | pd.concat([train_pr_df, test_pr_df, cv_pr_df]) | pandas.concat |
import datetime as dt
import itertools
import json
import logging
import re
from functools import cached_property
from itertools import product
from typing import Callable, List, Mapping, Optional, Sequence, Union
import numpy as np
import pandas as pd
import tushare as ts
from ratelimiter import RateLimiter
from retrying import retry
from tqdm import tqdm
from .data_source import DataSource
from .. import config, constants, date_utils, utils
from ..database_interface import DBInterface
from ..tickers import FundTickers, FundWithStocksTickers, StockFundTickers, StockTickers
START_DATE = {
'common': dt.datetime(1990, 1, 1),
'shibor': dt.datetime(2006, 10, 8),
'ggt': dt.datetime(2016, 6, 29), # 港股通
'hk_cal': dt.datetime(1980, 1, 1),
'hk_daily': dt.datetime(1990, 1, 2),
'fund_daily': dt.datetime(1998, 4, 6),
'index_daily': dt.datetime(2008, 1, 1),
'index_weight': dt.datetime(2005, 1, 1)
}
class TushareData(DataSource):
def __init__(self, tushare_token: str = None, db_interface: DBInterface = None, param_json_loc: str = None) -> None:
"""
Tushare to Database. 将tushare下载的数据写入数据库中
:param tushare_token: tushare token
:param db_interface: DBInterface
:param param_json_loc: tushare 返回df的列名信息
"""
if tushare_token is None:
tushare_token = config.get_global_config()['tushare']['token']
db_interface = config.get_db_interface()
super().__init__(db_interface)
self.token = tushare_token
self._pro = None
self._factor_param = utils.load_param('tushare_param.json', param_json_loc)
def login(self):
self._pro = ts.pro_api(self.token)
def logout(self):
self._pro = ts.pro_api('')
def init_db(self):
"""Initialize database data. They cannot be achieved by naive ``update_*`` function"""
self.init_hk_calendar()
self.init_stock_names()
self.init_accounting_data()
fund_tickers = FundTickers(self.db_interface).all_ticker()
self.update_fund_portfolio(fund_tickers)
def update_base_info(self):
"""Update calendar and ticker lists"""
self.update_calendar()
self.update_hk_calendar()
self.update_stock_list_date()
self.update_convertible_bond_list_date()
# self.update_fund_list_date()
self.update_future_list_date()
self.update_option_list_date()
#######################################
# init func
#######################################
def init_hk_calendar(self) -> None:
""" 更新港交所交易日历 """
table_name = '港股交易日历'
if self.db_interface.get_latest_timestamp(table_name):
df = self._pro.hk_tradecal(is_open=1)
else:
storage = []
end_dates = ['19850101', '19900101', '19950101', '20000101', '20050101', '20100101', '20150101', '20200101']
for end_date in end_dates:
storage.append(self._pro.hk_tradecal(is_open=1, end_date=end_date))
storage.append(self._pro.hk_tradecal(is_open=1))
df = pd.concat(storage, ignore_index=True).drop_duplicates()
cal_date = df.cal_date
cal_date = cal_date.sort_values()
cal_date.name = '交易日期'
cal_date = cal_date.map(date_utils.date_type2datetime)
self.db_interface.update_df(cal_date, table_name)
def init_stock_names(self):
"""获取所有股票的曾用名"""
raw_df = self.update_stock_names()
raw_df_start_dates = raw_df.index.get_level_values('DateTime').min()
uncovered_stocks = self.stock_tickers.ticker(raw_df_start_dates)
with tqdm(uncovered_stocks) as pbar:
for stock in uncovered_stocks:
pbar.set_description(f'下载{stock}的股票名称')
self.update_stock_names(stock)
pbar.update()
logging.getLogger(__name__).info('股票曾用名下载完成.')
@cached_property
def stock_tickers(self) -> StockTickers:
return StockTickers(self.db_interface)
#######################################
# listing funcs
#######################################
def update_calendar(self) -> None:
""" 更新上交所交易日历 """
table_name = '交易日历'
df = self._pro.trade_cal(is_open=1)
cal_date = df.cal_date
cal_date.name = '交易日期'
cal_date = cal_date.map(date_utils.date_type2datetime)
self.db_interface.purge_table(table_name)
self.db_interface.insert_df(cal_date, table_name)
def update_hk_calendar(self) -> None:
""" 更新港交所交易日历 """
table_name = '港股交易日历'
df = self._pro.hk_tradecal(is_open=1)
cal_date = df.cal_date
cal_date = cal_date.sort_values()
cal_date.name = '交易日期'
cal_date = cal_date.map(date_utils.date_type2datetime)
cal_date.index.name = 'index'
db_data = self.db_interface.read_table(table_name)
db_data = db_data.loc[db_data['交易日期'] < cal_date.min(), :]
data = pd.concat([db_data.set_index('index').iloc[:, 0], cal_date], ignore_index=True)
self.db_interface.purge_table(table_name)
self.db_interface.insert_df(data, table_name)
def update_stock_list_date(self) -> None:
""" 更新所有股票列表, 包括上市, 退市和暂停上市的股票
ref: https://tushare.pro/document/2?doc_id=25
"""
data_category = '股票列表'
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
storage = []
list_status = ['L', 'D', 'P']
fields = ['ts_code', 'list_date', 'delist_date']
for status in list_status:
storage.append(self._pro.stock_basic(exchange='', list_status=status, fields=fields))
output = pd.concat(storage)
output['证券类型'] = 'A股股票'
list_info = self._format_list_date(output.loc[:, ['ts_code', 'list_date', 'delist_date', '证券类型']])
self.db_interface.update_df(list_info, '证券代码')
logging.getLogger(__name__).info(f'{data_category}下载完成.')
# TODO
def get_hk_stock_list_date(self):
""" 更新所有港股股票列表, 包括上市, 退市和暂停上市的股票
ref: https://tushare.pro/document/2?doc_id=25
"""
data_category = '股票列表'
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
storage = []
list_status = ['L', 'D']
for status in list_status:
storage.append(self._pro.hk_basic(list_status=status))
output = | pd.concat(storage) | pandas.concat |
import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
import pandas as pd
EXAMPLE_ROWS = 5
try:
api
except NameError:
class api:
class Message:
def __init__(self,body = None,attributes = ""):
self.body = body
self.attributes = attributes
def send(port,msg) :
if isinstance(msg,api.Message) :
print('Port: ', port)
print('Attributes: ', msg.attributes)
print('Body: ', str(msg.body))
else :
print(str(msg))
return msg
def call(config,msg):
api.config = config
return process(msg)
def set_port_callback(port, callback) :
df = | pd.DataFrame({'icol': [1, 2, 3, 4, 5], 'col 2': [1, 2, 3, 4, 5], 'col3': [100, 200, 300, 400, 500]}) | pandas.DataFrame |
#不可一气呵成跑完!中间需要对表格调整 分块执行代码
import numpy as np
from numpy import *
import pandas as pd
df = pd.read_csv('data.csv',encoding='gbk')
#数据清洗 先用EPS平台对所需数据调整后导入,故不存在错误数据、多余数据与重复数据,故只简化表格与缺失值处理
df=df.dropna(how="all")
df=df.drop([0])#delete year
#for i in range(df.shape[0]):
#由于所分析问题不针对具体地区,找出缺失值大于1的行并删除
todel=[]
for i in range(df.shape[0]):
sum = 0
for j in range(df.shape[1]):
if pd.isnull(df.iloc[i,j]):
sum+=1
if sum>=2:
todel.append(i)
break
df=df.drop(todel)
#拉格朗日乘子法作缺失值处理
from scipy.interpolate import lagrange
def ploy(s,n,k=6):
y=s[list(range(n-k,n))+list(range(n+1,n+1+k))]#取数
y=y[y.notnull()]
return lagrange(y.index,list(y))(n)
for i in df.columns:
for j in range(len(df)):
if (df[i].isnull())[j]:
df[i][j]=ploy(df[i],j)
df.to_excel('data222.xls')
#利用KMO检验与Bartlett检验判断因子分析法是否合适
import numpy as np
import math as math
dataset = pd.read_csv('data222.csv', encoding='gbk')
dataset = dataset.drop(['no','Unnamed: 0'],axis=1)
def corr(data):
return np.corrcoef(dataset)
dataset_corr = corr(dataset)#Pearson's r Pearson积矩相关系数#数据标准化
tru = | pd.read_csv('true.csv', encoding='gbk') | pandas.read_csv |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from wakeful import preprocessing, pipelining, scoring, log_munger
def main():
data_dir = './data/'
keys = [
('iodine_forwarded_2017_12_31_conn_test', 'iodine_forwarded_2017_12_31_conn_train'),
('iodine_raw_2017_12_31_conn_test', 'iodine_raw_2017_12_31_conn_train'),
('dnscat2_2017_12_31_conn_test', 'dnscat2_2017_12_31_conn_train'),
('iodine_forwarded_2017_12_31_dns_test', 'iodine_forwarded_2017_12_31_dns_train'),
('iodine_raw_2017_12_31_dns_test', 'iodine_raw_2017_12_31_dns_train'),
('dnscat2_2017_12_31_dns_test', 'dnscat2_2017_12_31_dns_train')]
results = []
for test_key, train_key in keys:
# read in the persisted dataframes
train_df = log_munger.hdf5_to_df(train_key, data_dir)
test_df = log_munger.hdf5_to_df(test_key, data_dir)
# list of dicts with scoring of leave-one-out modeling results
print(type(test_df))
print(test_df.head())
result = leave_one_out(train_df, test_df, test_key)
# grow the list
results.extend(result)
# create dataframe
print(results)
df = | pd.DataFrame.from_records(results) | pandas.DataFrame.from_records |
### Lab2: Supervised learning
## Part 1. Data exploration and pre-processing
# Load Data
# 1. Import libraries
import numpy as np
import pandas as pd
# 2. Load the US Census 1994 dataset
data = | pd.read_csv("Lab2\\us-census-dataset.csv") | pandas.read_csv |
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import numpy as np
import pandas as pd
import matplotlib
from collections import OrderedDict
from os.path import isdir, split
from pathlib import Path
from typing import List, Union
# Force matplotlib to not use any Xwindows backend.
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import umap
from lama.common import LoadImage
from lama.stats.standard_stats.data_loaders import LineData
THRESH = 4.0
MAX_Z = 400 # If size extent is larger, then rescale to speed things up/reduce memory usage
PCA_N_COMPONENTS = 50
# t-sne parameters
TSNE_PARAMETERS = {
'n_components': 2,
'init': 'pca',
'random_state': 7,
'n_iter': 500,
'perplexity': 4,
'learning_rate': 100,
'method': 'barnes_hut',
'metric': 'correlation'
}
def _plot(data: pd.DataFrame, title: str, outpath):
"""
Plt the results of the clustering
"""
ax = sns.scatterplot(x='x', y='y', data=data)
plt.title(title)
i = 1
id_map = []
for spec_id, row in data.iterrows():
ax.text(row['x'] + 0.08, row['y'], str(i), horizontalalignment='center', size='medium', color='black', weight='semibold')
id_map.append([i, spec_id])
i += 1
plt.savefig(outpath)
plt.close()
df_ids = pd.DataFrame.from_records(id_map, columns=['id', 'specimen'], index='id')
df_ids.to_csv(outpath.with_suffix('.csv'))
def umap_organs(data: pd.DataFrame, outpath: Path, title=''):
embedding = umap.UMAP(n_neighbors=2,
min_dist=0.2,
metric='correlation').fit_transform(data)
df = | pd.DataFrame(embedding[:, 0:2], index=data.index, columns=['x', 'y']) | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
from datetime import datetime
import seaborn as sns
import re
import lightgbm as lgb
from sklearn.preprocessing import LabelEncoder
class trainModel:
def setConstants(self, model_id):
# 新房数据表路径
self.newdisk_path = os.path.dirname(os.path.realpath(__file__)) + '/data/AD_NewDisk.csv'
# 房源属性数据表路径
self.property_path = os.path.dirname(os.path.realpath(__file__)) + '/data/AD_Property.csv'
# 地址数据表路径
self.address_path = os.path.dirname(os.path.realpath(__file__)) + '/data/AD_NewDiskAddress.csv'
# 挂牌数据路径
self.data_path = os.path.dirname(os.path.realpath(__file__)) + '/data/'
self.model_dir = os.path.dirname(os.path.realpath(__file__)) + '/cache/model_%s/' % (model_id)
if not os.path.exists(self.model_dir):
os.mkdir(self.model_dir)
# 房源中位数价格路径
self.medprice_path = self.model_dir + '/medprice.csv'
# 区名特征化路径
self.arealabel_path = self.model_dir + '/arealabel.csv'
# 板块名特征化路径
self.platelabel_path = self.model_dir + '/platelabel.csv'
# 内中外环特征化路径
self.modulelabel_path = self.model_dir + 'modulelabel.csv'
# 模型缓存路径
self.cache_path = self.model_dir + '/model.txt'
def setParams(self, model_id):
from ..models import models_logs
model = models_logs.objects.get(id=model_id)
self.beginDate = model.startMonth.strftime('%Y-%m')
self.endDate = model.endMonth.strftime('%Y-%m')
self.objective = model.objective
self.metric = model.metric
self.learning_rate = model.learning_rate
self.feature_fraction = model.feature_fraction
self.bagging_fraction = model.bagging_fraction
self.max_depth = model.max_depth
self.num_leaves = model.num_leaves
self.bagging_freq = model.bagging_freq
self.min_data_in_leaf = model.min_data_in_leaf
self.min_gain_to_spilt = model.min_gain_to_split
self.lambda_l1 = model.lambda_l1
self.lambda_l2 = model.lambda_l2
self.verbose = model.verbose
def name_filter(self, name):
"""小区名正则过滤"""
n = re.compile('\(|\(|一期|二期').split(name)[0]
n = re.sub(r'\(.*?\)', '', re.sub(r'\(.*?\)', '', n))
n = n.strip('*0123456789(())')
n = n.split('第')[0]
return n
def address_filter(self, address):
"""小区地址清洗"""
n = re.compile(',|,|、').split(address)[0]
n = re.sub(r'\(.*?\)', '', re.sub(r'\(.*?\)', '', n))
n = n.strip('*0123456789')
return n
def time_map(self, time):
if type(time) == str:
split_char = '/' if '/' in time else '-'
return int(time.split(split_char)[0])
return None
def floor_map(self, floor):
# 楼层映射
return list(pd.cut(floor, [0, 3, 6, 9, np.inf], labels=['低层', '多层', '小高层', '高层']))
def make_coordinates(self, data):
coors = []
# for i in tqdm(data):
for i in data:
if type(i) == str and i != '公寓' and i != '商业' and i != '其它':
coors.append(i.split(','))
else:
coors.append([None, None])
coors = pd.DataFrame(coors, columns=['loc_x', 'loc_y'])
# coors=pd.DataFrame([coor.split(',') for coor in all_df.Coordinates],columns=['loc_x','loc_y'],index=all_df.index)
coors = coors.astype(float)
return coors
def load_guapai(self, name, month):
"""读取挂牌数据"""
# 训练模型,使用本地数据,提高效率。
with open(os.path.join(self.data_path, name, '挂牌.txt'), encoding='utf-8') as f:
l = []
for i in f.readlines():
l.append(i.split('\t'))
df = pd.DataFrame(l)
drop_col = [0, 15, 16, 18, 19, 20, 21]
if len(df.columns) == 23:
drop_col.append(22)
df.drop(drop_col, axis=1, inplace=True) # 去除无用列
df.columns = ['area', 'address', 'name', 'price', 'unit_price', 'acreage', 'room_type', 'all_floor',
'floor',
'shore', 'house_type', 'fitment', 'time', 'Source', 'house_trait']
df['month'] = month
print('load %s' % name)
return df
def load_data(self):
"""加载训练数据"""
print('加载挂牌训练数据...')
cache_path = os.path.dirname(os.path.realpath(__file__)) + '/cache/guapai_%s-%s.hdf' % (self.beginDate, self.endDate)
if os.path.exists(cache_path):
# 加载缓存
meta_df = pd.read_hdf(cache_path, 'meta')
all_df = pd.read_hdf(cache_path, 'data')
else:
# pool = Pool()
# files=[i for i in os.listdir(data_path) if os.path.dirname(os.path.realpath(__file__))+'' not in i]
files = np.unique(
[datetime.strftime(x, '%Y-%m') for x in list(pd.date_range(start=self.beginDate, end=self.endDate))])
# files = sorted(files)
# dfs = [pool.apply_async(load_guapai, (name, month)) for month, name in enumerate(files)]
# pool.close()
# pool.join()
# dfs = [i.get() for i in dfs]
dfs = []
for month, name in enumerate(files):
dfs.append(self.load_guapai(name, str(month)))
print('共加载%s个月份的挂牌数据...' % len(dfs))
all_df = pd.concat(dfs, ignore_index=True)
# 获取经纬度信息
newdisk_df = pd.read_csv(self.newdisk_path, usecols=['NewDiskID', 'PropertyID', 'NewDiskName', 'Coordinates'])
# newdisk_df = tools.read_basic_table('AD_NewDisk') 训练模型,使用本地数据,不再读取数据库。
newdisk_df.rename(columns={'NewDiskName': 'name'}, inplace=True)
# 获取板块、环线信息
property_df = pd.read_csv(self.property_path, usecols=['PropertyID', 'Area', 'Plate', 'Module', 'HousingName'])
property_df.rename(columns={'Area': 'area', 'HousingName': 'name'}, inplace=True)
# 获取楼盘地址信息
address_df = pd.read_csv(self.address_path, usecols=['RoadLaneNo', 'NewDiskID'])
address_df.rename(columns={'RoadLaneNo': 'address'}, inplace=True)
# merge them
meta_df = pd.merge(newdisk_df, property_df.drop('name', axis=1), on='PropertyID', how='left')
# meta_df=pd.merge(meta_df,address_df,on='NewDiskID',how='left')
# 小区名称清洗
index = meta_df.name.notnull()
meta_df.loc[index, 'name'] = meta_df.loc[index, 'name'].apply(self.name_filter)
all_df.name = all_df.name.apply(self.name_filter)
address_df.address = address_df.address.apply(self.address_filter)
all_df.address = all_df.address.apply(self.address_filter)
# 转换数值类型 str->float
numerical_columns = ['price', 'unit_price', 'acreage', 'all_floor', 'floor']
all_df[numerical_columns] = all_df[numerical_columns].astype(float)
all_df['No'] = range(all_df.shape[0])
address_match = pd.merge(all_df[['No', 'address']], address_df, on='address', how='inner')
name_match = pd.merge(all_df[['No', 'name', 'area']], meta_df[['name', 'area', 'NewDiskID']],
on=['name', 'area'],
how='inner')
match = pd.concat((address_match[['No', 'NewDiskID']], name_match[['No', 'NewDiskID']]), ignore_index=True)
match.drop_duplicates(keep='first', inplace=True)
match = match.sort_values('No')
all_df = all_df.loc[match.No]
all_df['NewDiskID'] = match.NewDiskID.values
all_df.drop('No', axis=1, inplace=True)
all_df = pd.merge(all_df, meta_df[['NewDiskID', 'Coordinates', 'Plate', 'Module']], on='NewDiskID',
how='left')
meta_df.to_hdf(cache_path, 'meta')
all_df.to_hdf(cache_path, 'data')
return meta_df, all_df
def preprocess(self, all_df):
"""特征预处理"""
print('清洗挂牌数据...')
cache_path = os.path.dirname(os.path.realpath(__file__)) + '/cache/feats_%s-%s.hdf' % (self.beginDate, self.endDate)
if os.path.exists(cache_path):
all_df = pd.read_hdf(cache_path, 'data')
else:
# 修正面积
acreage_log = np.log(all_df.acreage)
mean = acreage_log.mean()
std = acreage_log.std()
i = acreage_log[(acreage_log <= mean + 2 * std) & (acreage_log >= mean - 1 * std)].index
sns.set({'figure.figsize': (8, 4)})
sns.boxplot(all_df.loc[i].acreage)
all_df.loc[i].acreage.describe()
all_df = all_df.loc[i]
# 修正单价
unit_price_log = np.log1p(all_df.unit_price)
mean = unit_price_log.mean()
std = unit_price_log.std()
i = unit_price_log[(unit_price_log <= mean + 3 * std) & (unit_price_log >= mean - 3.2 * std)].index
sns.set({'figure.figsize': (8, 4)})
sns.boxplot(all_df.loc[i].unit_price)
all_df.loc[i].unit_price.describe()
all_df = all_df.loc[i]
# 修复总价
# 修复总价单位误差
all_df.loc[all_df.price <= 10000, 'price'] *= 10000
# 差价分布
anomaly_price = np.abs(all_df.unit_price * all_df.acreage - all_df.price)
anomaly_price_index = anomaly_price[anomaly_price > 100000].index # 差价太多为异常点
# 直接删除异常样本
all_df.drop(anomaly_price_index, axis=0, inplace=True)
# 环线
all_df.loc[all_df[all_df.Module == '所有'].index, 'Module'] = '内环内'
# sorted_module = all_df[['unit_price', 'Module']].groupby('Module').median().sort_values('unit_price')
# i = pd.Series(range(0, sorted_module.shape[0]), index=sorted_module.index)
# all_df.Module = all_df.Module.map(i.to_dict())
# 楼层映射
all_df.loc[all_df.floor < 0, 'floor'] = np.nan
# 分段映射
all_df['floor_section'] = self.floor_map(all_df.floor)
# 朝向因素
# 暂无为缺省字段
all_df.shore.replace({'暂无数据': '暂无', ' ': '暂无', '': '暂无'}, inplace=True)
sorted_shore = all_df[['unit_price', 'shore']].groupby('shore').mean().sort_values('unit_price')
i = pd.Series(range(0, sorted_shore.shape[0]), index=sorted_shore.index)
all_df.shore = all_df.shore.map(i.to_dict())
# 房屋类型
all_df.loc[all_df[(all_df.house_type == '其它') | (all_df.house_type == '工厂')].index, 'house_type'] = '公寓'
sorted_house_type = all_df[['house_type', 'unit_price']].groupby('house_type').median().sort_values(
'unit_price')
i = pd.Series(range(0, sorted_house_type.shape[0]), index=sorted_house_type.index)
i.to_dict()
all_df.house_type = all_df.house_type.map(i.to_dict())
# 装修情况
default_fit = '暂无' # 缺省字段填充
all_df.fitment.replace({'': default_fit, '暂无数据': default_fit, '豪华装': '豪装', '其他': default_fit}, inplace=True)
all_df.fitment = all_df.fitment.apply(lambda x: x.strip('修'))
sorted_fitment = all_df[['fitment', 'unit_price']].groupby('fitment').median().sort_values('unit_price')
i = pd.Series(range(0, sorted_fitment.shape[0]), index=sorted_fitment.index)
all_df.fitment = all_df.fitment.map(i.to_dict())
# 房型
r = re.compile('室|厅|厨|卫') # 正则提取房型数据
l = [map(int, r.split(i)[:-1]) for i in all_df.room_type]
room_type_df = pd.DataFrame(l, index=all_df.index, columns=['室', '厅', '厨', '卫'])
all_df = pd.concat((all_df, room_type_df), axis=1)
# 时间
all_df.time = all_df.time.apply(lambda x: self.time_map(x)).astype(int)
all_df.time = all_df.time.apply(lambda x: min(2018 - x, 100) if 0 < x <= 2018 else None)
# 经纬度
coors = self.make_coordinates(all_df.Coordinates.values)
all_df.index = coors.index
all_df = pd.concat((all_df, coors), axis=1).drop('Coordinates', axis=1)
# 缓存特征矩阵
all_df = all_df[all_df.unit_price.notnull()]
all_df.to_hdf(cache_path, 'data')
print('共有%d条训练数据' % all_df.shape[0])
return all_df
def train_model(self, x_train, y_train):
# * LightGBM
# cache_path = os.path.dirname(os.path.realpath(__file__))+'/cache/model_%s-%s_%s.txt' % (beginDate, endDate, x_train.shape[1])
if os.path.exists(self.cache_path):
print('使用缓存中的模型,不再训练...')
gbm = lgb.Booster(model_file=self.cache_path)
else:
print('开始模型训练...')
# 设置模型参数
# params = {
# 'objective': 'regression',
# 'metric': 'mse',
# 'learning_rate': 0.2,
# 'feature_fraction': 0.6,
# 'bagging_fraction': 0.6,
# 'max_depth': 14,
# 'num_leaves': 220,
# 'bagging_freq': 5,
# 'min_data_in_leaf': 10,
# 'min_gain_to_split': 0,
# 'lambda_l1': 1,
# 'lambda_l2': 1,
# 'verbose': 0,
# }
params = {
'objective': self.objective,
'metric': self.metric,
'learning_rate': self.learning_rate,
'feature_fraction': self.feature_fraction,
'bagging_fraction': self.bagging_fraction,
'max_depth': self.max_depth,
'num_leaves': self.num_leaves,
'bagging_freq': self.bagging_freq,
'min_data_in_leaf': self.min_data_in_leaf,
'min_gain_to_split': self.min_gain_to_spilt,
'lambda_l1': self.lambda_l1,
'lambda_l2': self.lambda_l2,
'verbose': self.verbose,
}
lgb_train = lgb.Dataset(x_train, y_train, categorical_feature=['area', 'Plate', 'Module', 'floor_section'])
gbm = lgb.train(params, lgb_train, num_boost_round=750)
gbm.save_model(self.cache_path)
return gbm
def make_train_set(self, all_df):
'''计算单套价格'''
# 训练集
x_train = all_df[
['acreage', 'all_floor', 'floor', 'time', 'NewDiskID',
'area', 'Plate', 'Module', 'floor_section', 'loc_x', 'loc_y']]
# 计算小区房价中位数
med_price = pd.concat((x_train.NewDiskID, all_df.unit_price), axis=1)
med_price = med_price.groupby('NewDiskID', as_index=False)['unit_price'].agg({'median': 'mean'})
med_price.to_csv(self.medprice_path, index=False)
x_train = pd.merge(x_train, med_price, on='NewDiskID', how='left')
# 将离散型变量转换成整型,用于lgb训练
area_le = LabelEncoder() # 大版块
arealabel_name = | pd.unique(x_train.area) | pandas.unique |
"""
Forecast datasets & data generators
"""
import os.path
from typing import Union, List
import numpy as np
from numpy.random import default_rng
import pandas as pd
"""
Synthetic sequences of (non-iid) true probs/means
"""
def bernoulli(
n: int,
p: Union[float, List, np.ndarray] = 0.5,
rng: np.random.Generator = np.random.default_rng(),
) -> np.ndarray:
"""Return a sequence of Bernoulli random variables."""
return rng.binomial(1, p, size=n)
def zeros_then_ones(
n_zeros: int,
n_ones: int,
) -> np.ndarray:
"""Return a sequence of `n_zeros` 0's followed by `n_ones` 1's."""
return np.concatenate([np.zeros((n_zeros, )), np.ones((n_ones, ))])
def zeros_then_ones_repeated(
n: int,
n_spans: int,
roll: int = 0,
) -> np.ndarray:
"""Return a repeating sequence of 0's and 1's."""
assert 1 <= n_spans <= n
span = n // n_spans
ys = np.concatenate([
zeros_then_ones(span, span)
for _ in range((n_spans + 1) // 2)
])[:n]
return np.roll(ys, roll)
def randoms_zeros_then_ones(
n_randoms: int,
n_zeros: int,
n_ones: int,
p: float = 0.5,
rng: np.random.Generator = default_rng(),
) -> np.ndarray:
"""Return a sequence of `n_randoms` Bernoulli(p) random variables,
followed by `n_zeros` 0's and `n_ones` 1's."""
return np.concatenate([rng.binomial(1, p, size=n_randoms),
np.zeros((n_zeros, )),
np.ones((n_ones, ))])
def default(
n: int,
):
"""Default setting for the paper.
Random for the first 100, and then repeated zeros-then-ones in
each log-scale span ([101, 1000], [1001, 10000], ...).
"""
n_spans = int(np.log10(n))
assert n_spans >= 2, f"default setting requires n > 100 (given: {n})"
seqs = [np.repeat(0.5, 100)]
for span in range(2, n_spans):
r = 10 ** (span + 1) - 10 ** span
seqs.append(zeros_then_ones(r // 4, r // 4))
seqs.append(zeros_then_ones(r // 4, r // 4)[::-1])
return np.concatenate(seqs)
def sigmoid(
n: int,
changepoint: float = 0.25,
) -> np.ndarray:
"""Return a sequence of values between [0, 1] that follow a sigmoid fn."""
grid = 20. * (np.linspace(0, 1, num=n) - changepoint) # [-10, 10]
return 1. / (1. + np.exp(-grid))
"""
Presets:
binary: pd.DataFrame(time, data, true_probs)
continuous: pd.DataFrame(time, data, true_means, true_params)
"""
def make_preset(
true_probs: np.ndarray,
rng: np.random.Generator = default_rng(),
):
"""A helper function that makes binary data given true probabilities."""
n = len(true_probs)
data = bernoulli(n, true_probs, rng=rng)
return pd.DataFrame({
"time": np.arange(1, n + 1),
"data": data,
"true_probs": true_probs,
})
def preset_default(
n: int,
noise: float = 0.1,
rng: np.random.Generator = default_rng(),
) -> pd.DataFrame:
"""Default synthetic data.
Generated from a noisy version of
100 1/2s, 1000 1s, 1000 0s, 1000 1s, 1000 0s, ..., 1000 1s, and 500 0s."""
pattern = default(n)
true_probs = 0.8 * pattern + 0.2 * (1 - pattern)
true_probs = np.clip(true_probs + rng.normal(0, noise, n), 0, 1)
return make_preset(true_probs, rng)
def preset_random(
n: int,
noise: float = 0.1,
rng: np.random.Generator = default_rng(),
) -> pd.DataFrame:
"""Random synthetic data: true_prob == 0.5 + noise for all rounds."""
true_probs = np.repeat(0.5, n)
true_probs = np.clip(true_probs + rng.normal(0, noise, n), 0, 1)
return make_preset(true_probs, rng)
def preset_sigmoid(
n: int,
noise: float = 0.25,
rng: np.random.Generator = default_rng(),
changepoint: float = 0.25, # between [0, 1]
) -> pd.DataFrame:
"""A smoothly increasing function with a changepoint + sinusoidal noise."""
pattern = sigmoid(n, changepoint)
sine_noise = np.sin(0.1 * np.arange(n)) + rng.normal(0, 1, n)
true_probs = np.clip(pattern + noise * sine_noise, 0, 1)
return make_preset(true_probs, rng)
def make_preset_beta(
true_means: np.ndarray,
rng: np.random.Generator = default_rng(),
) -> pd.DataFrame:
"""A helper function that makes continuous data given true means, where
y_t ~ Beta(r_t, 1 - r_t)."""
n = len(true_means)
true_params = [true_means, 1. - true_means]
data = rng.beta(*true_params)
out = {
"time": np.arange(1, n + 1),
"data": data,
"true_means": true_means,
"true_dist": ["beta" for _ in range(n)],
}
out.update({
f"true_param{i}": true_param
for i, true_param in enumerate(true_params)
})
return | pd.DataFrame(out) | pandas.DataFrame |
import logging
import pandas
import datetime
import requests
import io
from typing import Dict, Tuple, Union
from .. import preprocessing
_log = logging.getLogger(__file__)
# From https://en.wikipedia.org/wiki/Provinces_of_Belgium
BE_REGION_NAMES = {
'all': 'Belgium',
'FLA': 'Vlaanderen',
'WAL': 'Wallonie',
'BRU': 'Brussel',
'ANT': 'Antwerpen',
'LIM': 'Limburg',
'EFL': 'Oost-Vlaanderen',
'FBR': 'Vlaams-Brabant',
'WFL': 'West-Vlaanderen',
'HAI': 'Hainaut',
'LIE': 'Liège',
'LUX': 'Luxembourg',
'NAM': 'Namur',
'WBR': 'Brabant wallon',
}
# Province and region codes
# [ISO 3166-2:BE](https://en.wikipedia.org/wiki/ISO_3166-2:BE#Provinces) has no english codes
# Mapping of the keys in columns 'REGION' and 'PROVINCE' in the input file to a short code.
BE_REGION_INPUT_ABBR = {
'all': 'all',
'Flanders': 'FLA',
'Wallonia': 'WAL',
'Brussels': 'BRU',
'Antwerpen': 'ANT',
'Limburg': 'LIM',
'OostVlaanderen': 'EFL',
'VlaamsBrabant': 'FBR',
'WestVlaanderen': 'WFL',
'Hainaut': 'HAI',
'Liège': 'LIE',
'Luxembourg': 'LUX',
'Namur': 'NAM',
'BrabantWallon': 'WBR',
}
BE_REGION_CODES = {
v : k
for k, v in BE_REGION_NAMES.items()
}
# Source: https://www.ibz.rrn.fgov.be/fileadmin/user_upload/fr/pop/statistiques/population-bevolking-20200101.pdf
BE_REGION_POPULATION = {
'all': 11_476_279, # Belgium
'FLA': 6_623_505,
'WAL': 3_641_748,
'BRU': 1_211_026,
'ANT': 1_867_366,
'LIM': 876_785,
'EFL': 1_524_077,
'FBR': 1_155_148,
'WFL': 1_200_129,
'HAI': 1_345_270,
'LIE': 1_108_481,
'LUX': 286_571,
'NAM': 495_474,
'WBR': 405_952
}
def get_data_BE(run_date: pandas.Timestamp) -> pandas.DataFrame:
"""
Retrieve daily (run_date) regions and append national data (key 'all') to it
Parameters
----------
run_date : pandas.Timestamp
date for which the data shall be downloaded
Returns
-------
df : pandas.DataFrame
table with columns as required by rtlive/data.py API
"""
def redistribute(group: pandas.DataFrame, col: str) -> pandas.Series:
gdata = group.groupby('REGION')[col].sum()
gdata.loc['Brussels'] += gdata.loc['Nan'] * (gdata.loc['Brussels']/(gdata.loc['Brussels'] + gdata.loc['Flanders'] + gdata.loc['Wallonia']))
gdata.loc['Flanders'] += gdata.loc['Nan'] * (gdata.loc['Flanders']/(gdata.loc['Brussels'] + gdata.loc['Flanders'] + gdata.loc['Wallonia']))
gdata.loc['Wallonia'] += gdata.loc['Nan'] * (gdata.loc['Wallonia']/(gdata.loc['Brussels'] + gdata.loc['Flanders'] + gdata.loc['Wallonia']))
gdata.drop(index='Nan', inplace=True)
gdata = gdata.fillna(0).round(0).astype(int)
return gdata
if run_date.date() > datetime.date.today():
raise ValueError('Run date is in the future. Nice try.')
if run_date.date() < datetime.date.today():
# TODO: implement downloading of historic data
raise NotImplementedError(
'Downloading with a run_date is not yet supported. '
f'Today: {datetime.date.today()}, run_date: {run_date}'
)
# Download data from Sciensano
content = requests.get('https://epistat.sciensano.be/Data/COVID19BE_tests.csv', verify=False,).content
df_tests = pandas.read_csv(
io.StringIO(content.decode('utf-8')),
sep=',',
parse_dates=['DATE'],
usecols=['DATE', 'REGION', 'PROVINCE', 'TESTS_ALL_POS', 'TESTS_ALL']
).rename(columns={
'DATE': 'date'
})
# Reformat data into Rtlive.de format at country level all
df_tests_per_all_day = (df_tests
.assign(region='all')
.groupby('date', as_index=True)
.agg(new_cases=('TESTS_ALL_POS', 'sum'), new_tests=('TESTS_ALL', 'sum'), region=('region', 'first'))
)
df_tests_per_all_day = (df_tests_per_all_day
.reset_index()
.set_index(['region', "date"])
.sort_index()
)
# Redistribute the nan for the column TESTS_ALL_POS for regions Flanders, Wallonia and Brussels
df_tests_positive = (df_tests
.fillna('Nan')
.groupby(['date'])
.apply(redistribute, 'TESTS_ALL_POS')
.stack()
.reset_index()
.rename(columns={'REGION':'region', 0:'new_cases'})
)
# Redistribute the nan for the column TESTS_ALL for regions Flanders, Wallonia and Brussels
df_tests_all = (df_tests
.fillna('Nan')
.groupby(['date'])
.apply(redistribute, 'TESTS_ALL')
.stack()
.reset_index()
.rename(columns={'REGION':'region', 0:'new_tests'})
)
# Combine the total number of tests and the number of positive tests into a basetable
df_tests_per_region_day = pandas.concat([df_tests_all, df_tests_positive['new_cases']], axis=1).set_index(['region', 'date'])
# Test per province (Ignore the nan's for the moment)
df_tests_per_province_day = (df_tests[df_tests['REGION'] != 'Brussels']
.groupby(['PROVINCE', 'date'], as_index=False)
.agg(new_cases=('TESTS_ALL_POS', 'sum'), new_tests=('TESTS_ALL', 'sum'))
.rename(columns={'PROVINCE':'region'})
.set_index(['region', 'date'])
)
df_tests_per_province_day.index.name = ('region', 'date')
# Combine the results at country level with region level
data = pandas.concat([df_tests_per_all_day, df_tests_per_region_day, df_tests_per_province_day], axis=0).sort_index()
data.index = data.index.set_levels(data.index.levels[0].map(BE_REGION_INPUT_ABBR.get), 'region')
assert isinstance(data, pandas.DataFrame)
assert data.index.names == ('region', 'date')
assert 'new_cases' in data.columns, f'Columns were: {data.columns}'
assert 'new_tests' in data.columns, f'Columns were: {data.columns}'
for col in ['new_cases', 'new_tests']:
if any(data[col] < 0):
_log.warning(
f'Column {col} has {sum(data[col] < 0)} negative entries!! Overriding with NaN...'
)
data.loc[data[col] < 0, col] = numpy.nan
return data
def forecast_BE(df: pandas.DataFrame) -> Tuple[pandas.DataFrame, dict]:
"""
Applies test count interpolation/extrapolation to Belgium data.
Parameters
----------
df : pandas.DataFrame
Data as returned by data loader function.
Returns
-------
df : pandas.DataFrame
Input dataframe with a new column "predicted_new_tests" and an index expanded back to
01/01/2020 (filled with zeros until 13/05/2020) to account for the absence of tests in this
period.
results : dict
The fbprophet results by region
"""
# forecast with existing data
df["predicted_new_tests"], results = preprocessing.predict_testcounts_all_regions(
df, "BE"
)
# interpolate the initial testing ramp-up to account for missing data
df_list = []
for region in df.index.get_level_values(level='region').unique():
df_region = df.xs(region).copy()
df_complement = pandas.DataFrame(
index=pandas.date_range(
start='2020-01-01',
end=df_region.index.get_level_values(level='date')[0]
- pandas.DateOffset(1, 'D'),
freq="D",
),
columns=df_region.columns,
)
df_complement['predicted_new_tests'] = 0
df_region = df_complement.append(df_region)
df_region.index.name = 'date'
df_region.predicted_new_tests = df_region.predicted_new_tests.interpolate(
"linear"
)
df_region['region'] = region
df_list.append(df_region.reset_index().set_index(['region', 'date']))
return | pandas.concat(df_list) | pandas.concat |
import gc
import pandas as pd
from tqdm import tqdm
import os
import scanpy as sc
import scanpy.external as sce
from anndata import AnnData
import sys
from collections import defaultdict
import numpy as np
from typing import Union
from .utils import aggr_markers
# ---------------------------------
# Cell-type Calling
# ---------------------------------
def labeler(d,x):
try:
return d[x]
except:
return 'n/a'
def convert_marker_dict_to_df(marker_dict: dict):
"""
Converts marker dictionary format to a marker dataframe.
--------------------------------------
Example marker_dict:
{'tcell_activated': ['CD69', 'IL2RA'],
'tcell_effector': ['CD3D', 'B3GAT1', 'PDCD1', 'FAS', 'CCR7'],
'tcell_regulatory': ['CD4',
'IL2RA',
'FOXP3',
'SMAD3',
'STAT5A',
'STAT5B',
'IL10'],
'tcell_exhausted': ['CD3D',
'PDCD1',
'FAS',
'CCR7', .......
Example output:
--------------------------------------
Cell-Type Gene
0 tcell_activated CD69
1 tcell_activated IL2RA
2 tcell_effector CD3D
3 tcell_effector B3GAT1
4 tcell_effector PDCD1
5 tcell_effector FAS
6 tcell_effector CCR7
7 tcell_regulatory CD4
8 tcell_regulatory IL2RA
"""
pairs = list()
for key in marker_dict:
for gene in marker_dict[key]:
pairs.append((key,gene))
return pd.DataFrame(pairs).rename(columns={0:'Cell-Type',1:'Gene'})
def build_marker_set(
adata: AnnData, \
markers_df: pd.DataFrame, \
groupby: str = 'louvain', \
metric: str = 'sum', \
pval_thresh: float = 1e-5, \
lfc_thresh: float = 1.0, \
key_added: str = 'cell_type', \
cell_type_idx: str = 'Cell-Type', \
gene_idx: str = 'Gene', \
tweaks: Union[None, dict] = None, \
**kwargs \
):
"""
Build Marker set.
---------------------
This takes in an Anndata object, a dictionary mapping from
cell-types to markers of interest.
Inputs:
- adata: scanpy anndata object
- markers_df: markers dataframe or dictionary
- metric: metric for assigning cell-type
- pval_thresh: threshold for markers (DE adj. pval)
- lfc_thresh: threshold for markers (DE LFC)
- kwargs: inputs for aggr_markers
- key_added: key to add to adata object
- cell_type_idx: markers dataframe cell_type ID
- gene_id: markers dataframe gene ID
- tweaks: specific changes to label dict
- **kwargs: for sc.tl.rank_genes_groups call - any specific paramters for selecting marker genes
Outputs:
- scores: dataframe of scores that have the sum z-scores for each cell-type
for a given cluster; clusters with no marker genes are assigned -1
- aggr: aggregate list of marker genes by cell type; includes pvals, z-scores,
initial cluster assignment, gene name, what cell-type the marker comes from,
and what cell type it was ultimately labeled
- labels: dictionary mapping from cluster to labeled cell-type
"""
if tweaks is None:
tweaks = {}
assert groupby in list(adata.obs), 'Please ensure {} is a field in AnnData object.'.format(groupby)
sc.tl.rank_genes_groups(adata, groupby=groupby, **kwargs)
markers = aggr_markers(adata)
markers = markers[(markers['pvals_adj']<pval_thresh) & (markers['logfoldchanges']>lfc_thresh)]
if isinstance(markers_df, dict):
markers_df = convert_marker_dict_to_df(markers_df)
d = {}
dfs = list()
for cell_type in np.unique(markers_df[cell_type_idx]):
filt = markers[markers.names.isin(markers_df[markers_df[cell_type_idx]==cell_type][gene_idx].values)]
filt['cell_type'] = cell_type #heck
dfs.append(filt)
if metric == 'sum':
d[cell_type] = dict(filt.groupby('cluster').sum().scores)
elif metric == 'mean':
d[cell_type] = dict(filt.groupby('cluster').mean().scores)
else:
raise ValueError("Not yet implemented.")
# Compile scores
scores = | pd.DataFrame.from_dict(d) | pandas.DataFrame.from_dict |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import allocation
import investment
import mock
import pandas as pd
class TargetAllocationTest(unittest.TestCase):
def setUp(self):
self._allocation = [
{
"asset_class": "Investment Grade Bonds",
"allocation": ".2"
},
{
"asset_class": "Core U.S.",
"allocation": ".3"
},
{
"asset_class": "Small Cap",
"allocation": ".2"
},
{
"asset_class": "Pacific Rim Large",
"allocation": ".3"
},
{
"asset_class": "Cash",
"allocation": ".0"
}
]
self._allocation_data = """
[
{
"asset_class": "Investment Grade Bonds",
"allocation": ".2"
},
{
"asset_class": "Core U.S.",
"allocation": ".3"
},
{
"asset_class": "Small Cap",
"allocation": ".2"
},
{
"asset_class": "Pacific Rim Large",
"allocation": ".3"
},
{
"asset_class": "Cash",
"allocation": ".0"
}
]
"""
with mock.patch('allocation.open', mock.mock_open(read_data=self._allocation_data)) as m:
self._allocation = allocation.Target('data/target.json')
def test_as_dataframe(self):
expected = {investment.AssetClass.INVESTMENT_GRADE_BONDS: float(20),
investment.AssetClass.CORE_US: float(30),
investment.AssetClass.SMALL_CAP: float(20),
investment.AssetClass.PACIFIC_RIM_LARGE: float(30),
investment.AssetClass.CASH: float(0)
}
expected_pd = pd.DataFrame.from_dict(expected, orient='index')
| pd.testing.assert_frame_equal(self._allocation.dataframe, expected_pd) | pandas.testing.assert_frame_equal |
"""
Functions used to compile water quality data from files that have already undergone basic formatting to have the same
column headers and units. List of data sources is available in readme.md file.
Functions:
* format_lake_data: Create additional columns for date and sampling frequency and round to daily means
* calc_growth_window: Detects the growth window for each lake in each year it's sampled using the daily mean dataframe,
and sifts for the data within the growth window and during the pre-growth window period
* growth_window_means: Calculates rates and mean values for environmental variables during each growth window and during
the pre-growth window period
* gw_summary: prints a summary of statistics for bloom type and lake trophic status in the dataset
* select_daily_mean:
* get_tsi: calculate the trophic status index (TSI) for each lake and create a dataframe with columns for lake, TSI, and
trophic status
* get_coords_ts: assign coordinates and trophic status to each lake
* lake_summary:
<NAME>
"""
import pandas as pd
from dplython import DplyFrame, X, sift, select, arrange, mutate
import numpy as np
from scipy.signal import find_peaks
from scipy.signal import savgol_filter
def format_lake_data(all_lakes):
"""
General formatting for lake data. Adds columns for date (year, month, day, and day of year) and calculates the
number of samples collected each year. Creates a separate dataframe rounded to the daily mean and sifted for at
least 6 samples collected per year.
input:
all_lakes: Compiled DplyFrame containing in situ data for all lakes to be analyzed
output:
all_lakes: Compiled data with additional columns (not rounded to daily mean)
daily_mean: additional data frame containing the daily mean values for all numerical parameters
"""
# convert columns to appropriate data type
all_lakes.loc[:, 'chla'] = pd.to_numeric(all_lakes.loc[:, 'chla'])
all_lakes.loc[:, 'temp'] = pd.to_numeric(all_lakes.loc[:, 'temp'])
# convert date to datetime and create additional columns
all_lakes.loc[:, 'date'] = pd.to_datetime(all_lakes.loc[:, 'date'])
all_lakes.loc[:, 'year'] = pd.datetimeIndex(all_lakes.loc[:, 'date']).year
all_lakes.loc[:, 'month'] = pd.datetimeIndex(all_lakes.loc[:, 'date']).month
all_lakes.loc[:, 'day'] = pd.datetimeIndex(all_lakes.loc[:, 'date']).day
all_lakes.loc[:, 'day_of_year'] = pd.PeriodIndex(all_lakes.loc[:, 'date'], freq='D').dayofyear
# round to the nearest day and convert back to datetime
all_lakes.loc[:, 'date'] = pd.PeriodIndex(all_lakes.loc[:, 'date'], freq='D')
all_lakes.loc[:, 'date'] = all_lakes.loc[:, 'date'].astype(str)
all_lakes.loc[:, 'date'] = pd.to_datetime(all_lakes.loc[:, 'date'])
# calculate daily mean
daily_mean = DplyFrame(all_lakes.groupby(['lake', 'date'], as_index=False).mean())
# arrange by date and drop rows where chlorophyll-a is not a number (nan)
daily_mean = daily_mean >> arrange(X.date)
daily_mean.dropna(subset=['chla'], inplace=True)
# add column for number of samples
master_mean_df = pd.DataFrame()
for name, group in daily_mean.groupby(['lake', 'year']):
group.loc[:, 'num_samples'] = len(group['chla'])
master_mean_df = DplyFrame(pd.concat([master_mean_df, group], axis=0))
daily_mean = DplyFrame(master_mean_df) >> sift(X.num_samples >= 6)
return all_lakes, daily_mean
def calc_growth_window(df, threshold_inc, num_sample_threshold):
"""
Detects the growth window period based on the the rate of change in chlorophyll-a concentration that has been
smoothed with the Savitzky-Golay filter. First, optima are flagged in the data using the find_peaks function,
indicating the end of a growth window. The growth window begins at the preceding minimum or when the rate
increases past the num_sample threshold (and if it doesn't increase past that threshold, it begins where the
rate increases above zero). Daily mean data is sifted for samples collected both within the growth window and
during the 1 and 2 weeks leading up to it (the pre-growth window), to be analyzed by the growth_window_means
function. See associated manuscript for full explanation of methods and rationale.
input:
df: DplyFrame containing daily mean in situ data for all lakes to be analyzed (from format_lake_data)
threshold_inc: minimum chlorophyll-a rate of change to constitute the start of the growth window when there
is no minimum flagged in the data.
num_sample_threshold: Minimum number of samples per year that will be retained in the growth window dataset.
output:
master_gw_df: Water quality data for all detected growth windows, compiled into one DplyFrame
springsummer_gw_doy: Dataframe containing the day of year for the start and end of each growth window
master_prev_2weeks_gw_df: Compiled water quality data for each 2 week pre-growth window
"""
# make empty dataframes (will be appended to later)
master_gw_df = pd.DataFrame(columns=['lake', 'date', 'year', 'season', 'day_of_year', 'start_day', 'end_day', 'chla_increase', 'chla_roc',
'chla', 'poc', 'tp', 'srp', 'par', 'ph', 'tkn', 'tdn', 'nh4', 'no2',
'no3', 'nox'])
master_prev_2weeks_gw_df = pd.DataFrame(columns=['lake', 'date', 'year', 'season', 'day_of_year', 'start_day', 'end_day',
'chla', 'chla_roc', 'poc', 'tp', 'srp', 'par', 'ph', 'tkn', 'tdn', 'nh4', 'no2',
'no3', 'nox'])
# sift data for minimum sampling frequency
df = df >> sift(X.num_samples >= num_sample_threshold)
for name, group in df.groupby(['lake', 'year']): # group by lake and year to detect growth windows
group.reset_index(inplace=True)
# determine savgol_filter window length (smaller window for fewer samples)
if group.loc[0, 'num_samples'] <= 15:
window_len = 3
else:
window_len = 5
# 1) smooth the data and find location of the optima along the smoothed line
savgol = savgol_filter(group['chla'], window_length=window_len, polyorder=1)
group.loc[:, 'savgol_chla'] = savgol
# calculate chlorophyll rate of change and flag all days above the threshold as true
group.loc[:, 'chla_roc'] = group.loc[:, 'savgol_chla'].diff() / group.loc[:, 'day_of_year'].diff()
group.loc[:, 'chla_increase'] = group.loc[:, 'chla_roc'].gt(threshold_inc)
# find peaks and minima
y = group['savgol_chla']
peaks, properties = find_peaks(y, prominence=2)
y2 = y * -1 # use -y to find the minima
minima, min_properties = find_peaks(y2, prominence=0.5)
# flag peaks in the dataframe
peaks = DplyFrame(peaks)
peak_df = group.loc[group.index.intersection(peaks[0])]
peak_df['max_flag'] = True
group = pd.merge(group, (peak_df >> select(X.day_of_year, X.max_flag)), how='left', left_on='day_of_year',
right_on='day_of_year')
# flag minima in the dataframe
minima = DplyFrame(minima)
trough_df = group.loc[group.index.intersection(minima[0])]
trough_df['min_flag'] = True
group = pd.merge(group, (trough_df >> select(X.day_of_year, X.min_flag)), how='left',
left_on='day_of_year', right_on='day_of_year')
# 2) find spring and summer or single growth windows for lakes with 2 or 1 defined peaks, respectively
num_peaks = len(group['max_flag'].dropna()) # count the number of optima in the data
if num_peaks == 2: # spring and summer growth windows occur
# find end date of growth window
spring_end_index = group.where(group.max_flag == True).first_valid_index()
spring_end_day = group.loc[spring_end_index, 'day_of_year']
# find start date of growth window
spring_group = group >> sift(X.day_of_year < spring_end_day)
num_minima = len(spring_group['min_flag'].dropna())
if num_minima == 0: # no previous min, use the first increase above threshold_inc
spring_start_index = spring_group.where(spring_group.chla_increase == True).first_valid_index()
if spring_start_index is None: # if there is no valid increase beforehand
spring_start_index = spring_group.where(spring_group.chla_roc > 0).first_valid_index() # find first day with a rate above zero
if spring_start_index is None:
spring_start_day = spring_group.loc[spring_group.first_valid_index(), 'day_of_year'] # select first sampling day
else:
spring_start_day = spring_group.loc[(spring_start_index - 1), 'day_of_year'] # select first day with rate > 0
else:
spring_start_day = spring_group.loc[(spring_start_index - 1), 'day_of_year'] # select first day with rate > threshold_inc
if num_minima > 0: # a previous minimum is present
spring_start_index = spring_group.where(spring_group.min_flag == True).last_valid_index() # select day with minimum closest to the max
spring_start_day = spring_group.loc[spring_start_index, 'day_of_year']
# sift growth window data based on start and end dates
spring_gw = group >> sift(X.day_of_year <= spring_end_day) >> sift(X.day_of_year >= spring_start_day)
spring_gw.loc[:, 'season'] = 'spring'
spring_gw.loc[:, 'start_day'] = spring_start_day
spring_gw.loc[:, 'end_day'] = spring_end_day
# sift out 1 and 2 week pre-growth window data
spring_prev_2weeks_start_day = spring_start_day - 15
prev_2weeks_spring_df = group >> sift(X.day_of_year >= spring_prev_2weeks_start_day) >> sift(
X.day_of_year <= spring_start_day)
prev_2weeks_spring_df.loc[:, 'season'] = 'spring'
prev_2weeks_spring_df.loc[:, 'start_day'] = spring_prev_2weeks_start_day
prev_2weeks_spring_df.loc[:, 'end_day'] = spring_start_day
# append spring gw data to main dataframe
master_gw_df = pd.concat([master_gw_df, spring_gw], axis=0)
master_prev_2weeks_gw_df = pd.concat([master_prev_2weeks_gw_df, prev_2weeks_spring_df], axis=0)
# sift out spring data and repeat for summer
summer_df = group >> sift(X.day_of_year > spring_end_day)
# find end date of growth window
summer_end_index = summer_df.where(summer_df.max_flag == True).first_valid_index()
summer_end_day = summer_df.loc[summer_end_index, 'day_of_year']
# find start date of growth window
summer_group = summer_df >> sift(X.day_of_year < summer_end_day)
num_minima = len(summer_group['min_flag'].dropna())
if num_minima == 0: # no previous min, use the first increase above threshold_inc
summer_start_index = summer_group.where(summer_group.chla_increase == True).first_valid_index()
if summer_start_index is None:
summer_start_index = summer_group.where(summer_group.chla_roc > 0).first_valid_index()
if summer_start_index is None:
summer_start_day = summer_group.loc[summer_group.first_valid_index(), 'day_of_year']
else:
summer_start_day = summer_group.loc[(summer_start_index-1), 'day_of_year']
else:
summer_start_day = summer_group.loc[(summer_start_index - 1), 'day_of_year']
if num_minima > 0: # a previous min is present
summer_start_index = summer_group.where(summer_group.min_flag == True).first_valid_index()
summer_start_day = summer_group.loc[summer_start_index, 'day_of_year']
# sift summer growth window data based on start and end dates
summer_gw = summer_df >> sift(X.day_of_year <= summer_end_day) >> sift(X.day_of_year >= summer_start_day)
summer_gw.loc[:, 'season'] = 'summer'
summer_gw.loc[:, 'start_day'] = summer_start_day
summer_gw.loc[:, 'end_day'] = summer_end_day
# sift out 1 and 2 week pre-growth window data
summer_prev_2weeks_start_day = summer_start_day - 15
prev_2weeks_summer_df = group >> sift(X.day_of_year >= summer_prev_2weeks_start_day) >> sift(
X.day_of_year <= summer_start_day)
prev_2weeks_summer_df.loc[:, 'season'] = 'summer'
prev_2weeks_summer_df.loc[:, 'start_day'] = summer_prev_2weeks_start_day
prev_2weeks_summer_df.loc[:, 'end_day'] = summer_start_day
# append summer gw data to main dataframe
master_gw_df = | pd.concat([master_gw_df, summer_gw], axis=0) | pandas.concat |
import os
import statistics
import time
from imblearn.over_sampling import *
from scipy import stats
from sklearn.decomposition import PCA
from sklearn.ensemble import IsolationForest, RandomForestRegressor, RandomForestClassifier,ExtraTreesClassifier
from sklearn.ensemble import VotingClassifier
import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE
from sklearn.neighbors import LocalOutlierFactor
from sklearn.metrics import f1_score, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.preprocessing import RobustScaler, MinMaxScaler, QuantileTransformer, StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
#import umap
import warnings
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns', None)
#General parameters for the experiments
dataset_dir="../datasets"
n_samples_device = 800 #10000
window=10
n_recursive_windows=10
jump=10
initial_window=window
test_size=0.40
feat_list=[1,4,5,6]
n_feat_selec=15
model="_"
#Dataset to be read and processed
dataset_name="sleep_2min_800_longhash_noreboot_5fings.csv"#"sleep_2min_400.csv"
mac_model_file="../MAC-Model.txt"
df=pd.read_csv(dataset_dir+"/"+dataset_name, index_col=False, header=None)
#df=df.iloc[:df.shape[0]//2,:] #Half of the df
final_df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding:utf-8 -*-
"""
股票信息类
Created on 2019/01/02
@author: TabQ
@group : gugu
@contact: <EMAIL>
"""
from __future__ import division
import pandas as pd
from pandas.compat import StringIO
import json
import lxml.html
from lxml import etree
import random
import re
import time
from gugu.utility import Utility
from gugu.base import Base, cf
import sys
ua_list = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0',
]
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': random.choice(ua_list),
'Cache-Control': 'max-age=0',
}
class StockInfo(Base):
def stockProfiles(self):
"""
获取上市公司基于基本面的汇总数据信息
Return
--------
DataFrame or List: [{'symbol':, 'net_profit_cagr':, ...}, ...]
symbol: 代码
net_profit_cagr: 净利润复合年均增长率
ps: 市销率
percent: 涨幅
pb_ttm: 滚动市净率
float_shares: 流通股本
current: 当前价格
amplitude: 振幅
pcf: 市现率
current_year_percent: 今年涨幅
float_market_capital: 流通市值
market_capital: 总市值
dividend_yield: 股息率
roe_ttm: 滚动净资产收益率
total_percent: 总涨幅
income_cagr: 收益复合年均增长率
amount: 成交额
chg: 涨跌点数
issue_date_ts: 发行日unix时间戳
main_net_inflows: 主营净收入
volume: 成交量
volume_ratio: 量比
pb: 市净率
followers: 雪球网关注人数
turnover_rate: 换手率
name: 名称
pe_ttm: 滚动市盈率
total_shares: 总股本
"""
self._data = pd.DataFrame()
self._writeHead()
self._data = self.__handleStockProfiles()
self._data['issue_date_ts'] = self._data['issue_date_ts'].map(lambda x: int(x/1000))
return self._result()
def __handleStockProfiles(self):
try:
request = self._session.get(cf.XQ_HOME, headers=headers)
cookies = request.cookies
except Exception as e:
print(str(e))
page = 1
while True:
self._writeConsole()
try:
timestamp = int(time.time()*1000)
request = self._session.get(cf.XQ_STOCK_PROFILES_URL % (page, timestamp), headers=headers, cookies=cookies)
dataDict = json.loads(request.text)
if not dataDict.get('data').get('list'):
break
dataList = []
for row in dataDict.get('data').get('list'):
dataList.append(row)
self._data = self._data.append(pd.DataFrame(dataList, columns=cf.XQ_STOCK_PROFILES_COLS), ignore_index=True)
page += 1
time.sleep(1)
except Exception as e:
print(str(e))
return self._data
def report(self, year, quarter, retry=3, pause=0.001):
"""
获取业绩报表数据
Parameters
--------
year:int 年度 e.g:2014
quarter:int 季度 :1、2、3、4,只能输入这4个季度
说明:由于是从网站获取的数据,需要一页页抓取,速度取决于您当前网络速度
retry : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0.001
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
Return
--------
DataFrame or List: [{'code':, 'name':, ...}, ...]
code,代码
name,名称
eps,每股收益
eps_yoy,每股收益同比(%)
bvps,每股净资产
roe,净资产收益率(%)
epcf,每股现金流量(元)
net_profits,净利润(万元)
profits_yoy,净利润同比(%)
distrib,分配方案
report_date,发布日期
"""
self._data = pd.DataFrame()
if Utility.checkQuarter(year, quarter) is True:
self._writeHead()
# http://vip.stock.finance.sina.com.cn/q/go.php/vFinanceAnalyze/kind/mainindex/index.phtml?s_i=&s_a=&s_c=&reportdate=2018&quarter=3&p=1&num=60
self._data = self.__parsePage(cf.REPORT_URL, year, quarter, 1, cf.REPORT_COLS, pd.DataFrame(), retry, pause, 11)
if self._data is not None:
self._data['code'] = self._data['code'].map(lambda x:str(x).zfill(6))
return self._result()
def profit(self, year, quarter, retry=3, pause=0.001):
"""
获取盈利能力数据
Parameters
--------
year:int 年度 e.g:2014
quarter:int 季度 :1、2、3、4,只能输入这4个季度
说明:由于是从网站获取的数据,需要一页页抓取,速度取决于您当前网络速度
retry : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0.001
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
Return
--------
DataFrame or List: [{'code':, 'name':, ...}, ...]
code,代码
name,名称
roe,净资产收益率(%)
net_profit_ratio,净利率(%)
gross_profit_rate,毛利率(%)
net_profits,净利润(万元)
eps,每股收益
business_income,营业收入(百万元)
bips,每股主营业务收入(元)
"""
self._data = pd.DataFrame()
if Utility.checkQuarter(year, quarter) is True:
self._writeHead()
# http://vip.stock.finance.sina.com.cn/q/go.php/vFinanceAnalyze/kind/profit/index.phtml?s_i=&s_a=&s_c=&reportdate=2018&quarter=3&p=1&num=60
self._data = self.__parsePage(cf.PROFIT_URL, year, quarter, 1, cf.PROFIT_COLS, | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import glob
import csv
import os
# export options
csv_tf = True #.csv, default
csv_sep = '\t'
db_tf = True #.sl3 sqlite3 db
pickle_tf = True #.pkl pickle of pandas dataframe
#hdf5_tf = True #.h5, not working
json_tf = True #.json
# file name and path
prefix = 'output'+os.sep+'neiss'
def main(file_list=[]):
if len(file_list)==0:
file_list = glob.glob('data'+os.sep+'neiss*.tsv')
cols = ['case_id','trmt_date','psu','weight','stratum','age','sex','race','race_other','diag','diag_other','body_part','disposition','location','fmv','prod1','prod2','narr1','narr2']
for f in file_list:
print (f)
# create temp dataframe per file
# full options - http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html
tt = pd.read_csv(f, sep='\t', names=cols, header=1, quoting=csv.QUOTE_NONE, low_memory=False, \
skipinitialspace=True, warn_bad_lines=True, error_bad_lines=False, \
parse_dates=False, \
encoding='utf-8', \
) #nrows =10000)
# add sourcefile to data
tt['sourcefile'] = f
tt['trmt_date'] = pd.to_datetime(tt.trmt_date, format='%m/%d/%Y').dt.date
# create real dataframe, or append to it if it exists already
if 'df' in locals() or 'df' in globals():
df = | pd.concat([df,tt], axis=0) | pandas.concat |
#! /bin/env python3
### This module is designed to identify the genomic data files in our laboratory's data directories
##by <NAME>
## April 7, 2015, script_version 0.6
script_version = 0.13
script_subversion = 22
##Note: Need to test the ability to link read files to the assembly, or inventory them independently
#pylint: disable=global-statement, broad-except
import re
import os
import pandas as pd
from Bio import SeqIO
import sys
import gzip
import zlib
import stat
import time
from shutil import copyfile
from collections import defaultdict
import functools
import urllib.request
import utilities
import seq_utilities
import NGS_data_utilities
# import seq_utilities
_verbose = False ##Set to True by Debug
Repository = '' #This must be set either by the user or from the settings file
ASSEMBLY_DIR_REPO = 'assemblies'
READ_DIR_REPO = 'reads'
INVENTORY_FILE_BASE = 'inventory.tab'
READMAP_FILE = 'read_assembly_map.tab'
read_ext = '.fastq.gz'
# read454_Ext = '.sff'
my_file = __file__
if os.path.islink(my_file):
my_file = os.path.realpath(my_file)
SCRIPT_DIR, SCRIPT_NAME = os.path.split(my_file)
SCRIPT_DIR = os.path.abspath(SCRIPT_DIR)
############################
#### Functions for organizing files (e.g. interpreting names)
#########################
def getBaseID(sample_name,version):
return "{}_v{}".format(sample_name,version)
# Helper functions to improve the CLI for outside scripts
def default_list(destination_directory):
if os.path.isdir(destination_directory):
result = os.path.join(destination_directory,'genome_list.tab')
else:
result = None
return result
def placeAssembliesIntoDataFrame(argv,GO_settings=None,repository=None,rename_duplicateID=True,drop_duplicate_files=True,deep_search=True):
return place_WGS_records_into_dataframe(argv,GO_settings,repository,rename_duplicateID,drop_duplicate_files,is_reads=False,deep_search=deep_search)
def placeReadsIntoDataFrame(argv,GO_settings=None,repository=None,rename_duplicateID=True,drop_duplicate_files=True):
return place_WGS_records_into_dataframe(argv,GO_settings,repository,rename_duplicateID,drop_duplicate_files,is_reads=True)
def place_WGS_records_into_dataframe(argv,GO_settings=None,repository=None,rename_duplicateID=True,drop_duplicate_files=True,is_reads=False,deep_search=True):
assert len(argv) > 1, "No arguments passed. Failure."
result = None
isolates = None
main_arg = argv[1]
if os.path.exists(main_arg):
if len(argv) == 3: ## Main file and a genome name
if os.path.isfile(main_arg):
genome_name = argv[2]
result = pd.DataFrame({'Lab_ID':[genome_name],'Filename':[main_arg]})
else:
print("usage: {} GenomeFile GenomeName".format(os.path.basename(argv[0])))
print("\tNot a file: {}".format(argv[1]))
print("\tFull path: {}".format(os.path.abspath(argv[1])))
elif len(argv) == 2: #a single argument pointing to a group of files
if os.path.isdir(main_arg): #assemble group list
print('## Scanning genome directory ##')
result = NGS_data_utilities.listReadFilesWithNames(main_arg) if is_reads else NGS_data_utilities.listGenomeFilesWithNames(main_arg,deep_search=deep_search)
print("## Finished scanning directory ## \n")
elif os.path.isfile(main_arg): #read group list
print("## Reading table ##")
df = pd.read_table(main_arg)
print("Table contains {} sequences to analyze".format(len(df)))
if "Filename" in df.columns:
print("\tUser provided assembly files")
result = df.copy()
elif "Lab_ID" in df.columns:
file_type = 'read' if is_reads else 'assembly'
print("\tCould not identify 'Filename' field. Pulling {} files from repository".format(file_type))
isolates = df['Lab_ID'].tolist()
else:
print("Cannot parse file. Please provide a tab-delimited table with headers 'Filename' and 'Lab_ID'")
##Leaves result as none
else:
print("Unable to interpret command line. Too many arguments")
else: #Finally, test if these are a list of isolates
print("The supplied argument is not a directory or file; assuming it is an isolate ID")
isolates = argv[1:]
if result is None and isolates is not None:
if repository is None:
settingDict = get_default_settings(GO_settings)
if settingDict is not None:
repository = settingDict['repository']
if repository is not None and os.path.isdir(repository):
inventory = InventoryReader(repository)
if inventory.valid:
if is_reads:
print("Error: ability to select reads by isolate ID is not supported. Contact developer.")
result = None
else:
gd,_ = inventory.getAssemblyRecords(isolates, ActiveOnly=True)
result= gd[NGS_data_utilities.dfHeaders]
else:
result = None
else:
print("Cannot find repository at {}:".format(repository))
if result is None or len(result)==0:
print("Unable to parse arguments")
else:
if drop_duplicate_files:
result = result.drop_duplicates() ##for those situations where the direcotry was indexed twice... no big deal
##Make sure no two sequence files use the same genome name (this is used to identify intermediate files -- mainly matters for debugging
if rename_duplicateID:
NGS_data_utilities.assignUniqueID(result)
return result
###########################
#### Functions for manipulating the inventory files
###########################
def forcedIntCasting(my_series):
return my_series.str.split('.').str.get(0).astype(int)
##alt strategy is to first cast to float
##Allow exceptions to rise to the next level... though maybe I could return None to indicate failure
##Test that each isolates has one and only one record
def recordsAreCompleteAndUnique(Records,isolates):
valid = True
for i in isolates:
valid = sum(Records['Lab_ID'] == i) == 1
if not valid:
break
return valid
##Active only should only be selected when reading only
def assemblySetup(assembly_file,relative_path=None,ValidOnly=False): ##TODO: the udpate file does not necessarily have all the fields of the assembly frame .. which causes problem
try:
assembly_frame = pd.DataFrame(pd.read_table(assembly_file,dtype=str),copy=True) ##If we're not careful, it will interpret ints as floats and cause bugs
assembly_frame.dropna(how='all',inplace=True)
for c in inventoryHeaders:
if c not in assembly_frame.columns:
assembly_frame[c] = None ##None causes problems with type-casting, below
except IOError:
assembly_frame = pd.DataFrame(columns=inventoryHeaders)
if len(assembly_frame) > 0: ##Make filepaths absolute : all assembly files are in the repository
if relative_path is not None:
inventory_path_join = functools.partial(DeRelativizePath,relative_path)
for c in assFileKeys:
if c in assembly_frame.columns:
good_values = assembly_frame[c].notnull()
assembly_frame.loc[good_values,c] = assembly_frame[good_values][c].apply(inventory_path_join)
#Set index
assembly_frame['Version'] = assembly_frame['Version'].astype(int)
assembly_frame.set_index(['Lab_ID','Version'],drop=False,inplace=True)
#Make temporary changes that affect empty cells; use "valid" to revert them
frame_valid = assembly_frame.notnull()
assembly_frame['Invalid'] = assembly_frame['Invalid'].isin(['True','TRUE','1','1.0','yes','Yes','YES'])
assembly_frame['Active'] = (~assembly_frame['Active'].isin(['False','FALSE','0','0.0','no','No','NO'])) & (assembly_frame['Invalid'] != True)
if ValidOnly:
assembly_frame = assembly_frame[assembly_frame['Invalid'] != True]
assembly_frame['Gaps'] = ~assembly_frame['Gaps'].isin(['False','FALSE','No','NO','no','0'])
##Cast everything to the appropriate datatype
castAssemblyColumns(assembly_frame)
return assembly_frame, frame_valid
##Address this bug: https://github.com/pydata/pandas/issues/4094
def castAssemblyColumns(assembly_frame):
for _,row in inventoryHeadersFrame.iterrows():
field = row["Field"]
field_type = row['dtype']
if field in assembly_frame.columns:
try:
assembly_frame[field] = assembly_frame[field].astype(field_type)
except (ValueError,TypeError):
no_good = True
if field_type is int:
try:
assembly_frame[field] = forcedIntCasting(assembly_frame[field])
except:
no_good = True
else:
no_good =False
print("Forced floating points to ints for {}".format(field))
if no_good:
print("Failed to cast {} to {}".format(field,field_type))
##Active only should only be selected when reading only
def readSetup(read_file,relative_path=None,ValidOnly=False):
try:
read_frame = pd.DataFrame(pd.read_table(read_file,dtype=str),copy=True)
read_frame.dropna(how='all', inplace=True)
for c in readInventoryHeaders:
if c not in read_frame.columns:
read_frame[c] = None
except IOError:
read_frame = pd.DataFrame(columns=readInventoryHeaders)
if len(read_frame) > 0: #Make filepaths absolute: reads must be in GWA or have HTTP
if relative_path is not None:
read_path_join = functools.partial(DeRelativizePath,relative_path)
rf = read_frame
for c in readFileKeys + ['Original_Read1','Original_Read2']:
relative_reads = utilities.avoidItemsThatStartWith(rf,c,'http:') ##Anything that is not HTTP is relative
rf.loc[relative_reads,c] = rf[relative_reads][c].apply(read_path_join)
#Set index
try:
read_frame['Read_Set'] = read_frame['Read_Set'].astype(int)
except:
try:
read_frame['Read_Set'] = forcedIntCasting(read_frame['Read_Set'])
except ValueError:
print("Failed to cast {} to {}".format('Read_Set',int))
read_frame.set_index(['Lab_ID','Read_Set'],drop=False,inplace=True)
#Make temporary changes that affect empty cells; use "valid" to revert them
frame_valid = read_frame.notnull()
read_frame['Invalid'] = read_frame['Invalid'].isin(['True','TRUE','1'])
if ValidOnly:
read_frame = read_frame[read_frame['Invalid'] != True]
return read_frame, frame_valid
def readAssemblyMapSetup(readmap_file):
readmap_frame = pd.read_table(readmap_file,dtype=str) if os.path.isfile(readmap_file) else pd.DataFrame(columns=readAssemblyMapHeaders)
##Cast everything to the appropriate datatype
for _,row in readAssemblyMapFrame.iterrows():
field = row["Field"]
field_type = row['dtype']
try:
readmap_frame[field] = readmap_frame[field].astype(field_type)
except ValueError:
no_good = True
if field_type is int:
try:
readmap_frame[field] = forcedIntCasting(readmap_frame[field])
except:
no_good = True
else:
no_good =False
print("Forced floating points to ints for {}".format(field))
if no_good:
print("Failed to cast {} to {}".format(field,field_type))
return readmap_frame
def DeRelativizePath(start,path):
if path[0] != '/':
result = os.path.normpath(os.path.join(start,path))
else:
result = path
return result
def conditionalRelativePath(start,path):
if path[0] != '/':
raise ValueError("Must work with absolute path, not {}".format(path))
shared = os.path.commonprefix([start,path])
if len(shared) > 1: #Not just '/
result = os.path.relpath(path,start)
else:
result = path #No shared path, so no value in making it relative
return result
def assemblyPackup(assembly_frame,relative_path):
af = assembly_frame.copy()
# ass_rel_path = functools.partial(conditionalRelativePath,start=relative_path)
# is_str = functools.partial(isinstance,classinfo=str)
for c in assFileKeys:
if c in af.columns:
good_values = (af[c].notnull()) & (af[c] != '') & (af[c] != 'nan')
for g in good_values.index:
if good_values[g]:
try:
af.loc[g,c] = conditionalRelativePath(relative_path,af.loc[g,c])
except ValueError as e:
print("Error on {}, index {}".format(c,g))
print(e)
raise
return af
def readPackup(read_frame,relative_path):
rf = read_frame.copy()
# reads_rel_path = functools.partial(conditionalRelativePath,start=relative_path)
for c in readFileKeys:
relative_reads = utilities.avoidItemsThatStartWith(rf,c,'http:') ##Anything that is not HTTP is made relative
for r in relative_reads.index:
if relative_reads[r]:
rf.loc[r,c] = conditionalRelativePath(relative_path,rf.loc[r,c])
# temp = rf[relative_reads][c]
# rf.loc[relative_reads,c] = temp.apply(reads_rel_path)
return rf
############ Class Based Inventory Manager #######################
##This is basically a wrapper for a set of dataframes representing the inventory files that controls a subdirectories in a repository
class Inventory:
#Why arent' these just global constants?
ASSEMBLY_DIR_REPO = 'assemblies'
READ_DIR_REPO = 'reads'
BAM_DIR_REPO = 'BAM'
INVENTORY_FILE_BASE = 'inventory.tab'
READMAP_FILE = 'read_assembly_map.tab'
def __init__(self,repository=None):
self.assembly_frame = self.read_frame = self.contig_frame = self.readmap_frame = None
self.assembly_valid = self.read_valid = None
if repository is None:
repository = get_default_settings(SETTING_PATH)['repository']
self.setRepository(repository)
self.valid = False ##Needs to activate repository by initiating reader or writer
self.readInvTemplate = pd.DataFrame(columns = readInventoryHeaders)
def setRepository(self,repository):
##Set paths
self.repository = os.path.abspath(repository)
self.assembly_directory = os.path.join(self.repository,self.ASSEMBLY_DIR_REPO)
self.read_directory = os.path.join(self.repository,self.READ_DIR_REPO)
self.assembly_file = os.path.join(self.assembly_directory,self.INVENTORY_FILE_BASE)
self.contig_file = utilities.appendToFilename(self.assembly_file,'_contigs')
self.read_file = os.path.join(self.read_directory,self.INVENTORY_FILE_BASE)
self.readmap_file = os.path.join(self.assembly_directory,self.READMAP_FILE)
self.BAM_directory = os.path.join(self.repository,self.BAM_DIR_REPO)
def repositoryExists(self):
result = True
result &= os.path.isdir(self.repository)
result &= os.path.isdir(self.assembly_directory)
result &= os.path.isdir(self.read_directory)
return result
def activateRepository(self,ValidOnly,r_verbose=True): #pylint: disable=attribute-defined-outside-init
result = False
if self.repositoryExists():
try:
if r_verbose or _verbose:
print("Activating repository at "+self.repository)
### Load the frames
self.assembly_frame, self.assembly_valid = assemblySetup(self.assembly_file,self.assembly_directory,ValidOnly=ValidOnly)
vprint('Found {} valid assemblies and {} invalid'.format(sum(~self.assembly_frame.Invalid),sum(self.assembly_frame.Invalid)))
## REad frame
self.read_frame, self.read_valid = readSetup(self.read_file,self.read_directory,ValidOnly=ValidOnly)
vprint('Found {} valid read sets and {} invalid'.format(sum(~self.read_frame.Invalid),sum(self.read_frame.Invalid)))
##Other frames
self.contig_frame = pd.read_table(self.contig_file,dtype=str) if os.path.isfile(self.contig_file) else pd.DataFrame(columns=inventoryContigHeaders)
self.contig_frame['Version'] = self.contig_frame['Version'].astype(float).astype(int) ##This keeps getting saved as a floats (0.0) and can't be directly cast to int
self.readmap_frame = readAssemblyMapSetup(self.readmap_file)
result = True
except:
print("Error reading repository files")
raise
if not result:
print("ERROR: Unable to activate repository at "+self.repository)
print("")
return result
##Splits input frame into two parts: one for import (new), and one for lookup (existing)
def compareWithExistingReads(self,inputFrame):
##First, split the input frame into old and new
inputFrame = inputFrame.append(self.readInvTemplate) ##New object. Original input frame is unmodified.
read1Bool = inputFrame.Read1.isin(self.read_frame.Original_Read1)
read2Bool = inputFrame.Read2.isin(self.read_frame.Original_Read2)
assert (read1Bool == read2Bool).all(), "Existing read files are matched with new files. Cannot proceed"
readsExistingBool = (read1Bool & read2Bool)
readsExisting = inputFrame[readsExistingBool].copy()
readsNew = inputFrame[~readsExistingBool].copy()
##Now find the matching reads already in the database
Eread1Bool = self.read_frame.Original_Read1.isin(inputFrame.Read1)
Eread2Bool = self.read_frame.Original_Read2.isin(inputFrame.Read2)
assert (Eread1Bool == Eread2Bool).all(), "Existing read files are matched with new files (2). Cannot proceed"
EreadsExsitingBool = (Eread1Bool & Eread2Bool)
readsExistingInDB = self.read_frame[EreadsExsitingBool].copy()
## Transfer the ReadSet identifier to the input frame to set index and copy files
for i, row in readsExisting.iterrows():
match = (readsExistingInDB.Original_Read1 == row['Read1']) & (readsExistingInDB.Original_Read2 == row['Read2'])
read_set_list = readsExistingInDB[match]['Read_Set'].unique().tolist()
if len(read_set_list) == 1:
rs = read_set_list[0]
readsExisting.loc[i,'Read_Set'] = rs
else:
##TODO: this has not been tested
print("Waring:")
print("\tDuplicate entries for {}. Matching to version with most links".format(row.Lab_ID))
try:
self.countReadLinks(False) ##recalculate all
idx = readsExistingInDB[match].idxmax('Link_Count')
readsExisting.loc[i,'Read_Set'] = readsExistingInDB.loc[idx,'Read_Set']
except:
raise RuntimeError("Duplicate entries for {}. Cannot match to existing reads".format(row.Lab_ID))
return readsExisting, readsNew
##TODO: Not tested
###If this is going to be saved, the read_valid frame needs to be modified
def countReadLinks(self,NaOnly=True):
lc = 'Link_Count'
if lc not in self.read_frame.columns:
self.read_frame[lc] = None
update_rows = self.read_frame[lc].isnull()
if not NaOnly: ## everything
update_rows |= self.read_frame[lc].notnull()
for i, r in self.read_frame[update_rows].iterrows():
link_count = int(sum((self.readmap_frame.Lab_ID == r['Lab_ID']) & (self.readmap_frame.Read_Set == r['Read_Set'])))
self.read_frame.loc[i,'Link_Count'] = link_count
###Warning: this will relead the inventory. Save first!
def linkInvalidAssToReads(self):
self.valid = self.activateRepository(ValidOnly=False,r_verbose=False) ##Reload for writing
invalidAss = self.assembly_frame[self.assembly_frame.Invalid]
linkBool = (self.readmap_frame.Lab_ID.isin(invalidAss.Lab_ID) & self.readmap_frame.Assembly_Version.isin(invalidAss.Version))
invalidLinks = self.readmap_frame[linkBool]
readBool = self.read_frame.Read_Set.isin(invalidLinks.Read_Set) & self.read_frame.Lab_ID.isin(invalidLinks.Lab_ID)
questionableReads = self.read_frame[readBool].copy()
self.valid = self.activateRepository(ValidOnly=True,r_verbose=False) ##Reload for writing
return questionableReads
class InventoryReader(Inventory):
def __init__(self,repository=None):
super().__init__(repository)
self.valid = self.activateRepository(ValidOnly=True)
def duplicateInventoryFiles(self,directory):
print("Writing duplicate inventory files in "+directory)
self.valid = self.activateRepository(ValidOnly=False,r_verbose=False) ##Reload for writing
##Retain absolute paths rather than making them relative
utilities.safeMakeDir(directory)
if len(self.assembly_frame) > 0:
dest= os.path.join(directory,'assembly_inventory.tab')
try:
assemblyPackup(self.assembly_frame.where(self.assembly_valid),directory).to_csv(dest,sep='\t',index=False)
except IOError:
print("ERROR: Unable to write to {}".format(dest))
if len(self.contig_frame) > 0:
dest = os.path.join(directory,'contig_inventory.tab')
try:
self.contig_frame.to_csv(dest,sep='\t',index=False)
except IOError:
print("ERROR: Unable to write to {}".format(dest))
if len(self.read_frame) > 0:
dest = os.path.join(directory,'read_inventory.tab')
try:
readPackup(self.read_frame.where(self.read_valid),directory).to_csv(dest,sep='\t',index=False)
except IOError:
print("ERROR: Unable to write to {}".format(dest))
if len(self.readmap_frame) > 0:
dest = os.path.join(directory,'reads_to_assemblies.tab')
try:
self.readmap_frame.to_csv(dest,sep='\t',index=False)
except IOError:
print("ERROR: Unable to write to {}".format(dest))
self.valid = self.activateRepository(ValidOnly=True,r_verbose=False)
def getAssemblyRecords(self,isolates,ActiveOnly = True):
assert isinstance(isolates,list)
##Select assemblies
af = self.assembly_frame
allowedVersions = af['Active'] != False if ActiveOnly else af['Invalid'] != True
activeFrame = af[af['Lab_ID'].isin(isolates) & allowedVersions].copy()
##Check that we got everything
invList = activeFrame['Lab_ID'].tolist()
invSet = set(invList)
if len(invSet) < len(invList):
print("Notice: Duplicate Active assemblies in inventory. Exporting all.")
reqSet = set(isolates)
if len(reqSet) != len(isolates):
print('Notice: Duplicate values in isolate request list')
assert reqSet.issuperset(invSet)
if reqSet != invSet:
print("Notice: Not all genomes are present in our assembly Repository")
##Report if any are present as reads only
missingSet = reqSet.difference(invSet)
rf = self.read_frame
readsOnly = rf[rf['Lab_ID'].isin(missingSet)]
if len(readsOnly) > 0:
print ("The following isolates are present in the read collection: {}".format(",".join(readsOnly['Lab_ID'].unique())))
### Get information about the contigs
cf = self.contig_frame.set_index(['Lab_ID','Version'],drop=False)
activeContigs = cf[cf['Lab_ID'].isin(activeFrame['Lab_ID'])]
return activeFrame, activeContigs
def exportAssemblies(self,isolates,destination,ActiveOnly=True,InventoryOnly=False, include_qual= False, include_tech=True):
assert isinstance(isolates,list)
if self.repository in destination:
print("Cannot write to repository using this method. This is serious, so we're stopping everything")
sys.exit(1)
print("Exporting assemblies: {}".format(",".join(isolates)))
utilities.safeMakeDir(destination)
af,cf = self.getAssemblyRecords(isolates,ActiveOnly)
contig_file = os.path.join(destination,os.path.basename(self.contig_file))
assembly_file = os.path.join(destination,os.path.basename(self.assembly_file))
utilities.safeOverwriteTable(contig_file, cf, 'tab',index=False)
if not InventoryOnly:
af.rename(columns={'Filename':'Repository_File'},inplace=True)
for idx, row in af.iterrows():
baseID = getBaseID(idx[0],idx[1]) #Lab_ID and Version
if include_tech:
tech = row['Technology'] if pd.notnull(row['Technology']) else "TechUnavail"
baseID += '_' + tech
dest_filename = os.path.abspath(os.path.join(destination,baseID) + '.fasta')
exportGenomeFASTA(row['Repository_File'],dest_filename,include_qual = include_qual)
af.loc[idx,'Filename'] = dest_filename
af['File_Basename'] = af.Filename.apply(os.path.basename)
utilities.safeOverwriteTable(assembly_file,af,'tab',index=False)
return af
def exportReads(self,isolates,destination,linked=True,InventoryOnly=False):
assert isinstance(isolates,list)
if self.repository in destination:
print("Cannot write to repository using this method. This is serious, so we're stopping everything")
sys.exit(1)
print("Exporting reads: {}".format(",".join(isolates)))
utilities.safeMakeDir(destination)
rf = self.read_frame
new_frame = rf[rf.Lab_ID.isin(isolates)]
exportReadFrame(new_frame,destination,linked=linked,InventoryOnly=InventoryOnly)
def getReadsWithoutAssembly(self,isolate_level = True):
result = None
if isolate_level:
assembly_list = self.assembly_frame['Lab_ID'].unique().tolist()
rf = self.read_frame
result = rf[~rf['Lab_ID'].isin(assembly_list)]
else: ##Use the read-assembly linkage
print("Not implemented")
return result
class InventoryWriter(Inventory):
def __init__(self,repository=None):
super().__init__(repository)
self.createRepositoryPaths()
self.valid = self.activateRepository(ValidOnly=False)
def createRepositoryPaths(self):
utilities.safeMakeDir(self.repository)
utilities.safeMakeDir(self.assembly_directory)
utilities.safeMakeDir(self.read_directory)
###UPdate file is a hand-modified file; this is to assure that I don't accidentally delete data
def update_assemblies(self,update_file):
df,df_valid = assemblySetup(update_file,ValidOnly=False) ##Sets the index
df = df.where(df_valid) ##So that we don't overwrite the filled-in boolean values
### Remove fields that we do not want to update
update_columns = [x for x in df.columns if x not in inventoryHeadersStatic]
df = df[update_columns].copy() ##Index has already been set
##Update and reset the "valid" mask. Must first masked the current auto-fill values (Active, Invalid).
af = self.assembly_frame.where(self.assembly_valid) ##So that we can track which fields have been filled in
af.update(df)
# castAssemblyColumns(af) ##previous step changes everything to float for some stupid reason (both af and df treat it as int)
self.assembly_valid = af.notnull()
##Test if there has been updating
same = (af.where(self.assembly_valid) == self.assembly_frame.where(self.assembly_valid))
updated = not all(same.apply(all))
if updated:
self.assembly_frame.update(af) ##Keep the auto-filled values from assembly setup
castAssemblyColumns(self.assembly_frame) ##Updating on subset changes everything to float. Can only cast back if default values are filled (no null)
else:
print("Unable to update assembly inventory with "+update_file)
return updated
###UPdate file is a hand-modified file; this is to assure that I don't accidentally corrupt the data in an obvious way and there is a backup copy
def update_reads_fromFile(self,update_file):
df,df_valid = readSetup(update_file,ValidOnly=False)
df = df.where(df_valid)
rf = self.read_frame.where(self.read_valid)
rf.update(df)
self.read_valid = rf.notnull()
same = (rf.where(self.read_valid) == self.read_frame.where(self.read_valid))
updated = not all(same.apply(all))
self.read_frame.update(rf)
if not updated:
print("Unable to update read inventory with "+update_file)
return updated
def saveInventoryFiles(self):
##Note: indicies are not dropped from the regular fields, so they should not be saved
if len(self.assembly_frame) > 0:
temp_ass = assemblyPackup(self.assembly_frame.where(self.assembly_valid),self.assembly_directory)
utilities.safeOverwriteTable(self.assembly_file, temp_ass, 'tab',index=False)
if len(self.contig_frame) > 0:
utilities.safeOverwriteTable(self.contig_file, self.contig_frame,'tab',index=False)
if len(self.read_frame) > 0:
rf = readPackup(self.read_frame.where(self.read_valid),self.read_directory)
utilities.safeOverwriteTable(self.read_file,rf,'tab',index=False)
if len(self.readmap_frame) > 0:
utilities.safeOverwriteTable(self.readmap_file,self.readmap_frame,'tab',index=False)
###newFrame has new reads to incorporate into the inventory.
### existingUpdate has reads that already exist, but may have some additional data to incorporate (must be a free-form field; not one that gets default values added -- e.g. Invalid)
def mergeReadsWithInventory(self,newFrame,existingUpdate):
readsAdded = pd.DataFrame()
for _, row in newFrame.iterrows():
read_set_frame = self.read_frame[self.read_frame['Lab_ID'] == row['Lab_ID']]
candidate_set = len(read_set_frame)
while candidate_set in read_set_frame.Lab_ID:
candidate_set += 1
assert candidate_set < 2 * len(read_set_frame) + 1, "Unable to calculate a new read sets id for {}. Something is wrong. Aborting ".format(row['Lab_ID'])
row.loc['Read_Set'] = candidate_set
self.read_frame = self.read_frame.append(row,ignore_index=True)
readsAdded = readsAdded.append(row,ignore_index=True)
if len(existingUpdate) > 0:
self.read_frame.update(existingUpdate.set_index(['Lab_ID','Read_Set'],drop=False) ,overwrite=False) ##Be conservative about updating. Don't want to modify timestamps... or do i?
readsAdded = readsAdded.append(existingUpdate)
# .loc[idx] = row ##record which Read_Set was assigned to these reads
return readsAdded
def addReadsToInvenotry(self,newReads):
pass
def addAssembliesToInventory(self,newAss):
pass
def ingestBAMbyLinkage(self,assembliesAdded):
utilities.safeMakeDir(self.BAM_directory)
if 'BAM_File' in assembliesAdded.columns:
for _, row in assembliesAdded[assembliesAdded.BAM_File.notnull()].iterrows():
src = row.loc['BAM_File']
map_ext = os.path.splitext(src)[1].lstrip('.')
base_ID = getBaseID(row.loc['Lab_ID'], row.loc['Version'])
dest = os.path.join(self.BAM_directory,"{}.{}".format(base_ID,map_ext))
try:
os.link(src,dest)
except IOError:
print("Unable to link mapping file. Trying to copy instead...\n\t"+dest)
try:
copyfile(src,dest)
except IOError:
print("Unable to copy mapping file... \n\t" + dest)
################ Traditional Method-based with Globals ###################
###Set globals
#pylint: disable=W0601
def setRepositoryPaths(new_repository,ingestion=False):
global Repository,RepoAssemblyDir,RepoReadDir,RepoAssemblyInvFile,RepoContigInvFile,RepoReadInvFile,RepoReadMapFile
Repository = os.path.abspath(new_repository)
if ingestion:
utilities.safeMakeDir(Repository)
RepoAssemblyDir = os.path.join(Repository,ASSEMBLY_DIR_REPO)
if ingestion:
utilities.safeMakeDir(RepoAssemblyDir)
RepoReadDir = os.path.join(Repository,READ_DIR_REPO)
if ingestion:
utilities.safeMakeDir(RepoReadDir)
RepoAssemblyInvFile = os.path.join(RepoAssemblyDir,INVENTORY_FILE_BASE)
RepoContigInvFile = utilities.appendToFilename(RepoAssemblyInvFile,'_contigs')
RepoReadInvFile = os.path.join(RepoReadDir,INVENTORY_FILE_BASE)
RepoReadMapFile = os.path.join(RepoAssemblyDir,READMAP_FILE)
chksum_prefix = 'Adler32_'
#Required means that the input file must have this field
##Fields can only be int if they are guaranteed to exist in every sample
inventoryHeadersFrame = pd.DataFrame(columns=['Field','dtype','Required','Recommended','Updatable'],data=[
('Lab_ID',str,True,True,False),
('Version',int,False,False,False),
('Filename',str,True,True,False),
('BAM_File',str,False,True,False),
('Project',str,False,True,True),
('Date_Created',str,False,False,False), ##Can be cast with pd.to_datetime,False), but apparently not with astype
('Date_Ingested',str,False,False,False),
('Technology',str,True,True,True),
('Assembler',str,False,True,True),
('BaseCaller',str,False,True,True),
('Person Performing Analysis',str,True,True,True),
('Institution Performing Analysis',str,True,True,True),
('Gaps',bool,False,True,True),
('Contig_Count',int,False,False,False),
('Bases_In_Contigs',int,False,False,False),
('Large_Contig_Count',int,False,False,False),
('Small_Contig_Count',int,False,False,False),
('Bases_In_Large_Contigs',int,False,False,False),
('Bases_In_Small_Contigs',int,False,False,False),
('Mean_Depth_of_Coverage',float,False,True,True),
('Reference_Assembly',str,False,True,True),
('Original_Filename',str,False,False,False),
(chksum_prefix+'Filename',str,False,False,False),
('Active',bool,False,False,True),
('Invalid',bool,False,True,True),
('Notes',str,False,True,True)
])
inventoryHeaders = [x for x in inventoryHeadersFrame['Field'].tolist()]
reqAssFields = inventoryHeadersFrame['Required']
inventoryHeadersRequired = [x for x in inventoryHeadersFrame[reqAssFields]['Field'].tolist()]
recAssFields = inventoryHeadersFrame['Recommended']
inventoryHeadersRecommended = [x for x in inventoryHeadersFrame[recAssFields]['Field'].tolist()]
updateAssFields = inventoryHeadersFrame['Updatable']
inventoryHeadersStatic = [x for x in inventoryHeadersFrame[~updateAssFields]['Field'].tolist()]
inventoryContigHeaders = ['Invalid','Notes','Ambiguous','Version','Lab_ID']
readAssemblyMapFrame = pd.DataFrame(columns=['Field','dtype','Required'],data=[
('Lab_ID',str,True),
('Read_Set',int,True),
('Assembly_Version',int,True),
('Notes',str,False),
])
readAssemblyMapHeaders = [x for x in readAssemblyMapFrame['Field'].tolist()]
# ['Lab_ID','Assembly_Version','Read_Set','Notes'] ##There is no natural key field in this table
readInventoryHeadersFrame = pd.DataFrame(columns=['Field','dtype','Required','Recommended'],data=[
('Lab_ID',str,True,True),
('Read1',str,False,True),
('Read2',str,False,True),
('Read_Set',int,False,False),
('Original_Read1',str,False,False),
('Original_Read2',str,False,False),
('Institution Performing WGS',str,True,True),
('Person Performing WGS',str,True,True),
('Technology',str,True,True),
('Machine_Class',str,False,True),
('Machine_ID',str,False,True),
('Reaction_Parameters',str,False,True), #e.g. read length, chemistry
('Project',str,False,True),
('Notes',str,False,True),
('Date_Created',str,False,False),
('Date_Ingested',str,False,False),
('Invalid',bool,False,True),
(chksum_prefix+'Read1',str,False,False),
(chksum_prefix+'Read2',str,False,False)
])
readInventoryHeaders = [x for x in readInventoryHeadersFrame['Field'].tolist()]
reqReadFields = readInventoryHeadersFrame['Required']
readInventoryHeadersRequired = [x for x in readInventoryHeadersFrame[reqReadFields]['Field'].tolist()]
recReadFields = readInventoryHeadersFrame['Recommended']
readInventoryHeadersRecommended = [x for x in readInventoryHeadersFrame[recReadFields]['Field'].tolist()]
#for Notes
readHeaderShortcuts = {'Institution':'Institution Performing WGS',
'Person':'Person Performing WGS'}
assHeaderShortcuts = {'Institution':'Institution Performing Analysis',
'Person':'Person Performing Analysis'}
# dfHeaders = ['Lab_ID','Filename']
readFileKeys = ['Read1','Read2']
assFileKeys = ['Filename','BAM_File']
# read_data_fileHeaders = ['Lane','Sample ID','Index','Yield (Mbases)','# Reads','% Perfect Index Reads','% of >= Q30 Bases (PF)','Mean Quality Score (PF)']
def vprint(text):
if _verbose:
print(text)
############ Organizing Mening Lab datasets
########## Transferred to NGS_data_utilities ########################
##Looks for a Meningitis lab sample identifier (M#) in the filename. Currently uses the strict definition ("M" plus 5 digits).
# Returns empty string if fails
########## Transferred to NGS_data_utilities ########################
# def guessNameFromGenomeFile(filename):
# return NGS_data_utilities.guessNameFromGenomeFile(filename)
####### This is the core function for identifying genomes in a directory
##Returns dataframe with header of.##Also saves the DF to file in case it needs to be reviewed/edited
########## Transferred to NGS_data_utilities ########################
# def listGenomeFilesWithNames(directory,outfile = None, deep_search = True):
# return NGS_data_utilities.listGenomeFilesWithNames(directory,outfile, deep_search)
##Copy the assembly file to a new location, with a simple name and simple contig identifiers
### Try to keep the same contig ID numbers and keep them in the same order in the file
## Need a way to match up the unpaired read files that arise from filtering of reads
# r1 = 'R1'
# r2 = 'R2'
# read_codes = [r1,r2]
# def listReadFilesWithNames(directory,outfile = None,read_extension=read_ext):
# return NGS_data_utilities.listReadFilesWithNames(directory,outfile,read_extension)
# def mergeReadDataFile(filename):
# def openReadDataFile(filename):
# return NGS_data_utilities.openReadDataFile(filename)
# def pairReads(fileList):
# return NGS_data_utilities.pairReads(fileList)
#
# def parseIlluminaNames(filename):
# return NGS_data_utilities.parseIlluminaNames(filename)
# # vprint(readInfo[filename])
######## END transfer to NGS_data_utilities ###############3
##Deprecated?
def listAssembliesWithReads(assemblyDir, readDir, outfile = None):
if (assemblyDir is not None) and os.path.isdir(assemblyDir):
assFrame = NGS_data_utilities.listGenomeFilesWithNames(assemblyDir)
else:
assFrame = None
if assemblyDir:
print("{} is not a directory.".format(assemblyDir))
if (readDir is not None) and os.path.isdir(readDir):
readFrame = NGS_data_utilities.listReadFilesWithNames(readDir)
else:
readFrame = None
if readDir:
print("{} is not a directory.".format(readDir))
if not (assFrame is None or readFrame is None):
finalFrame = pd.merge(assFrame,readFrame,how='outer')
elif assFrame is not None:
finalFrame = assFrame
elif readFrame is not None:
finalFrame = readFrame
else:
print("No Data!")
return None
if outfile is not None:
finalFrame.to_csv(outfile,sep='\t',index=False)
return finalFrame
def moveToStandardizedAssemblyFile(genome_file,sample_name,output_directory="./",version = 0,compress=True):
try:
utilities.safeMakeDir(output_directory)
except IOError:
print("Failed to make output directory: {}".format(output_directory))
raise
(genome_format,compressed) = utilities.guessFileFormat(genome_file)
new_filename = None
if genome_format is not None:
base_ID = getBaseID(sample_name,version)
try:
if compressed:
with gzip.open(genome_file,'rt') as fin:
contig_list = [x for x in SeqIO.parse(fin,genome_format)]
else:
with open(genome_file) as fin:
contig_list = [x for x in SeqIO.parse(fin,genome_format)]
except IOError:
print("Failed to read genome file: {}".format(genome_file))
raise
if len(contig_list) == 0:
print("Error in file {}: failed to read genome file. Check extension for implied format".format(genome_file))
return None
new_contigs, c = seq_utilities.standardize_contig_names(contig_list,base_ID)
if c > 0:
print("Re-numbered {} contigs in assembly {}".format(c,base_ID))
new_filename = os.path.join(output_directory,"{}.{}".format(base_ID,genome_format))
if compress:
new_filename += '.gz'
if not os.path.exists(new_filename):
if compress:
with gzip.open(new_filename,'wt') as fout:
SeqIO.write(new_contigs,fout,genome_format)
else:
with open(new_filename,'wt') as fout:
SeqIO.write(new_contigs,fout,genome_format)
os.chmod(new_filename, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH) #Read only
else:
print("Unable to save file {} because of prexisting file with same name".format(new_filename))
new_filename = None
else:
print("Failed to identify format for genome file: "+genome_file)
print("Please use an extension in this list : {}".format(utilities.format_guesser))
return new_filename
#Update the inventory file so that it includes the read files that were used to assemble each genome
############## NOT COMPLETE ######################
# def linkGenomeToReads(inventory_file,genome_path,reads_path,read_destination=None):
# if os.path.isfile(inventory_file):
# inventory = pd.read_table(inventory_file)
# else:
# print("Inventory file is not valid. Exiting")
# print(inventory_file)
# return None
#Returns inventory dataframe
## The inventory does not have the "Filename" field that is in the genome_frame
def moveListedGenomes(genome_frame,destination_dir,inventory_file,field_dict=None,extract_info=True):
assert isinstance(genome_frame,pd.DataFrame)
assert field_dict is None or isinstance(field_dict,dict)
assert isinstance(inventory_file,str)
assert isinstance(extract_info,bool)
utilities.safeMakeDir(destination_dir) ##Will throw error if directory cannot be created
##Validate list of assemblies
if genome_frame is None:
raise Exception("No genome frame")
genome_frame_trimmed = genome_frame[genome_frame['Lab_ID'] != ''].copy()
diff = len(genome_frame) - len(genome_frame_trimmed)
if diff > 0:
vprint("Unable to move {} files because no sample name is identified".format(diff))
#Get exsiting inventory for versioning and appending
inventory_dir = os.path.dirname(inventory_file)
if os.path.isfile(inventory_file):
inventory = pd.read_table(inventory_file,dtype=str)
inventory_path_join = functools.partial(os.path.join,inventory_dir)
for c in assFileKeys:
if c in inventory.columns:
good_values = inventory[c].notnull()
inventory.loc[good_values,c] = inventory[good_values][c].apply(inventory_path_join) # pylint: disable=no-member
else:
inventory = pd.DataFrame(columns=inventoryHeaders)
##Add the notes to the incoming list
if field_dict is not None:
for header,value in field_dict.items():
genome_frame_trimmed[header] = value
## Make a new name for the file in the repository
pre_inv = pd.DataFrame(columns=inventoryHeaders)
for f,g in genome_frame_trimmed.groupby('Filename'):
g = g.copy()
sample_name = g['Lab_ID'].unique().tolist()[0] ##TODO: error check that labID is consistant for Filename
tempFrame = inventory[inventory['Lab_ID'] == sample_name].append(pre_inv[pre_inv['Lab_ID'] == sample_name],ignore_index=True)
version = 0 if len(tempFrame) == 0 else int(tempFrame['Version'].max()) + 1
file_name = f
if not os.path.isfile(file_name):
print("File does not exist: {}".format(file_name))
continue
try:
g['Date_Created'] = time.ctime(os.path.getctime(file_name))
except OSError:
print("ERROR: failed to read creation date from file {}".format(file_name))
continue
else:
g['Date_Ingested'] = time.ctime()
try:
new_name = moveToStandardizedAssemblyFile(file_name,sample_name,destination_dir,version)
except IOError:
new_name = None
if new_name == None:
print("ERROR: failed to move file {}".format(file_name))
else:
g['Version'] = str(version)
g['Filename'] = new_name
g['Original_Filename'] = file_name
for _, row in g.iterrows():
pre_inv = pre_inv.append(row,ignore_index=True)
pre_inv = calcChecksums(pre_inv,assFileKeys,validation=False)
#~ temp_filename = utilities.appendToFilename(inventory_file,'temp')
#~ utilities.safeOverwriteCSV(temp_filename,inventory,sep='\t',index=False)
if extract_info:
(pre_inv, contig_inventory) = extractAssemblyStats(pre_inv,inventory_dir) #make contig inventory
##Establish standard headers even if they are currently empty
cols = contig_inventory.columns.values
for col in inventoryContigHeaders:
if col not in cols:
contig_inventory[col] = ''
contig_filename = utilities.appendToFilename(inventory_file,'_contigs')
if os.path.isfile(contig_filename):
old_contigs = pd.read_table(contig_filename)
contig_inventory = old_contigs.append(contig_inventory)
utilities.safeOverwriteCSV(contig_filename,contig_inventory,sep='\t',index=False)
##Make the final inventory set
inventory = inventory.append(pre_inv.drop_duplicates(subset=['Lab_ID','Version']))
##Mark genome "Active" if unique and not Invalid -- otherwise notify user of need to choose one
prospective = (inventory['Active'] != False) & (~inventory['Active'].isin(['False','FALSE','0'])) & (inventory['Invalid'] != True) #Cannot be previously passed up
inventory.loc[~prospective,'Active'] = False ##Assures that all Invalid assemblies are also marked not Active
lab_ids = inventory[prospective]['Lab_ID']
dup_list = lab_ids[lab_ids.duplicated()].tolist()
dups = inventory['Lab_ID'].isin(dup_list)
inventory.loc[prospective & ~dups,'Active'] = True
##Reorder the columns and save to file
cols = inventory.columns
extra_cols = [c for c in cols if c not in inventoryHeaders]
inventory = inventory[inventoryHeaders + extra_cols]
## Relative filepaths
ass_rel_path = functools.partial(os.path.relpath,start=inventory_dir)
for c in assFileKeys:
if c in inventory.columns:
good_values = inventory[c].notnull()
inventory.loc[good_values,c] = inventory[good_values][c].apply(ass_rel_path)# pylint: disable=no-member
utilities.safeOverwriteCSV(inventory_file,inventory,sep='\t',index=False)
### Report duplicate assemblies so that user can select which one is "active"
duplicates = inventory[prospective & dups].copy() ##Todo: this appears to be selecting some that are already inactive. Don't know why.
if len(duplicates) > 0:
dup_count = len(duplicates['Lab_ID'].unique())
print("#####\nNew sequences introduced for {} genomes for which alternative sequences are in the database. Designate some as Active\n########".format(dup_count))
duplicates = duplicates[inventoryHeaders].sort_values(by='Lab_ID') ##Limit it to standard headers for readability. Can always go to the inventory file for full info.]
dupFile = os.path.join(inventory_dir,'inventory_duplicates_choose_Active.tab')
try:
utilities.safeOverwriteCSV(dupFile,duplicates,sep='\t',index=False)
print("Duplicates are listed in "+dupFile)
except IOError:
print("Failed to write table of duplicated assemblies at {}.".format(dupFile))
return pre_inv
###NOTE: getOriginal not implemented ##########
###### Not tested ###########
#Copy read files to repository, and return a data frame with updated information about the read
def moveListedReads(read_frame,destination_dir):
moved_reads = pd.DataFrame(columns=read_frame.columns)
# cols = [col for col in read_frame.columns.tolist() if "Read" in col]
# assert len(cols) > 0
for idx, row in read_frame.iterrows():
# file_time = ''
for c in readFileKeys:
source = row[c]
if pd.notnull(source) and source != '':
dest = os.path.abspath(os.path.join(destination_dir,os.path.basename(source)))
if os.path.exists(dest):
print("Warning: file {} already exists. \n\tNot importing from {}".format(dest,source))
####### ToDo: Grab the old_inv and report the row for the existing data so that it can be linked to assembly
else:
transferred = False
try:
if re.match('http',source):
urllib.request.urlretrieve(source,dest) ##Untested
transferred = True
elif os.path.isfile(source):
# file_time = time.ctime(os.path.getctime(source)) #should be identical for paired files
try:
copyfile(source,dest)
transferred = True
except IOError:
print("ERROR: Unable to write to {}".format(dest))
else:
vprint("Not a read file: {}".format(source))
except Exception:
print("Warning: exception while transferring file {}. \n\tNot importing from {}".format(dest,source))
if transferred: #read file transferred to destination dir
row[c] = dest #os.path.relpath(dest,destination_dir)##Handled upon saving: Note: this assumes that the inventory file is in the reads directory
##This should already be assigned... but I guess it's safe to keep it here to make sure nothing gets mixed up
orig = "Original_{}".format(c)
if orig not in moved_reads.columns:
moved_reads[orig] = ''
row[orig] = source
# if
# read_inv.append(pd.Series([row['Lab_ID'],dest],readInventoryHeaders)) #Lab_ID and Filename
# row.loc['Date_Created'] = file_time
# row.loc['Date_Ingested'] = time.ctime()
moved_reads.loc[idx] = row #keep the original indicies
return moved_reads
##newFrame should already have basic inventory format
def mergeReadsWithInventory(newFrame):
update_Inv, _ = readSetup(RepoReadInvFile, RepoReadDir,ValidOnly=False)
# if os.path.isfile(RepoReadInvFile):
# update_Inv = pd.read_table(RepoReadInvFile)
# else:
# update_Inv = pd.DataFrame(columns=newFrame.columns)
for idx, row in newFrame.iterrows():
row.loc['Read_Set'] = str(sum(update_Inv['Lab_ID'] == row['Lab_ID']))
update_Inv = update_Inv.append(row,ignore_index=True)
newFrame.loc[idx] = row ##record which Read_Set was assigned to these reads
return update_Inv, newFrame
##TODO this needs to be incorporated into the InventoryReader -- the directories will be messed up otherwise
# def exportSelectedGenomes(inventorySelectionFrame,inventoryFile_Dir,destination_dir,output_compress=True,output_format='fastq',getOriginal=False,edge_trim=0,strict_contig=False,contigInvFrame=None):
# assert isinstance(inventorySelectionFrame,pd.DataFrame)
# assert os.path.isdir(inventoryFile_Dir)
# assert output_format in utilities.format_guesser.keys()
# assert edge_trim >= 0
# have_contigs = isinstance(contigInvFrame,pd.DataFrame)
# if have_contigs:
# assert 'Contig_Name' in contigInvFrame.columns
# CIF = contigInvFrame.set_index('Contig_Name')
# else:
# assert not strict_contig ##Must have a contig inventory in order to enforce strict contig selection
# utilities.safeMakeDir(destination_dir)
# for _, row in inventorySelectionFrame.iterrows():
# if getOriginal:
# filename = row.loc['Original_Filename'] ###Untested. Will only work on filesystem where consolidation was done
# else:
# filename = row.loc['Filename']
# filename = os.path.join(inventoryFile_Dir,filename)
# (genome_format,compressed) = utilities.guessFileFormat(filename)
#
# (_,name) = os.path.split(filename)
# dest_file = os.path.join(destination_dir,name)
# ##Test the destination
# if os.path.exists(dest_file):
# dest_file = utilities.appendToFilename(filename,"_"+time.strftime("%Y%m%d%H%M%S"))
# if os.path.exists(dest_file):
# print("Cowardly refusing to overwrite file: "+dest_file)
# print("Failed to export: "+filename)
# ##Could call ExportGenomeFasta
# if (output_format == genome_format) and (edge_trim == 0) and (not strict_contig): ##No need to modify contents
# if output_compress == compressed:
# copyfile(filename,dest_file)
# else: ##Simple decompress
# with gzip.open(filename,'rt') as fin:
# with open(dest_file,'wt') as fout:
# fout.writelines(fin)
# else:
# with utilities.flexible_handle(filename,compressed,'rt') as fin:
# with utilities.flexible_handle(dest_file,output_compress,'wt') as fout:
# for contig in SeqIO.parse(fin,genome_format):
# keep_contig = True
# if strict_contig: ##
# keep_contig = not CIF['Ambiguous']
# if keep_contig:
# if edge_trim>0:
# contig = seq_utilities.trimFASTQtoFirstBase(contig,edge_trim)
# SeqIO.write(contig,fout,output_format)
### Note, could extract above code for more flexible function
# def genomeOrganizer.extractActiveAssembly(genome_name,temp_genome,output_compress=True,output_format='fastq',getOriginal=False,edge_trim=0,strict_contig=False,contigInvFrame=None,repository)
#
def exportGenomeFASTA(genome_file,export_file,file_format = '',is_compressed = None, include_qual = False):
##Setup export
destination_dir = os.path.dirname(export_file)
utilities.safeMakeDir(destination_dir)
##Get info about necessary manipulations
if file_format == '':
(genome_format,genome_compressed) = utilities.guessFileFormat(genome_file)
else:
genome_format = file_format
genome_compressed = is_compressed
##Convert if needed
if genome_format != 'fasta' or genome_compressed:
with utilities.flexible_handle(genome_file,genome_compressed,'rt') as fin:
with open(export_file,'wt') as fout:
if genome_format != 'fasta':
SeqIO.write(SeqIO.parse(fin,genome_format),fout,'fasta')
if include_qual:
raise "Do This" #TODO: Export Qual
else:
fout.write(fin.read())
else:
try:
copyfile(genome_file,export_file)
except IOError:
print("ERROR: Unable to write to {}".format(export_file))
def exportReadFrame(rf,destination,linked=True,InventoryOnly=False):
utilities.safeMakeDir(destination)
if not InventoryOnly:
exported_rows = []
for _,r in rf.iterrows():
Fail = False
rs = r.Read_Set
lid = r.Lab_ID
read_set = '{}_rs{}'.format(lid,rs)
read_count = 0
src1 = r.Read1
if isinstance(src1,str) and os.path.isfile(src1):
base1 = os.path.basename(src1).replace(lid,read_set)
dest1 = os.path.join(destination,base1)
read_count += 1
src2 = r.Read2
if isinstance(src2,str) and os.path.isfile(src2):
base2 = os.path.basename(src2).replace(lid,read_set)
dest2 = os.path.join(destination,base2)
read_count += 1
if read_count == 2:
if linked:
try:
os.link(src1,dest1)
os.link(src2,dest2)
except IOError:
print("Unable to link paired read. Maybe try copying instead?\n\t"+dest1)
linkFail = True
if not linked or linkFail:
try:
copyfile(src1,dest1)
copyfile(src2,dest2)
except IOError:
print("Unable to copy paired reads... \n\t" + dest1)
Fail = True
elif read_count == 1:
print("Error: Found a single read file. Not exporting single reads.")
Fail = True
elif read_count == 0:
print("Error: Found no read files for {} set {}. Probably a SMRT Portal URL".format(lid,rs))
Fail = True
r['Read1'] = dest1 if not Fail else 'None'
r['Read2'] = dest2 if not Fail else 'None'
r['RepositoryRead1'] = src1 ##Just for reference
r['RepositoryRead2'] = src2
exported_rows.append(r)
rf = pd.DataFrame(exported_rows) ##Use updated names in exported inventory
read_file = os.path.join(destination,INVENTORY_FILE_BASE)
utilities.safeOverwriteTable(read_file, rf,'tab',index=False)
###DEPRECATE: move to Assembly Stats
## Returns a dictionary reporting the number of bases in CONTIG that fall below thresholds defined in QUAL_TARGETS
def bin_quality_scores(qual,targets):
### count bases at each quality score
qual_counter = defaultdict(int)
for x in qual: qual_counter[x] += 1
## count the numeber that fall beflow the threshold
qual_thresholds = defaultdict(int)
if len(qual_counter) > 0:
#count for this contig
max_score = max(qual_counter.keys())
qual_bases = 0
for i in range(0,max_score+1):
qual_bases += qual_counter[i]
if i in targets:
qual_thresholds[i] += qual_bases
return qual_thresholds
############ Not Tested ################
##InventoryFrame is updated, and returns a contig inventory if requested
###These variables are moved to AssemblyStats
qual_targets = [20,30,40,50] #Cutoff qualities for reporting low-quality bases; needs to be increasing, because we want values lower than (or equal to) target
quality_head = "Bases_Under_Q"
def extractAssemblyStats(inventoryFrame,inventoryDir=None):
### This is important, but for some reason this may return an "Int64Index" which does not have this function
# assert not inventoryFrame.index.has_duplicates(), 'Cannot extract assembly stats on data frame with non-unique values'
master_contig_inventory = pd.DataFrame()
# for f,g in inventoryFrame.groupby('Filename'):
# filename = f
# g = g.copy()
for idx, row in inventoryFrame.iterrows():
filename = row.loc['Filename']
if not os.path.isabs(filename) and inventoryDir is not None:
filename = os.path.join(inventoryDir,filename)
if os.path.isfile(filename):
(genome_format,compressed) = utilities.guessFileFormat(filename)
assert (compressed) or (inventoryDir is None), 'Always compressed in consolidated directory'
with utilities.flexible_handle(filename,compressed,'rt') as fin:
contig_list = [x for x in SeqIO.parse(fin,genome_format)]
if len(contig_list) > 0:
## Get stats for contigs into dataframe.
contigFrame = getContigStats(contig_list,hasQual = (genome_format == 'fastq'))
assert len(contig_list) == len(contigFrame), "Not all contigs are in dataframe"
##Link these contigs to the assembly
if "Lab_ID" in row:
contigFrame['Lab_ID'] = row['Lab_ID']
if "Version" in row:
contigFrame['Version'] = row['Version']
##Update the assembly record
inventoryFrame.loc[idx,'Contig_Count']=str(len(contig_list))
contigSizes = contigFrame['Contig_Size'].astype(int)
assemblySize = sum(contigSizes)
inventoryFrame.loc[idx,'Bases_In_Contigs'] = str(assemblySize)
largeContigs = contigSizes > 10000
inventoryFrame.loc[idx,'Large_Contig_Count'] = str(sum(largeContigs))
inventoryFrame.loc[idx,'Small_Contig_Count'] = str(sum(~largeContigs))
inventoryFrame.loc[idx,'Bases_In_Large_Contigs'] = str(sum(contigSizes[largeContigs]))
inventoryFrame.loc[idx,'Bases_In_Small_Contigs'] = str(sum(contigSizes[~largeContigs]))
## Import the quality scores
for c in contigFrame.columns:
if c.startswith(quality_head):
inventoryFrame.loc[idx,c] = str(sum(contigFrame[c]))
##Calculate N50 and N90
N_stats = calcN50_stats(contigSizes.tolist())
for n,size in N_stats.items():
header = "N{}".format(n)
inventoryFrame.loc[idx,header] = str(size)
##Add to inventory frame
master_contig_inventory.append(contigFrame,ignore_index=True)
else:
print("Error in assembly inventory. No contigs in file {}".format(filename))
else:
print("Error in assembly inventory. Cannot find file {}".format(filename))
return inventoryFrame, master_contig_inventory ##TODO: find more elegant solution for examining files twice
###DEPRECATE: this has been moved to AssemblyStats. Call that package...
def getContigStats(contig_iterator,hasQual):
contig_records = []
for contig in contig_iterator:
##### Quality scores
if hasQual:
qual = contig.letter_annotations["phred_quality"]
qual_threshold = bin_quality_scores(qual,qual_targets)
else:
qual_threshold = defaultdict(int) #only modified by fastq
##Record contig-specific measures,
this_record = { 'Contig_Name':contig.id,
'Contig_Size':str(len(contig))
}
if len(qual_threshold) > 0: ##TODO: test this with PacBio data
for i in qual_threshold.keys():
header = quality_head + "{}".format(i)
this_record[header] = qual_threshold[i]
contig_records.append(this_record)
##Record whole-genome traits
return pd.DataFrame(contig_records,dtype=str)
####DEPRECATE: move to Assembly Stats
def calcN50_stats(size_list, thresholds = None):
if thresholds is None:
thresholds = [50,90]
assert max(thresholds) < 100, "Cannot calcualte N100 or greater"
assert min(thresholds) > 0, "Cannot calculate N0 or less"
sortedSizes = sorted(size_list,reverse=True)#descending
totalSize = sum(sortedSizes)
threshold_sizes = {x: x*totalSize/100 for x in thresholds}
cumulative = 0
result = dict()
for key in sorted(threshold_sizes.keys()): #ascending
size = threshold_sizes[key] #target
while cumulative < size: #Since size > 0, will always enter loop on first run
x = sortedSizes.pop(0) ##This should never reach the end
cumulative += x
result[key] = x ##This will carry over from previous round if cumulative is already >= size
return result
##TODO: this will calculate twice if a file is listed twice (as in when muliple read sets are used fro assembly)
def calcChecksums(dataFrame,file_fields,validation=True): ##By default, do not modify table
newFrame = pd.DataFrame(columns = dataFrame.columns,index = dataFrame.index)
# newFrame = dataFrame[dataFrame.index.isnull()].copy()
##Add the checksum fields
# chk_fields = ['Adler32_'+x for x in file_fields]
if not validation:
for x in file_fields:
newFrame[chksum_prefix+x] = ''
##Calculate checksums
for idx, row in dataFrame.iterrows():
for x in file_fields:
if x in dataFrame.columns:
url = row[x]
if pd.notnull(url) and url != '':
if not re.match('http',url):
filename = url
if os.path.isfile(filename):
cksum_name = chksum_prefix+x
oldsum = row[cksum_name] if cksum_name in row else None
if pd.notnull(oldsum) or not validation:
with open(filename,'rb') as fin:
chksum = zlib.adler32(fin.read())
if validation:
if oldsum != str(chksum):
print("Error: invalid checksum on file: {}".format(filename))
else:
row[cksum_name] = str(chksum)
else:
print("Warning! Missing file {}".format(filename))
try:
newFrame.loc[idx] = row
except Exception:
print("Unable to add {} to {} at {}".format(row,newFrame,idx))
raise
return newFrame
import argparse
##Note: all_isolates_fasta will bypass htis
def get_isolates_from_args(args,inventory=None):
use_file = bool(args.isolate_file)
use_list = args.isolates and (len(args.isolates) > 0)
isolates = []
if args.all_isolates_fasta:
if use_file or use_list:
sys.exit("Nonsensical isolate selection: cannot export all and a specific list too")
else:
assert inventory is not None
assert isinstance(inventory, Inventory)
return inventory.assembly_frame['Lab_ID'].unique().tolist()
if use_file:
with open(args.isolate_file,'rt') as isolate_in:
for x in isolate_in:
isolates += x.strip().split()
print("Exporting files for {} genomes listed in {}".format(len(isolates),args.isolate_file))
if use_list:
print("Exporting files for {} genomes listed on command line".format(len(args.isolates)))
isolates = isolates + args.isolates
return isolates
def extract(extract_args):
#Check that repository exists -- redundant with InventoryReader
if os.path.isdir(Repository):
setRepositoryPaths(Repository)
else:
print("Invalid Repository: {}\n".format(Repository))
return 1
### Open inventory
inventory = InventoryReader(Repository)
if not inventory.valid:
return 1
### Get isolate list
isolates = get_isolates_from_args(extract_args,inventory)
if len(isolates) == 0:
print("No genomes requested. Exiting")
return 1
### Set export
export_dir = extract_args.target_directory if extract_args.target_directory else os.path.join(os.getcwd(),"GenomeData_"+time.strftime("%Y%m%d%H%M%S"))
export_dir = os.path.realpath(export_dir)
utilities.safeMakeDir(export_dir)
print("Exporting genome data to {}".format(export_dir))
ActiveOnly = False if extract_args.all_versions else True
if extract_args.all_isolates_fasta:
remaining_reads = inventory.getReadsWithoutAssembly()
read_filename = os.path.join(export_dir,'unassembled_reads.tab')
utilities.safeOverwriteTable(read_filename, remaining_reads, 'tab')
if not extract_args.reads_only:
# ass_dir = os.path.join(export_dir,'assemblies') if extract_args.reads_also else export_dir
# utilities.safeMakeDir(ass_dir)
exportedAssemblies = inventory.exportAssemblies(isolates,export_dir,ActiveOnly=ActiveOnly)
if extract_args.reads_only:
inventory.exportReads(isolates,export_dir)
# elif extract_args.reads_also:
# for _,row in exportedAssemblies.iterrows():
# pass
##TODO: this should look up the linked read and export it ... or maybe teh BAM file someday.
# print('Not implemented. Run again with "reads_only"')
##TODO: decide how to report the linkage between reads and assemblies
#
# print("Exporting {} assembly files".format(len(assInv)))
# edge_trim = 15 if extract_args.trim_edges else 0
# output_format = 'fasta' if extract_args.fasta else 'fastq'
# if extract_args.strict_contig_filter:
# contigInvFrame = pd.read_table(RepoContigInvFile)
# else:
# contigInvFrame = None
# exportSelectedGenomes(assInv,os.path.dirname(RepoAssemblyInvFile),extract_args.target_directory,
# output_compress=not extract_args.decompress,
# output_format = output_format,
# edge_trim=edge_trim,
# strict_contig=extract_args.strict_contig_filter,
# contigInvFrame=contigInvFrame)
# print("Finished exporting assembly files")
# export_inventory = os.path.join(extract_args.target_directory,INVENTORY_FILE_BASE)
# utilities.safeOverwriteCSV(export_inventory,assInv,sep='\t',index=False)
# if edge_trim or extract_args.strict_contig_filter:
# with open(export_inventory,'r+t') as inv_file:
# content = inv_file.read()
# inv_file.seek(0,0)
# inv_file.write('###The statistics in this file reflect the Repository file, not any modifications made during extraction' + '\n' + content)
# else:
# print("Unable to export assemblies due to absence of inventory file")
# ##Get read data
# if extract_args.reads_only or extract_args.reads_also:
# readInv = pd.read_table(RepoReadInvFile)
# readInvDir = os.path.dirname(RepoReadInvFile)
# if extract_args.reads_only:
# ##Get all reads associated with LabID
# readInv = readInv[readInv['Lab_ID'].isin(isolates)]
# readDir = export_dir
# else:
# ##assInv should be defined above
# lab_ID = readInv['Lab_ID'].isin(assInv['Lab_ID'])
# ass_version = readInv['Assembly_Version'].isin(assInv['Version'])
# isSelected = lab_ID & ass_version
# readInv = readInv[isSelected]
# readDir = os.path.join(export_dir,READ_DIR_REPO)
# print("Read extraction is not yet implemented")
# utilities.safeMakeDir(readDir)
# #######Export########
# for _,row in readInv.iterrows():
# for r in readFileKeys:
# source = row[r]
# dest = os.path.join(readDir,os.path.basename(source))
# if re.match('http',source):
# urllib.request.urlretrieve(source,dest) ##Untested
# else:
# source = os.path.join(readInvDir,source)
# copyfile(source,dest)
# #####################
# print("Finished exporting read files")
# readInvFile = os.path.join(readDir,INVENTORY_FILE_BASE)
# readInvFile = utilities.appendToFilename(readInvFile,'_reads')
# utilities.safeOverwriteCSV(readInvFile,readInv,sep='\t',index=False)
return 0
def submit(submit_args):
#Check that repository exists -- redundant with InventoryReader
if os.path.isdir(Repository):
setRepositoryPaths(Repository)
else:
print("Invalid Repository: {}\n".format(Repository))
return 1
### Get isolate list
isolates = get_isolates_from_args(submit_args)
if len(isolates) == 0:
print("No genomes requested. Exiting")
return 1
### Set export
export_dir = submit_args.target_directory if submit_args.target_directory else os.path.join(os.getcwd(),"GenomeData_"+time.strftime("%Y%m%d%H%M%S"))
utilities.safeMakeDir(export_dir)
inventory = InventoryReader(Repository)
if not inventory.valid:
return 1
inventory.exportAssemblies(isolates,export_dir,ActiveOnly=True,include_qual=True) ##TODO: export qual
def ingest(ingest_args):
def dump_and_exit(extraFrame=None,extraName='',realPath=True): ##Should probably rename "dump" since it does not exit on its own... (must call return)
##Establish location to dump error reports
if ingest_args.report_location:
dump_basename = ingest_args.report_location + '_ingest'
else:
dump_basename = os.path.join(os.getcwd(),"AbortedIngest")
ass_dump= utilities.appendToFilename(dump_basename, '_assemblies')
reads_dump = utilities.appendToFilename(dump_basename,'_reads')
bam_dump = utilities.appendToFilename(dump_basename, '_bam')
merged_dump = utilities.appendToFilename(dump_basename,'_merged')
extra_dump = utilities.appendToFilename(dump_basename, extraName)
##Notify user
if ingest_args.report_location:
print("Writing aborted import information to requested report location...")
else:
print("Writing aborted import information to current directory...")
print("\t"+dump_basename)
##Dump
if extraFrame is not None:
utilities.safeOverwriteTable(extra_dump, extraFrame,'tab',index=False)
if samFrame is not None:
utilities.safeOverwriteTable(bam_dump, samFrame, 'tab',index=False)
if assFrame is not None:
utilities.safeOverwriteTable(ass_dump, assFrame, 'tab',index=False)
if readFrame is not None:
utilities.safeOverwriteTable(reads_dump, readFrame,'tab',index=False)
if ingest_args.reads_to_assembly:
keys = ['Lab_ID']
rc_rename = {r:'Reads_{}'.format(r) for r in readFrame.columns if r not in keys}
readFrame.rename(columns=rc_rename,inplace=True)
mergeExport = pd.merge(assFrame,readFrame,on=keys)
if realPath:
for c in readFileKeys + assFileKeys:
if c in mergeExport.columns:
not_null = mergeExport[c].notnull() #& mergeExport[c].apply(os.path.isfile)
mergeExport.loc[not_null,c] = mergeExport[not_null][c].apply(os.path.realpath)
utilities.safeOverwriteTable(merged_dump,mergeExport,'tab',index=False)
# for c in assFileKeys:
# if c in assembly_frame.columns:
# good_values = assembly_frame[c].notnull()
# assembly_frame.loc[good_values,c] = assembly_frame[good_values][c].apply(inventory_path_join)
print("Exiting")
return 1
##Initialize main variables
readsAdded = readFrame = samFrame = None
assFrame = assAdded = None
mergedReadAss = None
### Test for legitimate comibnations of Ingest files
legit_args = True
if ingest_args.assembly_guide and ingest_args.combined_guide:
legit_args = False
print("Cannot provide both an assembly guide and a combined guide.")
require_reads = (len(ingest_args.read_notes) > 0) or ingest_args.reads_to_assembly or ingest_args.copy_read_files
have_reads = ingest_args.read_directory or ingest_args.combined_guide
##TODO: this may have bugs in Read_Set is given rather than read filenames
if require_reads and not have_reads:
legit_args = False
print("You have provided arguments that require reads but have not provided the reads themselves")
require_assemblies = ingest_args.assembly_notes or ingest_args.assembly_extension or ingest_args.reads_to_assembly or ingest_args.sam_directory or ingest_args.sam_subdirectory
have_assemblies = ingest_args.assembly_directory or ingest_args.assembly_guide or ingest_args.combined_guide
if require_assemblies and not have_assemblies:
legit_args = False
print("You have provided arguments that require assemblies but have not provided the assemblies themselves")
if ingest_args.sam_subdirectory and not ingest_args.assembly_directory:
legit_args = False
print("To search for BAM/SAM files, you must include an assembly directory.")
if ingest_args.assembly_guide:
if not os.path.isfile(ingest_args.assembly_guide):
legit_args = False
print("Assembly guide is not a file")
if ingest_args.combined_guide:
if not os.path.isfile(ingest_args.combined_guide):
legit_args = False
print("Combined guide is not a file")
if not legit_args:
return 1
##Make repository if necessary
setRepositoryPaths(Repository,True)
inventory = InventoryWriter(Repository)
if not inventory.valid:
return 1
#### Convert Note lists to dictionary (validating the format)
try:
read_notes = {t[0] : t[1] for t in [item.split("=") for item in ingest_args.read_notes]}
for key,replace in readHeaderShortcuts.items():
if key in read_notes:
read_notes[replace] = read_notes.pop(key)
assembly_notes = {t[0] : t[1] for t in [item.split("=") for item in ingest_args.assembly_notes]}
for key,replace in assHeaderShortcuts.items():
if key in assembly_notes:
assembly_notes[replace] = assembly_notes.pop(key)
except:
print("Error: Failed to parse the notes. Exiting")
raise
##Establish DataFrames for the imported files
invTemplate = pd.DataFrame(columns = readInventoryHeaders)
if ingest_args.combined_guide:
# print(ingest_args.combined_guide)
try:
guideFrame = pd.read_table(ingest_args.combined_guide)
except IOError:
print("Cannot open the guide file. Exiting")
raise
# print (guideFrame.columns)
guideFrame.dropna(how='all',inplace=True)
read_col_regex = re.compile(r"(Read[s12]?)([ _](.*)|$)?")
read_cols = ['Lab_ID'] #Need Lab_ID in both readFrame and assFrame
ass_cols = []
renamed_cols = read_cols.copy()
for c in guideFrame.columns.tolist():
read_match = read_col_regex.match(c)
if read_match:
read_cols.append(c)
if c in['Reads','Read']:
renamed_cols.append("Read1") ##Better not have two columns!!
else:
m_groups = read_match.groups()# 1 "Read[]", 2 optional suffix, 3 content of suffix
if m_groups[2] is None:
new_name = m_groups[0]
else:
new_name = m_groups[2]
renamed_cols.append(new_name) ##Don't rename ## TODO: strip "reads" prefix from name -- use the match
else:
ass_cols.append(c)
assFrame = guideFrame[ass_cols].copy()
readFrame = guideFrame[read_cols].copy() ##Frames will have same index
readFrame.columns = renamed_cols #TODO this will leave some messy field names, but I don't think we can universally remove "Read" from the beginning of each field
if readFrame is None and ingest_args.read_directory:
readFrame = NGS_data_utilities.listReadFilesWithNames(ingest_args.read_directory,read_extension=ingest_args.read_extension) #Read1 and Read2 for paired reads; Read1 for unpaired.
if readFrame is None:
print("Failed to identify any reads in "+ingest_args.read_directory)
print("Exiting")
return 1
##Organize columns so that standard fileds come first
readFrame = readFrame.append(invTemplate)
cols = readInventoryHeaders + [c for c in readFrame.columns if c not in readInventoryHeaders]
readFrame = readFrame[cols].copy()
if ingest_args.sam_directory:
sam_dir = ingest_args.sam_directory
if os.path.isdir(sam_dir):
samFrame = NGS_data_utilities.listSAMFilesWithNames(sam_dir)
else:
print("{} is not a directory.".format(sam_dir))
assembly_has_ReadSet = False
if assFrame is None: ##TODO: this treats the combined guide and the assembly guide inconsistantly
assemblyDir = ingest_args.assembly_directory
if assemblyDir:
# genome_frame = listAssembliesWithReads(ingest_args.assembly_directory,read_dir,ingest_args.assemblyInListFile)
print("Identifying genome assembly files in {}".format(assemblyDir))
if os.path.isdir(assemblyDir):
deep_search = False if ingest_args.shallow_search_assemblies else True
assFrame = NGS_data_utilities.listGenomeFilesWithNames(assemblyDir,deep_search = deep_search)
if assFrame is None or len(assFrame) == 0:
print("Failed to find assemblies in "+assemblyDir)
else:
print("{} is not a directory.".format(assemblyDir))
if ingest_args.sam_subdirectory:
tempSamFrame = NGS_data_utilities.listSAMFilesWithNames(assemblyDir, None, True)
samFrame = samFrame.append(tempSamFrame) if samFrame is None else tempSamFrame
assemblyGuide = ingest_args.assembly_guide
if assemblyGuide:
print("Reading assembly guide file: {}".format(assemblyGuide))
if os.path.isfile(assemblyGuide):
guideFrame = pd.read_table(assemblyGuide,comment='#').dropna(how='all')
assembly_has_ReadSet = 'Read_Set' in guideFrame.columns
if assembly_has_ReadSet:
if any(guideFrame.Read_Set.isnull()):
print("Assembly guide file has 'Read_Set' column, but some fields are empty. Cannot proceed.")
return 1
else: ##Assembly files can be listed twice only if they are reporting two read sets
dups = sum(guideFrame['Filename'].duplicated())
if dups > 0:
print("Some files are listed twice in the guide file -- aborting")
return 1
if assFrame is None:
assFrame = guideFrame
else:
old_sum = len(assFrame) + len(guideFrame)
assFrame = pd.merge(assFrame,guideFrame,how='outer',on='Filename')
if old_sum != len(assFrame):
print("Found {} duplicates between the guide file and the directory. Aborting".format(old_sum - len(assFrame)))
return 1
###Merge SAM field:
if (samFrame is not None) and (assFrame is not None):
updateFrame = NGS_data_utilities.mergeAssWithSam(assFrame,samFrame)
valid_sams = NGS_data_utilities.validateAssSamMatch(updateFrame)
if valid_sams:
assFrame = updateFrame
else:
return dump_and_exit(updateFrame,'BAM_Assembly')
##Before doing anything, make sure that required fields are in readFrame, assemblyFrame
### First, add the notes
for header,value in read_notes.items(): ##Remainder are Header=Value pairs for all records in directory
readFrame[header] = value
for header,value in assembly_notes.items():
assFrame[header] = value
assembly_notes = None
##Transfer shared columns
# shared_cols = ['Project','Technology'] ##Not sure how to do this without risk of overwriting something
##Test required fields
field_error = "Error: not all records contain all required fields\n"
if isinstance(assFrame,pd.DataFrame):
if set(inventoryHeadersRequired) <= set(assFrame.columns):
trim_table = assFrame[inventoryHeadersRequired].copy()
invalid_records = trim_table.isnull().any(axis=1)
if sum(invalid_records) > 0:
print(field_error + "\tAssembly frame has {} invalid records".format(sum(invalid_records)))
field_error = ''
reqAssSet = set(inventoryHeadersRequired)
realAssSet = set(assFrame.dropna(axis=1,how='any').columns.tolist())
missingAssSet = reqAssSet - realAssSet
else:
missingAssSet = set()
if isinstance(readFrame,pd.DataFrame):
readsExisting, readsNew = inventory.compareWithExistingReads(readFrame)
if len(readsNew) > 0:
if set(readInventoryHeadersRequired) <= set(readFrame.columns):
trim_table = readFrame[readInventoryHeadersRequired].copy()
invalid_records = trim_table.isnull().any(axis=1)
if sum(invalid_records) > 0:
print(field_error+"\tRead frame has {} invalid records".format(sum(invalid_records)))
field_error = ''
reqReadSet = set(readInventoryHeadersRequired)
realReadSet = set(readFrame.dropna(axis=1,how='any').columns.tolist())
missingReadSet = reqReadSet - realReadSet
else:
missingReadSet = set() ##No new reads
else:
missingReadSet = set()
if len(missingAssSet) > 0 or len(missingReadSet) > 0:
if len(missingAssSet) > 0:
print(field_error+"\tAssembly guide is missing the following fields: {}".format(missingAssSet))
field_error = ''
if len(missingReadSet) > 0:
print(field_error+"\tRead guide is missing the following fields: {}".format(missingReadSet))
field_error = ''
if not ingest_args.override_fields: ##Provide a guide file to work with
##First add the required fields
if assFrame is not None:
for c in inventoryHeadersRecommended:
if c not in assFrame.columns:
assFrame[c] = None
assFrame = assFrame[inventoryHeadersRecommended].copy()
if readFrame is not None:
for c in readInventoryHeadersRecommended:
if c not in readFrame.columns:
readFrame[c] = None
keep_headers = readInventoryHeadersRecommended
keep_headers += [x for x in NGS_data_utilities.read_data_fileHeaders if (x in readFrame.columns) and (x not in keep_headers)]
readFrame = readFrame[keep_headers].copy()
##Then export in appropriate format
return dump_and_exit()
########################## Updating starts here ####################3
##Import reads ###################### Replace this with InventoryWriter??? Already have inventory object
if readFrame is not None:
##Mark all reads as "original" location even if not moved to new location
for c in readFileKeys:
orig = "Original_{}".format(c)
readFrame[orig] = readFrame[c]
readsExisting, readsNew = inventory.compareWithExistingReads(readFrame)
vprint("Found {} existing reads and {} new reads".format(len(readsExisting),len(readsNew)))
if ingest_args.copy_read_files:########## TRANSFER #
print("Transferring reads to repository")
readsNew = moveListedReads(readsNew, RepoReadDir)
readsNew = calcChecksums(readsNew,readFileKeys,validation=False)
readsAdded = inventory.mergeReadsWithInventory(readsNew,readsExisting) ##ReadsAdded is just the preInv with the "Read_Set" column
# inventory.update_reads_fromFile(readsExisting) ##TODO figure out how to import the Excel data for the reads without messing up table
# inventory.saveReadInventory()
utilities.safeOverwriteCSV(RepoReadInvFile,readPackup(inventory.read_frame,RepoReadDir),sep='\t',index=False)
print("Updated read inventory")
if assFrame is not None and len(assFrame) > 0:
print("Moving {} assembly files into the repository".format(len(assFrame.Filename.unique())))
assAdded = moveListedGenomes(assFrame,RepoAssemblyDir,RepoAssemblyInvFile,assembly_notes,extract_info=True)
inventory.ingestBAMbyLinkage(assAdded) ##This adds files to BAM directory in repository, but does not modify inventory table
elif assemblyDir or assemblyGuide or ingest_args.combined_guide:
print("No genomes files. Exiting")
return 1
if ingest_args.reads_to_assembly or ingest_args.combined_guide or assembly_has_ReadSet:
if assAdded is None:
print("Cannot map reads to assemblies without both reads and assemblies. Aborting")
return 1
print("Creating link between reads and assemblies")
if ingest_args.combined_guide:
mergedReadAss = | pd.merge(assAdded,readsAdded,left_index=True,right_index=True,on='Lab_ID') | pandas.merge |
# ----------------------------------------------------------------------------
# Copyright (c) 2017-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import numpy.testing as npt
import pandas as pd
import qiime2
import biom
from warnings import filterwarnings
from qiime2.plugin.testing import TestPluginBase
from q2_types.feature_data import DNAFASTAFormat
import pandas.util.testing as pdt
from q2_quality_control.quality_control import (
exclude_seqs, evaluate_composition, evaluate_seqs, evaluate_taxonomy)
from q2_quality_control._utilities import (
_evaluate_composition, _collapse_table, _drop_nans_zeros,
_compute_per_level_accuracy, compute_taxon_accuracy,
_tally_misclassifications, _identify_incorrect_classifications,
_find_nearest_common_lineage, _interpret_metric_selection,
_match_samples_by_index, _validate_metadata_and_exp_table)
from q2_quality_control._evaluate_seqs import _evaluate_seqs
from q2_quality_control._evaluate_taxonomy import (
_evaluate_taxonomy, _extract_taxa_names, _index_is_subset,
_validate_indices_and_set_joining_mode)
filterwarnings("ignore", category=UserWarning)
def _dnafastaformats_to_series(fasta):
fasta = qiime2.Artifact.import_data("FeatureData[Sequence]", fasta)
return fasta.view(pd.Series)
# test template for EvaluateSeqsTests
def load_evaluate_seqs(query_sequences, reference_sequences, exp_fp):
results, alignments, g = _evaluate_seqs(
query_sequences, reference_sequences, show_alignments=False)
# need to cast to numeric to match dtypes that are interpreted in exp
# as it is read in by read_csv
results = results.apply(lambda x: pd.to_numeric(x, errors='ignore'))
exp = pd.read_csv(exp_fp, sep='\t', index_col=0)
pdt.assert_frame_equal(results, exp)
class QualityControlTestsBase(TestPluginBase):
package = 'q2_quality_control.tests'
class SequenceQualityControlBase(QualityControlTestsBase):
def setUp(self):
super().setUp()
def _load_DNAFASTAFormat(reads_fn):
reads_fp = self.get_data_path(reads_fn)
return DNAFASTAFormat(reads_fp, mode='r')
self.query_seqs = _load_DNAFASTAFormat('query-sequences.fasta')
self.bacterial_ref = _load_DNAFASTAFormat(
'bacterial-ref-sequences.fasta')
self.bacterial_exp = _dnafastaformats_to_series(
_load_DNAFASTAFormat('bacterial-query-sequences.fasta'))
self.fungal_ref = _load_DNAFASTAFormat('fungal-ref-sequences.fasta')
self.fungal_exp = _dnafastaformats_to_series(
_load_DNAFASTAFormat('fungal-query-sequences.fasta'))
self.query_seqs_with_mismatch = _load_DNAFASTAFormat(
'query-sequences-with-mismatch.fasta')
self.query_seqs_short = _load_DNAFASTAFormat(
'query-sequences-short.fasta')
self.query_seqs_part_rand = _load_DNAFASTAFormat(
'query-partially-random.fasta')
class ExcludeSeqsBase(object):
method = None
def test_exclude_seqs_bacterial_hit_fungal_miss(self):
obs, missed = exclude_seqs(
self.query_seqs, self.bacterial_ref, method=self.method)
self.assertEqual(
sorted(obs.index), sorted(self.bacterial_exp.index))
self.assertEqual(
sorted(missed.index), sorted(self.fungal_exp.index))
def test_exclude_seqs_fungal_hit_bacterial_miss(self):
obs, missed = exclude_seqs(
self.query_seqs, self.fungal_ref, method=self.method)
self.assertEqual(sorted(obs.index), sorted(self.fungal_exp.index))
self.assertEqual(
sorted(missed.index), sorted(self.bacterial_exp.index))
def test_exclude_seqs_all_hit(self):
obs, missed = exclude_seqs(
self.query_seqs, self.query_seqs, method=self.method)
self.assertEqual(sorted(obs.index), sorted(
_dnafastaformats_to_series(self.query_seqs).index))
self.assertEqual(sorted(missed.index), [])
def test_exclude_seqs_all_miss(self):
obs, missed = exclude_seqs(
self.query_seqs_with_mismatch, self.fungal_ref, method=self.method)
self.assertEqual(sorted(missed.index), sorted(
_dnafastaformats_to_series(
self.query_seqs_with_mismatch).index))
self.assertEqual(sorted(obs.index), [])
def test_exclude_seqs_97_perc_identity(self):
obs, missed = exclude_seqs(
self.query_seqs_with_mismatch, self.bacterial_ref,
method=self.method)
self.assertEqual(
sorted(obs.index), ['2MISA', '2MISB'])
self.assertEqual(
sorted(missed.index), ['10MISA', '8MISA', '8MISB'])
def test_exclude_seqs_96_perc_identity(self):
obs, missed = exclude_seqs(
self.query_seqs_with_mismatch, self.bacterial_ref,
method=self.method, perc_identity=0.965)
self.assertEqual(
sorted(obs.index), ['2MISA', '2MISB', '8MISA', '8MISB'])
self.assertEqual(
sorted(missed.index), ['10MISA'])
def test_exclude_seqs_99_perc_identity(self):
obs, missed = exclude_seqs(
self.query_seqs_with_mismatch, self.bacterial_ref,
method=self.method, perc_identity=0.99)
self.assertEqual(sorted(missed.index), sorted(
_dnafastaformats_to_series(
self.query_seqs_with_mismatch).index))
self.assertEqual(sorted(obs.index), [])
class BlastTests(ExcludeSeqsBase, SequenceQualityControlBase):
method = 'blast'
class VsearchTests(ExcludeSeqsBase, SequenceQualityControlBase):
method = 'vsearch'
class SequenceQualityControlTests(SequenceQualityControlBase):
def setUp(self):
super().setUp()
def test_exclude_seqs_high_evalue_low_perc_query_aligned_permissive(self):
obs, missed = exclude_seqs(
self.query_seqs_part_rand, self.bacterial_ref,
method='blast', perc_identity=0.97, evalue=10000000000000000,
perc_query_aligned=0.1)
self.assertEqual(sorted(obs.index), sorted(
_dnafastaformats_to_series(self.query_seqs_part_rand).index))
self.assertEqual(sorted(missed.index), [])
def test_exclude_seqs_blast_low_evalue_discards_weak_matches(self):
obs, missed = exclude_seqs(
self.query_seqs_part_rand, self.bacterial_ref,
method='blast', perc_identity=0.97, evalue=10**-30,
perc_query_aligned=0.1)
self.assertEqual(
sorted(obs.index), ['YAYIMATCH'])
self.assertEqual(
sorted(missed.index), ['RAND1', 'RAND2'])
def test_exclude_seqs_short_seqs_miss_with_default_blast(self):
obs, missed = exclude_seqs(
self.query_seqs_short, self.bacterial_ref, method='blast')
self.assertEqual(sorted(missed.index), sorted(
_dnafastaformats_to_series(self.query_seqs_short).index))
self.assertEqual(sorted(obs.index), [])
def test_exclude_seqs_short_seqs_hit_with_default_vsearch(self):
obs, missed = exclude_seqs(
self.query_seqs_short, self.bacterial_ref, method='vsearch')
self.assertEqual(sorted(obs.index), sorted(
_dnafastaformats_to_series(self.query_seqs_short).index))
self.assertEqual(sorted(missed.index), [])
def test_exclude_seqs_short_seqs_hit_with_blastn_short(self):
obs, missed = obs, missed = exclude_seqs(
self.query_seqs_short, self.bacterial_ref,
method='blastn-short', evalue=10000)
self.assertEqual(sorted(obs.index), sorted(
_dnafastaformats_to_series(self.query_seqs_short).index))
self.assertEqual(sorted(missed.index), [])
def test_exclude_seqs_short_seqs_miss_with_blastn_short_low_eval(self):
obs, missed = obs, missed = exclude_seqs(
self.query_seqs_short, self.bacterial_ref,
method='blastn-short', perc_identity=0.01, evalue=10**-30)
self.assertEqual(sorted(missed.index), sorted(
_dnafastaformats_to_series(self.query_seqs_short).index))
self.assertEqual(sorted(obs.index), [])
class UtilitiesTests(QualityControlTestsBase):
def test_drop_nans_zeros(self):
test_df1 = pd.DataFrame({'a; b': [0., 0., 0.], 'b; c': [1., 0., 0.],
'c; d': [1., np.nan, 1.]})
filtered_df = pd.DataFrame(
{'b;c': [1., 0.], 'c;d': [1., 1.]}, index=[0, 2])
new_df = _drop_nans_zeros(test_df1)
pdt.assert_frame_equal(filtered_df, new_df)
def test_compute_taxon_accuracy(self):
res = compute_taxon_accuracy(
pd.Series({'a;b': 1, 'b;c': 1, 'c;d': 1}),
pd.Series({'a;b': 1, 'b;c': 1, 'c;e': 1, 'd;e': 1}))
self.assertEqual(res, (0.5, 0.6666666666666666))
def test_compute_taxon_accuracy_no_matches(self):
res = compute_taxon_accuracy(
| pd.Series({'a': 1, 'b': 1, 'c': 1}) | pandas.Series |
import config as cfg
import numpy as np
import pandas as pd
import warnings
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, multilabel_confusion_matrix
def print_conf_mat(true_labels, preds):
cm = confusion_matrix(true_labels, preds)
I = pd.Index(['True Negative', 'True Positive'], name="rows")
C = pd.Index(['Predicted Negative', 'Predicted Positive'], name="columns")
cm_df = | pd.DataFrame(data=cm, index=I, columns=C) | pandas.DataFrame |
import pandas as pd
__all__ = [
"text2array",
"array2df",
"text2df",
]
def text2array(file):
res = []
with open(file) as f:
for sent in f.readlines():
text, label = sent.split("\t")
res.append([text, label[0]])
return res
def array2df(array):
return | pd.DataFrame(array, columns=["text", "label"]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
##################################################################
#
# # Created by: <NAME>
#
# # On date 20-03-2019
#
# # Game Of Thrones Analisys
#
#
#
#################################################################
"""
Challenge
There are approximately 2,000 characters in A Song of Ice and Fire by <NAME>. This book
series was the inspiration for the HBO series Game of Thrones. The tasks here are to predict which
characters in the series will live or die, and give data-driven recommendations on how to survive in
Game of Thrones.
"""
################################################################################################
# ## GOT Dictonary
# S.No = Character number (by order of appearance)
#
# name = Character name
#
# title = Honorary title(s) given to each character
#
# male = 1 = male, 0 = female
#
# culture = Indicates the cultural group of a character
#
# dateOfBirth = Known dates of birth for each character (measurement unknown)
#
# mother = Character's biological mother
#
# father = Character's biological father
#
# heir = Character's biological heir
#
# house = Indicates a character's allegiance to a house (i.e. a powerful family)
#
# spouse = Character's spouse(s)
#
# book1_A_Game_Of_Thrones = 1 = appeared in book, 0 = did not appear in book
#
# book2_A_Clash_Of_Kings = 1 = appeared in book, 0 = did not appear in book
#
# book3_A_Storm_Of_Swords = 1 = appeared in book, 0 = did not appear in book
#
# book4_A_Feast_For_Crows = 1 = appeared in book, 0 = did not appear in book
#
# book5_A_Dance_with_Dragons = 1 = appeared in book, 0 = did not appear in book
#
# isAliveMother = 1 = alive, 0 = not alive
#
# isAliveFather = 1 = alive, 0 = not alive
#
# isAliveHeir = 1 = alive, 0 = not alive
#
# isAliveSpouse = 1 = alive, 0 = not alive
#
# isMarried = 1 = married, 0 = not married
#
# isNoble = 1 = noble, 0 = not noble
#
# age = Character's age in years
#
# numDeadRelations = Total number of deceased relatives throughout all of the books
#
# popularity = Indicates the popularity of a character (1 = extremely popular (max), 0 = extremely unpopular (min))
#
# isAlive = 1 = alive, 0 = not alive
##################################################################################################
##################
# Import Libraries
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.model_selection import train_test_split # train/test split
from sklearn.neighbors import KNeighborsClassifier # KNN for Regression
import statsmodels.formula.api as smf # regression modeling
import sklearn.metrics # more metrics for model performance evaluation
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
# Setting pandas print options
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
#############
# Import data
file = 'GOT_character_predictions.xlsx'
got = pd.read_excel(file)
##############################################################################################
# # Exploratory analysis of the dataset
##############################################################################################
# Column names
got.columns
# Displaying the first rows of the DataFrame
print(got.head())
# Dimensions of the DataFrame
got.shape
# Information about each variable
got.info()
# Descriptive statistics
got.describe().round(2)
"""
We have many variables with missing values.
Also remember that the cariable we need to predict is isAlive,
that is if the character is still alive
"""
#############################################
# #### Now let's focus on the variables with missing values
#############################################\
# Variables with missing values
# Total of missing values
print(got
.isnull()
.sum()
.sum()
)
# Missing values per column
print(got
.isnull()
.sum()
)
"""
Here we can see that we have a big problem with missing values.<br>
Some of them are manageable, but other, really dificult, with a lot of missing values inside, almost
every value is missing.
"""
#########################
# #### Let's check some individual variables
########################
# Let's start with the one we want to predict: isAlive
# Type of values in the variable
got['isAlive'].unique()
# We have 0 and 1, boolean
# Now let's count the values
got['isAlive'].value_counts()
# Here we can see that there could be a bias in the data because
# there a lot less of 1 (alive) examples, this can be harder to predict in the future.
##################
# #### Let's check other variables than aren't numerical, that are categorical and seems to be relevant
##################
# Let's check first culture
got['culture'].unique()
#Wow that's seems a lot of different cultures, let's count it***
# Count the unique values of cultures:
len(got['culture'].unique())
# Here as we can see there a lot of unique str values for culture: 65***
###############################################################################################
# Here let's create some engineering features
###############################################################################################
# Firt let's make a copy of our sf as V1
got_v1 = pd.DataFrame.copy(got)
# let's group the all the obs using isAlive
got_v1 = got_v1.groupby(['isAlive']).apply(lambda x: x.fillna(x.median()))
"""
This is a really good approach to have the obs divided by the ones that are alive
and the ones that are dead, making easier the analysis and egineer features
creation.
"""
# Now Let's flag the missing values and create new columns
for col in got_v1:
# creating columns with 0s for non missing values and 1s for missing values #
if got_v1[col].isnull().astype(int).sum()>0:
got_v1['m_'+col]=got_v1[col].isnull().astype(int)
else:
print("""There is an error in the loop, check it !""")
print(got_v1.info())
print(got_v1.head())
# Let's create a columns with how many characters appears in how many books
got_v1['c_all_books'] = got_v1['book1_A_Game_Of_Thrones'] + got_v1['book2_A_Clash_Of_Kings'] + got_v1['book3_A_Storm_Of_Swords'] + got_v1['book4_A_Feast_For_Crows'] + got_v1['book5_A_Dance_with_Dragons']
print(got_v1['c_all_books'].sort_values(ascending=False).head())
print(got_v1['c_all_books'].count())
# now let's see how many character appears in 1, 2, 3 & 4 books
# 1 book only
got_v1['c_1_book'] = (got_v1['c_all_books'] == 1).astype(int)
print(got_v1['c_1_book'].head())
print(got_v1['c_1_book'].sum())
# 2 books only
got_v1['c_2_book'] = (got_v1['c_all_books'] == 2).astype(int)
print(got_v1['c_2_book'].head())
print(got_v1['c_2_book'].sum())
# 3 books only
got_v1['c_3_book'] = (got_v1['c_all_books'] == 3).astype(int)
print(got_v1['c_3_book'].head())
print(got_v1['c_3_book'].sum())
# 4 books only
got_v1['c_4_book'] = (got_v1['c_all_books'] == 4).astype(int)
print(got_v1['c_4_book'].head())
print(got_v1['c_4_book'].sum())
# 5 books only
got_v1['c_5_book'] = (got_v1['c_all_books'] == 5).astype(int)
print(got_v1['c_5_book'].head())
print(got_v1['c_5_book'].sum())
# NO books! This characters appears in 0 books
got_v1['c_0_book'] = (got_v1['c_all_books'] == 0).astype(int)
print(got_v1['c_0_book'].head())
print(got_v1['c_0_book'].sum())
# let's summarize the amount of each section
print('Total characters in 0 book:', got_v1['c_0_book'].sum())
print('Total characters in 1 book:', got_v1['c_1_book'].sum())
print('Total characters in 2 book:', got_v1['c_2_book'].sum())
print('Total characters in 3 book:', got_v1['c_3_book'].sum())
print('Total characters in 4 book:', got_v1['c_4_book'].sum())
print('Total characters in 5 book:', got_v1['c_5_book'].sum())
# Let's correct age
print(got_v1[['name','age']].sort_values(by='age').head())
# As we can see the first 2 values are wrong, here we need some research
# and the the number given is the year number.
# Let's drop this 2 observations
# Rhaego & Doreah
got_v1 = got_v1.drop(got_v1[got_v1.name == 'Rhaego'].index)
got_v1 = got_v1.drop(got_v1[got_v1.name == 'Doreah'].index)
print(got_v1[['name','age']].sort_values(by='age').head())
# Here we can see that both values were drop
# Now is better to unserstan the graphs below
# And because were only 2 observations, is good to drop and continue
# Let's now create popularity features
# Let's start with popularity > 0.30
got_v1['popu_0.3'] = (got_v1['popularity'] > 0.30).astype(int)
print(got_v1['popu_0.3'].sort_values(ascending=False).head(10))
print(got_v1['popu_0.3'].sum())
# Let's continue with popularity > 0.50
got_v1['popu_0.5'] = (got_v1['popularity'] > 0.50).astype(int)
print(got_v1['popu_0.5'].sort_values(ascending=False).head(10))
print(got_v1['popu_0.5'].sum())
# Let's continue with popularity > 0.80
got_v1['popu_0.8'] = (got_v1['popularity'] > 0.80).astype(int)
print(got_v1['popu_0.8'].sort_values(ascending=False).head(10))
print(got_v1['popu_0.8'].sum())
# Now at last, let's create 3 cat for numDeadRealations: > 1, 4 (more than that
# we get really small samples)
# We start with > 1
got_v1['dead_rela_1'] = (got_v1['numDeadRelations'] > 1).astype(int)
print(got_v1['dead_rela_1'].sort_values(ascending=False).head(10))
print(got_v1['dead_rela_1'].sum())
# We continue with > 4
got_v1['dead_rela_4'] = (got_v1['numDeadRelations'] > 4).astype(int)
print(got_v1['dead_rela_4'].sort_values(ascending=False).head(10))
print(got_v1['dead_rela_4'].sum())
# Here we will remain only with the new ones > 1 & > 4***
# ### Now let's fill in with 0 the missing values in the age
# This is so we can use that column because it is possible to have prediction power
for age in got_v1['age']:
if (got_v1['age'].isnull().any()) == True:
got_v1['age'] = got_v1['age'].fillna(got_v1['age'].min())
print(got_v1['age'].isnull().sum())
# Now we've fill in all the NaS with 0 to use the column
#Let's round hte variable popularity
got_v1['popularity'].round(2).head(10)
# Now let's create a variable that when m_culture match isAlive equals 1 to see a trend
got_v1['culture_alive'] = (got_v1['m_culture'] == got_v1['isAlive']).astype(int)
# Now let's create a variable that when m_house match isAlive equals 1 to see a trend
got_v1['house_alive'] = (got_v1['m_house'] == got_v1['isAlive']).astype(int)
# Now let's create a variable that when m_title match isAlive equals 1 to see a trend
got_v1['title_alive'] = (got_v1['m_title'] == got_v1['isAlive']).astype(int)
##############
# Now let's work on the cultures
# Fisrt let's correct the amount of cultures, they are repeated
got_v1['culture'].unique()
# here we can see that there are repeated names
# Let's create a dictonary with the names
cult = {
'Summer Islands': ['summer islands', 'summer islander', 'summer isles'],
'Ghiscari': ['ghiscari', 'ghiscaricari', 'ghis'],
'Asshai': ["asshai'i", 'asshai'],
'Lysene': ['lysene', 'lyseni'],
'Andal': ['andal', 'andals'],
'Braavosi': ['braavosi', 'braavos'],
'Dornish': ['dornishmen', 'dorne', 'dornish'],
'Myrish': ['myr', 'myrish', 'myrmen'],
'Westermen': ['westermen', 'westerman', 'westerlands'],
'Westerosi': ['westeros', 'westerosi'],
'Stormlander': ['stormlands', 'stormlander'],
'Norvoshi': ['norvos', 'norvoshi'],
'Northmen': ['the north', 'northmen'],
'Free Folk': ['wildling', 'first men', 'free folk'],
'Qartheen': ['qartheen', 'qarth'],
'Reach': ['the reach', 'reach', 'reachmen'],
'Ironborn': ['ironborn', 'ironmen'],
'Mereen': ['meereen', 'meereenese'],
'RiverLands': ['riverlands', 'rivermen'],
'Vale': ['vale', 'valemen', 'vale mountain clans']
}
got_v1["culture"].fillna("x", inplace=True)
# Let's create a function to simplify the cultures
def get_cult(value):
value = value.lower()
v = [k for (k, v) in cult.items() if value in v]
return v[0] if len(v) > 0 else value.title()
got_v1.loc[:, "culture"] = [get_cult(x) for x in got_v1["culture"]]
# let's check the cahnges
got_v1['culture'].unique()
# We can see that now they are reduced
# Now it's time to take the mo
got_v1['culture_vale'] = np.where((got_v1['culture'] == "Vale") , 1,0)
got_v1['culture_northmen'] = np.where((got_v1['culture'] == "Northmen"), 1,0)
"""
Why this 2?
1) The Northmen culture is the one next to the wall in the north, is were
all the action happened. Many people died there and also the Stark House
was almost rid from the map.
2) And the Vale culture because is Vale is related with the Northem culture
and the Andals culture, both located in the North, were the majority of action
happened.
"""
# Now let's create another one related to noble.
# Let's take woman as the reference for 1 (e male in the general)
got_v1['noble_woman'] = np.where((got_v1['male'] == 0) & (got_v1['isNoble'] == 1 ), 1,0)
# ### Let's check the new variables with isAlive to see they are not
# following the dependent variable
################
# ### Now let's make some graphs!
# We only want to graph some variables, let's create a df with the columns we want to see
got_hist = pd.DataFrame.copy(got_v1)
col_drop = ['S.No', 'name', 'culture', 'dateOfBirth', 'mother',
'father', 'house','heir', 'spouse','m_mother',
'm_father', 'm_heir', 'm_house', 'm_spouse']
got_hist = got_hist.drop(col_drop, 1)
# Now let's graph
got_hist.hist(figsize = (16, 20), bins = 10, xlabelsize = 12, ylabelsize = 12)
##################
# ### Now let's rid some variables for our 1st aproach
# We do this to make an easy 1st approach and create our first model<br>
# Then we can see what happen and improve our model<br>
# We will try to rid those who are less relevant to continue
# Create a new df with the drop variables
got_num = pd.DataFrame.copy(got_v1)
got_num = got_num.drop(['name', 'culture', 'dateOfBirth', 'mother',
'father', 'heir', 'house', 'spouse','m_mother',
'm_father', 'm_heir', 'm_spouse',
'isAliveMother', 'isAliveFather',
'isAliveHeir', 'isAliveSpouse', 'title'], axis=1)
got_num['popularity'].round(2)
print(got_num.info())
# Now we rid all the missing values
###################
# ### Let's see mow the correlations between them
# Let's create a correlation between the remaining variables
# Creation of the corr()
got_corr = got_num.corr()
# Print the corr() the var we want to predict: isAlive
print(got_corr['isAlive'].sort_values())
"""
We see interesting results with good insights
Insights:
* If you appear in book 4 oyu have higher probability to be alive
* Age has a negative corr, what means that the older the worst
* Having many dead realations is not good to survive
* also being popular can cause your death
* The variables created using the dependent var (isAlive) have a strong corr() but only because
of that, we are not going to use them.
"""
##############
# Let's continue with other analysis: heatmap
# now let's do the graph of the heatmap
fig, ax=plt.subplots(figsize=(20,20))
sns.set(font_scale=2)
sns.heatmap(got_corr,
cmap = 'Blues',
square = True,
annot = False,
linecolor = 'black',
linewidths = 0.5)
#plt.savefig('correlation_matrix_all_var')
plt.show()
##################
# ### Let's see some scatterplots
# This is between the more relevant variables with isAlive
sns.set()
cols = ['dead_rela_1','numDeadRelations','popularity',
'dead_rela_4','popu_0.3','culture_vale','culture_northmen',
'age','book4_A_Feast_For_Crows', 'isAlive']
sns.pairplot(got_num[cols], height= 2.5)
plt.show();
# ### Let's focus only in some graphs that are interesting
sns.set()
cols = ['numDeadRelations','popularity',
'age', 'book4_A_Feast_For_Crows']
sns.pairplot(got_num[cols], height= 2.5)
plt.show();
"""
Here we can highlight some insights:
1) The most popular character are the one between 0 and 60. And being more popular
is dangerous, more popular = more chances to be dead
2) Also from the corr() we can see thar being older is worst for being alive.
"""
sns.regplot(x="popularity", y="numDeadRelations", data=got, color='b')
plt.axvline(.5, color='blue')
sns.regplot(x="popularity", y="age", data=got, color='b')
plt.axvline(.5, color='blue')
#################
# ### Let's see the outliers
for col in got_num:
sns.set()
plt.figure(figsize = (7, 3))
ax = sns.boxplot(x=got_num[col], data=got_num)
plt.setp(ax.artists, alpha=.5, linewidth=2, edgecolor="k")
plt.xticks(rotation=45)
# ***From the outlier analysis we see that is interesting the popularity variables***<br>
# ***The outliers begin upper than 0.2, there's a breakpoint***
##########################################################################################
# Model Creation
##########################################################################################
# The models that we are going to use are:
# * KNN Classification
# * Random Forest
# * GBM
#####################
# KNN Classifier Basic
#####################
# Let's start creating a basic model
x = got[[ #'title',
#'culture',
'male',
#'heir',
#'house',
'book1_A_Game_Of_Thrones',
#'book2_A_Clash_Of_Kings',
#'book3_A_Storm_Of_Swords',
'book4_A_Feast_For_Crows',
#'book5_A_Dance_with_Dragons',
'isMarried',
'isNoble',
#'age',
#'numDeadRelations',
'popularity']]
y = got.loc[:, 'isAlive']
seed = 508
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, stratify=y,shuffle=True,random_state=seed)
training_accuracy = []
test_accuracy = []
neighbors_settings = range(1, 51)
for n_neighbors in neighbors_settings:
# build the model
clf = KNeighborsClassifier(n_neighbors = n_neighbors)
clf.fit(x_train, y_train.values.ravel())
# record training set accuracy
training_accuracy.append(clf.score(x_train, y_train))
# record generalization accuracy
test_accuracy.append(clf.score(x_test, y_test))
print(test_accuracy.index(max(test_accuracy)) + 1)
fig, ax = plt.subplots(figsize=(12,9))
plt.plot(neighbors_settings, training_accuracy, label = "training accuracy")
plt.plot(neighbors_settings, test_accuracy, label = "test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend()
plt.show()
########################
# The best results occur when k = 7.
########################
# Building a model with k = 7
knn_clf = KNeighborsClassifier(n_neighbors = 7)
# Fitting the model based on the training data
knn_clf_fit = knn_clf.fit(x_train, y_train)
#knn_clf_fit = knn_clf.fit(X_train, y_train.values.ravel())
print('Training Score', knn_clf_fit.score(x_train, y_train).round(4))
print('Testing Score:', knn_clf_fit.score(x_test, y_test).round(4))
knn_clf_pred = knn_clf_fit.predict(x_test)
knn_clf_pred_probabilities = knn_clf_fit.predict_proba(x_test)
#print(knn_clf_pred)
#print(knn_clf_pred_probabilities)
# ***Here we get a not bad result without using the features created***
####################
# CONFUSION MATRIX
####################
print(confusion_matrix(y_true = y_test,
y_pred = knn_clf_pred))
labels = ['Alive-1', 'Not Alive-0']
cm = confusion_matrix(y_true = y_test,
y_pred = knn_clf_pred)
sns.heatmap(cm,
annot = True,
xticklabels = labels,
yticklabels = labels,
cmap = 'Blues')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.title('Confusion matrix of the classifier')
plt.show()
# Here we can see that the result of the matrix is bigger where we said that it will
# not be alive, and he is alive (is better in this case to have more error here)
################
# ### Now let's create a Random Forest
#################
################################
# Random Forest in scikit-learn (basic model)
###############################
# Let's create a basic model withput the features created first with the same vars
# Preparing a DataFrame based the the analysis above
x = got[[ 'male',
'book1_A_Game_Of_Thrones',
'book4_A_Feast_For_Crows',
'isMarried',
'isNoble',
'popularity']]
y = got.loc[:, 'isAlive']
# Now that we have a new set of X_variables, we need to run train/test
# split again
X_train, X_test, y_train, y_test = train_test_split(
x,
y,
test_size = 0.10,
random_state = 508)
# Following the same procedure as other scikit-learn modeling techniques
# Full forest using gini
full_forest_gini = RandomForestClassifier(n_estimators = 500,
criterion = 'gini',
max_depth = None,
min_samples_leaf = 15,
bootstrap = True,
warm_start = False,
random_state = 508)
# Full forest using entropy
full_forest_entropy = RandomForestClassifier(n_estimators = 500,
criterion = 'entropy',
max_depth = None,
min_samples_leaf = 15,
bootstrap = True,
warm_start = False,
random_state = 508)
# Fitting the models
full_gini_fit = full_forest_gini.fit(X_train, y_train)
full_entropy_fit = full_forest_entropy.fit(X_train, y_train)
# Scoring the gini model
print('Gini - Training Score:', full_gini_fit.score(X_train, y_train).round(4))
print('Gini - Testing Score:', full_gini_fit.score(X_test, y_test).round(4))
# Scoring the entropy model
print('Entropy - Training Score', full_entropy_fit.score(X_train, y_train).round(4))
print('Entropy - Testing Score:', full_entropy_fit.score(X_test, y_test).round(4))
# Here we see the same results than before with the same variables
# Here we get the following (Entropy is better):
# * Gini - Training Score: 0.7967
# * Gini - Testing Score: 0.8154
# * Entropy - Training Score 0.7967
# * Entropy - Testing Score: 0.8205
# ***Other thing that we see here is that the testing score is bigger than the training***
# ***The data is not overfited***
# ***Let's see now the importance of every variable to take some conclusions***
########################
# Feature importance function
########################
def plot_feature_importances(model, train = X_train, export = False):
fig, ax = plt.subplots(figsize=(12,9))
n_features = X_train.shape[1]
plt.barh(range(n_features), model.feature_importances_, align='center')
plt.yticks(pd.np.arange(n_features), train.columns)
plt.xlabel("Feature importance")
plt.ylabel("Feature")
if export == True:
plt.savefig('Tree_Leaf_50_Feature_Importance.png')
########################
plot_feature_importances(full_gini_fit,
train = X_train,
export = False)
plot_feature_importances(full_entropy_fit,
train = X_train,
export = False)
# ***Here we can see which variables are the most important for this model:***
# The most important are:
# * popularity
# * book4_A_Feast_For_Crows
# Conclusion: try to be not so popular, but enough to appear in as many books as possible (and better if you are in book N 4)
#######################################################################################
############################### IMPROVED MODELS WITH E.F ##############################
#######################################################################################
###############
# KNN Classifier Improved
###############
# Let's pick the best variables for us to put in the model
# Let's start creating a basic model
x = got_v1[[ 'book1_A_Game_Of_Thrones',
'book4_A_Feast_For_Crows',
'age',
'popularity',
'noble_woman',
'culture_vale',
'culture_northmen',
'c_5_book',
'dead_rela_1']]
y = got_v1.loc[:, 'isAlive']
x_train, x_test, y_train, y_test = train_test_split(
x,
y,
test_size=0.1,
stratify=y,
shuffle=True,
random_state=508)
training_accuracy = []
test_accuracy = []
neighbors_settings = range(1, 51)
for n_neighbors in neighbors_settings:
# build the model
clf = KNeighborsClassifier(n_neighbors = n_neighbors)
clf.fit(x_train, y_train.values.ravel())
# record training set accuracy
training_accuracy.append(clf.score(x_train, y_train))
# record generalization accuracy
test_accuracy.append(clf.score(x_test, y_test))
print(test_accuracy.index(max(test_accuracy)) + 1)
fig, ax = plt.subplots(figsize=(12,9))
plt.plot(neighbors_settings, training_accuracy, label = "training accuracy")
plt.plot(neighbors_settings, test_accuracy, label = "test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend()
plt.show()
########################
# The best results occur when k = 3.
########################
# Building a model with k = 3
knn_clf = KNeighborsClassifier(n_neighbors = 3)
# Fitting the model based on the training data
knn_clf_fit = knn_clf.fit(x_train, y_train)
#knn_clf_fit = knn_clf.fit(X_train, y_train.values.ravel())
print('Training Score', knn_clf_fit.score(x_train, y_train).round(4))
print('Testing Score:', knn_clf_fit.score(x_test, y_test).round(4))
knn_clf_pred = knn_clf_fit.predict(x_test)
knn_clf_pred_probabilities = knn_clf_fit.predict_proba(x_test)
#print(knn_clf_pred)
#print(knn_clf_pred_probabilities)
"""
Here we can see how important are the new variables put it in the model.
We get:
Training Score 0.9611
Testing Score: 0.9385
We can see that is not too overfit, we have a good balance.
Let's try to improve it in the following section.
"""
################################
# Random Forest in scikit-learn (IMPROVED)
###############################
# Let's create a basic model withput the features created first with the same vars
# Preparing a DataFrame based the the analysis above
x = got_v1[[ 'book1_A_Game_Of_Thrones',
'book4_A_Feast_For_Crows',
'age',
'popularity',
'noble_woman',
'culture_vale',
'culture_northmen',
'c_5_book',
'dead_rela_1']]
y = got_v1.loc[:, 'isAlive']
# Now that we have a new set of X_variables, we need to run train/test
# split again
x_train, x_test, y_train, y_test = train_test_split(
x,
y,
test_size = 0.10,
random_state = 508)
# Following the same procedure as other scikit-learn modeling techniques
# Full forest using gini
full_forest_gini = RandomForestClassifier(n_estimators = 500,
criterion = 'gini',
max_depth = None,
min_samples_leaf = 15,
bootstrap = True,
warm_start = False,
random_state = 508)
# Full forest using entropy
full_forest_entropy = RandomForestClassifier(n_estimators = 500,
criterion = 'entropy',
max_depth = None,
min_samples_leaf = 15,
bootstrap = True,
warm_start = False,
random_state = 508)
# Fitting the models
full_gini_fit = full_forest_gini.fit(x_train, y_train)
full_entropy_fit = full_forest_entropy.fit(x_train, y_train)
# Scoring the gini model
print('Gini - Training Score:', full_gini_fit.score(x_train, y_train).round(4))
print('Gini - Testing Score:', full_gini_fit.score(x_test, y_test).round(4))
# Scoring the entropy model
print('Entropy - Training Score', full_entropy_fit.score(x_train, y_train).round(4))
print('Entropy - Testing Score:', full_entropy_fit.score(x_test, y_test).round(4))
# ***Here we get the following scores (Entropy is better):***
# * Gini - Training Score: 0.9451
# * Gini - Testing Score: 0.9436
# * Entropy - Training Score 0.9445
# * Entropy - Testing Score: 0.9282
########################
# Feature importance function
########################
def plot_feature_importances(model, train = x_train, export = False):
fig, ax = plt.subplots(figsize=(12,9))
n_features = x_train.shape[1]
plt.barh(range(n_features), model.feature_importances_, align='center')
plt.yticks(pd.np.arange(n_features), train.columns)
plt.xlabel("Feature importance")
plt.ylabel("Feature")
if export == True:
plt.savefig('Tree_Leaf_50_Feature_Importance.png')
########################
plot_feature_importances(full_gini_fit,
train = x_train,
export = False)
plot_feature_importances(full_entropy_fit,
train = x_train,
export = False)
# Here we can see the importance of the variable age (fill in with the mean) and also popularity.
# Meaning that the older the bigger the chance of die and same with popularity. What is good is to appear in book N 4
#############################
# ### Now let's try to improve the model with RandmizedSearchCV
#############################
# Is important to say that Randomized was chosen over Grid because of the time processing.
# And because of the deadline to present, with more time Grid is better in terms of improvement.
##################
# Tuned Parameters
##################
#############################
# Applying RandomizedSearchCV in Random Forest
############################
# Creating a hyperparameter grid
estimator_space = pd.np.arange(100, 1350, 250)
leaf_space = | pd.np.arange(1, 150, 15) | pandas.np.arange |
#!/usr/bin/env python
# coding: utf-8
# In[27]:
import pandas as pd
import numpy as np
import sys
import datetime as dt
import argparse
import re
import copy
import os
from itertools import chain
# In[28]:
#function to break skills, order them and concetenate. order is important so that skill_a+skill_b is the same as skill_B+skill_a
def concetanete_skills(skills):
if skills is None or skills is np.nan:
return skills
skills = skills.split('~~')
skills = sorted(skills)
return '+'.join(skills)
# In[29]:
def logProgressToWfl(progressMsg):
logFile = open("multiskillConverterLog.wfl", "a")
now = dt.datetime.now()
progressPrepend = "%Progress::"
logFile.write(progressPrepend + "@" + str(now) + "@" + progressMsg + "\n");
logFile.close();
# In[ ]:
#C:/ProgramData/Anaconda3/Python multiskill_converter.py -programDir . -workingDir . -userId 1 -kcModelsToConvert_nodeIndex 0 -kcModelsToConvert_fileIndex 0 -kcModelsToConvert "KC (CCSS)" -kcModelsToConvert_nodeIndex 0 -kcModelsToConvert_fileIndex 0 -kcModelsToConvert "KC (MATHia New)" -multiskillConversionMethod "Concatenate" -node 0 -fileIndex 0 C:\WPIDevelopment\dev06_dev\WorkflowComponents\MultiskillConverter\test\test_data\test.txt -inputFile test.txt
#C:/ProgramData/Anaconda3/Python multiskill_converter.py -programDir . -workingDir . -userId 1 -kcModelToConvert_nodeIndex 0 -kcModelToConvert_fileIndex 0 -kcModelToConvert "KC (MATHia New)" -multiskillConversionMethod "Split to Multiple Rows" -valuesToBeSplit_nodeIndex 0 -valuesToBeSplit_fileIndex 0 -valuesToBeSplit "Correct Step Duration (sec)" -valuesToBeSplit_nodeIndex 0 -valuesToBeSplit_fileIndex 0 -valuesToBeSplit "Step Duration (sec)" -node 0 -fileIndex 0 C:\WPIDevelopment\dev06_dev\WorkflowComponents\MultiskillConverter\test\test_data\test.txt -inputFile test.txt
#C:/ProgramData/Anaconda3/Python multiskill_converter.py -programDir . -workingDir . -userId 1 -kcModelToConvert_nodeIndex 0 -kcModelToConvert_fileIndex 0 -kcModelToConvert "KC (MATHia New)" -multiskillConversionMethod "Split to Multiple Rows" -valuesToBeSplit_nodeIndex 0 -valuesToBeSplit_fileIndex 0 -valuesToBeSplit "Step End Time" -valuesToBeSplit_nodeIndex 0 -valuesToBeSplit_fileIndex 0 -valuesToBeSplit "Step Duration (sec)" -node 0 -fileIndex 0 C:\WPIDevelopment\dev06_dev\WorkflowComponents\MultiskillConverter\test\test_data\test.txt -inputFile test.txt
#command line
parser = argparse.ArgumentParser(description='Process datashop file.')
parser.add_argument('-programDir', type=str, help='the component program directory')
parser.add_argument('-workingDir', type=str, help='the component instance working directory')
parser.add_argument("-node", nargs=1, action='append')
parser.add_argument("-fileIndex", nargs=2, action='append')
parser.add_argument('-multiskillConversionMethod', choices=["Concatenate", "Split to Multiple Rows"], help='Method to handle multiskill steps(default="Concatenate")', default="Concatenate")
parser.add_argument('-kcModelsToConvert', nargs=1, action='append', type=str, help='KC models to convert when concatenating; e.g., "Item"')
parser.add_argument('-kcModelToConvert', nargs=1, type=str, help='KC model to convert when Split to Multiple Rows; e.g., "Item"')
parser.add_argument('-valuesToBeSplit', nargs=1, action='append', type=str, help='KC model to convert when Split to Multiple Rows;')
parser.add_argument('-averageColumnValues', choices=["Yes", "No"], help='If any column value should be averaged(default="No")', default="Concatenate")
parser.add_argument('-inputFile', type=str, help='data file containing multi-skill steps')
parser.add_argument('-userId', type=str, help='placeholder for WF', default='')
args, option_file_index_args = parser.parse_known_args()
filename = args.inputFile
modification_method = args.multiskillConversionMethod
kcms_to_change = args.kcModelsToConvert
if kcms_to_change is not None:
kcms_to_change = list(chain.from_iterable(kcms_to_change))
kcm_to_split = args.kcModelToConvert
if kcm_to_split is not None:
kcm_to_split = kcm_to_split[0]
columns_value_to_be_split = args.valuesToBeSplit
if columns_value_to_be_split is not None:
columns_value_to_be_split = list(chain.from_iterable(columns_value_to_be_split))
average_column_values = args.averageColumnValues
if average_column_values is not None and average_column_values == "Yes":
average_column_values = True
else:
average_column_values = False
# In[30]:
if False:
filename = 'test.txt'
#modification_method = 'Concatenate'
kcms_to_change = ['KC (CCSS)', 'KC (MATHia New)']
modification_method = 'Split to Multiple Rows'
kcm_to_split = 'KC (MATHia New)'
columns_value_to_be_split = ['Step Duration (sec)', 'Correct Step Duration (sec)']
average_column_values = True
# In[31]:
df = pd.read_csv(filename, dtype=str, na_values = ['null', 'na', 'NA', 'n/a', 'nan'], sep="\t", encoding = "ISO-8859-1")
if modification_method == 'Concatenate':
for kcm_to_change in kcms_to_change:
print(kcm_to_change)
if kcm_to_change in df.columns:
#change ~~ to +
df[kcm_to_change] = df[kcm_to_change].apply(concetanete_skills)
#get KC model name without prefix "KC(""
kcm_name = kcm_to_change
if "KC (" in kcm_to_change and ")" in kcm_to_change:
kc_name = kcm_to_change[len("KC ("):kcm_to_change.find(")")]
kcm_opportunity = "Opportunity ({})".format(kc_name)
if kcm_opportunity in df.columns:
df.drop(kcm_opportunity, axis=1, inplace=True)
df_omit_na = df[['Anon Student Id', kcm_to_change]]
df_omit_na = df_omit_na.dropna()
df_omit_na[kcm_opportunity] = df_omit_na.groupby(['Anon Student Id', kcm_to_change]).cumcount()+1
df_omit_na = df_omit_na[[kcm_opportunity]]
df = df.merge(df_omit_na, left_index=True, right_index=True, how='left')
filename = os.path.basename(os.path.normpath(filename))
df.to_csv('multiskill_converted_{}'.format(filename), sep='\t', index=False)
elif modification_method == 'Split to Multiple Rows':
proc_pct = 0.1
totalCnt = df.shape[0]
if kcm_to_split in df.columns:
#make a new dataframe
split_df = pd.DataFrame(columns = df.columns)
#loop through each rows
cnt = 1
for index, row in df.iterrows():
#write to the workflow log for percentage processed
if cnt/totalCnt > proc_pct:
logProgressToWfl("{:.0%}".format(proc_pct))
proc_pct = proc_pct + 0.1
cnt = cnt + 1
#process skills
skills = row[kcm_to_split]
if skills is None or | pd.isna(skills) | pandas.isna |
from __future__ import division
from builtins import str
from builtins import range
from builtins import object
__copyright__ = "Copyright 2015 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import os
import sys
import numpy as np
import pandas as pd
from .Error import DemandInputError
from .Logger import FastTripsLogger
from .Route import Route
from .TAZ import TAZ
from .Trip import Trip
from .Util import Util
class Passenger(object):
"""
Passenger class.
One instance represents all of the households and persons that could potentially make transit trips.
Stores household information in :py:attr:`Passenger.households_df` and person information in
:py:attr:`Passenger.persons_df`, which are both :py:class:`pandas.DataFrame` instances.
"""
#: File with households
INPUT_HOUSEHOLDS_FILE = "household.txt"
#: Households column: Household ID
HOUSEHOLDS_COLUMN_HOUSEHOLD_ID = 'hh_id'
#: File with persons
INPUT_PERSONS_FILE = "person.txt"
#: Persons column: Household ID
PERSONS_COLUMN_HOUSEHOLD_ID = HOUSEHOLDS_COLUMN_HOUSEHOLD_ID
#: Persons column: Person ID (string)
PERSONS_COLUMN_PERSON_ID = 'person_id'
# ========== Added by fasttrips =======================================================
#: Persons column: Person ID number
PERSONS_COLUMN_PERSON_ID_NUM = 'person_id_num'
#: File with trip list
INPUT_TRIP_LIST_FILE = "trip_list.txt"
#: Trip list column: Person ID
TRIP_LIST_COLUMN_PERSON_ID = PERSONS_COLUMN_PERSON_ID
#: Trip list column: Person Trip ID
TRIP_LIST_COLUMN_PERSON_TRIP_ID = "person_trip_id"
#: Trip list column: Origin TAZ ID
TRIP_LIST_COLUMN_ORIGIN_TAZ_ID = "o_taz"
#: Trip list column: Destination TAZ ID
TRIP_LIST_COLUMN_DESTINATION_TAZ_ID = "d_taz"
#: Trip list column: Mode
TRIP_LIST_COLUMN_MODE = "mode"
#: Trip list column: Departure Time. DateTime.
TRIP_LIST_COLUMN_DEPARTURE_TIME = 'departure_time'
#: Trip list column: Arrival Time. DateTime.
TRIP_LIST_COLUMN_ARRIVAL_TIME = 'arrival_time'
#: Trip list column: Time Target (either 'arrival' or 'departure')
TRIP_LIST_COLUMN_TIME_TARGET = 'time_target'
# ========== Added by fasttrips =======================================================
#: Trip list column: Unique numeric ID for this passenger/trip
TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM = "trip_list_id_num"
#: Trip list column: Origin TAZ Numeric ID
TRIP_LIST_COLUMN_ORIGIN_TAZ_ID_NUM = "o_taz_num"
#: Trip list column: Destination Numeric TAZ ID
TRIP_LIST_COLUMN_DESTINATION_TAZ_ID_NUM = "d_taz_num"
#: Trip list column: Departure Time. Float, minutes after midnight.
TRIP_LIST_COLUMN_DEPARTURE_TIME_MIN = 'departure_time_min'
#: Trip list column: Departure Time. Float, minutes after midnight.
TRIP_LIST_COLUMN_ARRIVAL_TIME_MIN = 'arrival_time_min'
#: Trip list column: Transit Mode
TRIP_LIST_COLUMN_TRANSIT_MODE = "transit_mode"
#: Trip list column: Access Mode
TRIP_LIST_COLUMN_ACCESS_MODE = "access_mode"
#: Trip list column: Egress Mode
TRIP_LIST_COLUMN_EGRESS_MODE = "egress_mode"
#: Trip list column: Outbound (bool), true iff time target is arrival
TRIP_LIST_COLUMN_OUTBOUND = "outbound"
#: Option for :py:attr:`Passenger.TRIP_LIST_COLUMN_TIME_TARGET` (arrival time)
TIME_TARGET_ARRIVAL = "arrival"
#: Option for :py:attr:`Passenger.TRIP_LIST_COLUMN_TIME_TARGET` (departure time)
TIME_TARGET_DEPARTURE = "departure"
#: Generic transit. Specify this for mode when you mean walk, any transit modes, walk
#: TODO: get rid of this? Maybe user should always specify.
MODE_GENERIC_TRANSIT = "transit"
#: Generic transit - Numeric mode number
MODE_GENERIC_TRANSIT_NUM = 1000
#: Minumum Value of Time: 1 dollar shouldn't be worth 180 minutes
MIN_VALUE_OF_TIME = 60.0/180.0
#: Trip list column: User class. String.
TRIP_LIST_COLUMN_USER_CLASS = "user_class"
#: Trip list column: Purpose. String.
TRIP_LIST_COLUMN_PURPOSE = "purpose"
#: Trip list column: Value of time. Float.
TRIP_LIST_COLUMN_VOT = "vot"
#: Trip list column: Trace. Boolean.
TRIP_LIST_COLUMN_TRACE = "trace"
#: Column names from pathfinding
PF_COL_PF_ITERATION = 'pf_iteration' #: 0.01*pathfinding_iteration + iteration during which this path was found
PF_COL_PAX_A_TIME = 'pf_A_time' #: time path-finder thinks passenger arrived at A
PF_COL_PAX_B_TIME = 'pf_B_time' #: time path-finder thinks passenger arrived at B
PF_COL_LINK_TIME = 'pf_linktime' #: time path-finder thinks passenger spent on link
PF_COL_LINK_FARE = 'pf_linkfare' #: fare path-finder thinks passenger spent on link
PF_COL_LINK_COST = 'pf_linkcost' #: cost (generalized) path-finder thinks passenger spent on link
PF_COL_LINK_DIST = 'pf_linkdist' #: dist path-finder thinks passenger spent on link
PF_COL_WAIT_TIME = 'pf_waittime' #: time path-finder thinks passenger waited for vehicle on trip links
PF_COL_PATH_NUM = 'pathnum' #: path number, starting from 0
PF_COL_LINK_NUM = 'linknum' #: link number, starting from access
PF_COL_LINK_MODE = 'linkmode' #: link mode (Access, Trip, Egress, etc)
PF_COL_MODE = TRIP_LIST_COLUMN_MODE #: supply mode
PF_COL_ROUTE_ID = Trip.TRIPS_COLUMN_ROUTE_ID #: link route ID
PF_COL_TRIP_ID = Trip.TRIPS_COLUMN_TRIP_ID #: link trip ID
PF_COL_DESCRIPTION = 'description' #: path text description
#: todo replace/rename ??
PF_COL_PAX_A_TIME_MIN = 'pf_A_time_min'
#: pathfinding results
PF_PATHS_CSV = r"enumerated_paths.csv"
PF_LINKS_CSV = r"enumerated_links.csv"
#: results - PathSets
PATHSET_PATHS_CSV = r"pathset_paths.csv"
PATHSET_LINKS_CSV = r"pathset_links.csv"
def __init__(self, input_dir, output_dir, today, stops, routes, capacity_constraint):
"""
Constructor from dictionary mapping attribute to value.
"""
# if no demand dir, nothing to do
if input_dir == None:
self.trip_list_df = pd.DataFrame()
return
FastTripsLogger.info("-------- Reading demand --------")
FastTripsLogger.info("Capacity constraint? %x" % capacity_constraint )
self.trip_list_df = pd.read_csv(os.path.join(input_dir, Passenger.INPUT_TRIP_LIST_FILE),
skipinitialspace=True, ##LMZ
dtype={Passenger.TRIP_LIST_COLUMN_PERSON_ID :'S',
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID :'S',
Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID :'S',
Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID:'S',
Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME :'S',
Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME :'S',
Passenger.TRIP_LIST_COLUMN_PURPOSE :'S'})
trip_list_cols = list(self.trip_list_df.columns.values)
assert(Passenger.TRIP_LIST_COLUMN_PERSON_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_TIME_TARGET in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_VOT in trip_list_cols)
FastTripsLogger.debug("=========== TRIP LIST ===========\n" + str(self.trip_list_df.head()))
FastTripsLogger.debug("\n"+str(self.trip_list_df.index.dtype)+"\n"+str(self.trip_list_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.trip_list_df), "person trips", Passenger.INPUT_TRIP_LIST_FILE))
# Error on missing person ids or person_trip_ids
missing_person_ids = self.trip_list_df[pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_PERSON_ID])|
pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])]
if len(missing_person_ids)>0:
error_msg = "Missing person_id or person_trip_id fields:\n%s\n" % str(missing_person_ids)
error_msg += "Use 0 for person_id for trips without corresponding person."
FastTripsLogger.fatal(error_msg)
raise DemandInputError(Passenger.INPUT_TRIP_LIST_FILE, error_msg)
# Drop (warn) on missing origins or destinations
missing_ods = self.trip_list_df[ pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID])|
pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID]) ]
if len(missing_ods)>0:
FastTripsLogger.warn("Missing origin or destination for the following trips. Dropping.\n%s" % str(missing_ods))
self.trip_list_df = self.trip_list_df.loc[ pd.notnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID ])&
pd.notnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID]) ].reset_index(drop=True)
FastTripsLogger.warn("=> Have %d person trips" % len(self.trip_list_df))
non_zero_person_ids = len(self.trip_list_df.loc[self.trip_list_df[Passenger.TRIP_LIST_COLUMN_PERSON_ID]!="0"])
if non_zero_person_ids > 0 and os.path.exists(os.path.join(input_dir, Passenger.INPUT_PERSONS_FILE)):
self.persons_df = pd.read_csv(os.path.join(input_dir, Passenger.INPUT_PERSONS_FILE),
skipinitialspace=True,
dtype={Passenger.PERSONS_COLUMN_PERSON_ID:'S'})
self.persons_id_df = Util.add_numeric_column(self.persons_df[[Passenger.PERSONS_COLUMN_PERSON_ID]],
id_colname=Passenger.PERSONS_COLUMN_PERSON_ID,
numeric_newcolname=Passenger.PERSONS_COLUMN_PERSON_ID_NUM)
self.persons_df = pd.merge(left=self.persons_df, right=self.persons_id_df,
how="left")
persons_cols = list(self.persons_df.columns.values)
FastTripsLogger.debug("=========== PERSONS ===========\n" + str(self.persons_df.head()))
FastTripsLogger.debug("\n"+str(self.persons_df.index.dtype)+"\n"+str(self.persons_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.persons_df), "persons", Passenger.INPUT_PERSONS_FILE))
self.households_df = pd.read_csv(os.path.join(input_dir, Passenger.INPUT_HOUSEHOLDS_FILE), skipinitialspace=True)
household_cols = list(self.households_df.columns.values)
FastTripsLogger.debug("=========== HOUSEHOLDS ===========\n" + str(self.households_df.head()))
FastTripsLogger.debug("\n"+str(self.households_df.index.dtype)+"\n"+str(self.households_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.households_df), "households", Passenger.INPUT_HOUSEHOLDS_FILE))
else:
self.persons_df = pd.DataFrame()
self.households_df = pd.DataFrame()
# make sure that each tuple TRIP_LIST_COLUMN_PERSON_ID, TRIP_LIST_COLUMN_PERSON_TRIP_ID is unique
self.trip_list_df["ID_dupes"] = self.trip_list_df.duplicated(subset=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID],
keep=False)
if self.trip_list_df["ID_dupes"].sum() > 0:
error_msg = "Duplicate IDs (%s, %s) found:\n%s" % \
(Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
self.trip_list_df.loc[self.trip_list_df["ID_dupes"]==True].to_string())
FastTripsLogger.fatal(error_msg)
raise DemandInputError(Passenger.INPUT_TRIP_LIST_FILE, error_msg)
# Create unique numeric index
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM] = self.trip_list_df.index + 1
# datetime version
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME].map(lambda x: Util.read_time(x))
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME].map(lambda x: Util.read_time(x))
# float version
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME_MIN] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME].map(lambda x: \
60*x.time().hour + x.time().minute + (x.time().second/60.0) )
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME_MIN] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME].map(lambda x: \
60*x.time().hour + x.time().minute + (x.time().second/60.0) )
# TODO: validate fields?
# value of time must be greater than a threshhold or any fare becomes prohibitively expensive
low_vot = self.trip_list_df.loc[ self.trip_list_df[Passenger.TRIP_LIST_COLUMN_VOT] < Passenger.MIN_VALUE_OF_TIME ]
if len(low_vot) > 0:
FastTripsLogger.warn("These trips have value of time lower than the minimum threshhhold (%f): raising to minimum.\n%s" %
(Passenger.MIN_VALUE_OF_TIME, str(low_vot) ))
self.trip_list_df.loc[ self.trip_list_df[Passenger.TRIP_LIST_COLUMN_VOT] < Passenger.MIN_VALUE_OF_TIME,
Passenger.TRIP_LIST_COLUMN_VOT] = Passenger.MIN_VALUE_OF_TIME
if len(self.persons_df) > 0:
# Join trips to persons
self.trip_list_df = pd.merge(left=self.trip_list_df, right=self.persons_df,
how='left',
on=Passenger.TRIP_LIST_COLUMN_PERSON_ID)
# are any null?
no_person_ids = self.trip_list_df.loc[ | pd.isnull(self.trip_list_df[Passenger.PERSONS_COLUMN_PERSON_ID_NUM]) | pandas.isnull |
# TODO (Dan) What should this file be called?
import pandas as pd
import numpy as np
from collections import defaultdict
from itertools import product
from pathlib import Path
class CountsWeighter:
"""Weight kmer counts by a collection of PWMs.
Parameters
----------
pwm_dir: str (default=None)
Path to directory containing pwm files patterned *.txt.
counts: str | ndarray | DataFrame (default=None)
(Path to) kmer counts matrix.
k: int
Length of kmer.
out_path: str (default=None)
Path to .csv file for weighted counts
Attributes
----------
k_sub: int (default=4)
Length of sub_kmers to use if motif length is less than k.
Currently hard-coded to 4.
kmers: list
Str elements of all kmers of size k
df: pd.DataFrame (default=None)
# TODO (Dan) One line description of contents
"""
def __init__(self, pwm_dir=None, counts=None, k=5, out_path=None):
self.pwm_dir = pwm_dir
if pwm_dir is not None:
self.pwm_dir = Path(pwm_dir)
self.counts = counts
self.k = k
self.out_path = out_path
self.k_sub = 4
self.kmers = [''.join(p) for p in product('AGTC', repeat=self.k)]
self.df = None
if counts is not None: # get_counts depends on self.kmers
self.counts = self.get_counts(counts)
def get_counts(self, counts):
"""Load kmer counts matrix from .csv or .npy file, if necessary.
Parameters
----------
counts: str | ndarray | DataFrame
(Path to) kmer counts matrix.
Returns
-------
counts: DataFrame
Kmer counts matrix describing transcript kmer profiles.
"""
counts_types = (str, pd.DataFrame, np.ndarray)
err_msg = f'adj must be one of {counts_types}, not {type(counts)}.'
assert type(counts) in counts_types, err_msg
if isinstance(counts, str):
try:
counts = pd.read_csv(counts, index_col=0)
except UnicodeDecodeError:
counts = np.load(counts)
if isinstance(counts, np.ndarray):
counts = | pd.DataFrame(counts, columns=self.kmers) | pandas.DataFrame |
import os, re, json, sys, random, copy, argparse, torch
import numpy as np
import pandas as pd
from collections import OrderedDict
from tqdm import tqdm
from util import *
import warnings
warnings.filterwarnings('ignore')
# remove duplicates from df_a
def RemoveDuplicates(df_a, df_b, cols):
df = pd.concat([df_a, df_b[cols], df_b[cols]]).drop_duplicates(subset=cols, keep=False, ignore_index=True)
return df
# preprocessing for IEDB MS data
def MSPreprocess(filename, min_peptide_length, max_peptide_length):
df = pd.read_csv(filename)
# rename columns
rename_columns = ["%s - %s"%(i.split(".")[0], df.iloc[0][i]) for i in df.columns]
df = df.rename(columns={df.columns[i]: rename_columns[i] for i in range(len(rename_columns))})
df = df.drop(0)
# filter
df = df[["Reference - MHC ligand ID",
"Reference - PubMed ID",
"Epitope - Description",
"Epitope - Starting Position",
"Epitope - Ending Position",
"Epitope - Parent Protein Accession",
"Antigen Processing Cells - Cell Tissue Type",
"Antigen Processing Cells - Cell Type",
"MHC - Allele Name",
"Host - MHC Types Present"]]
# epitope length
df["Epitope - Length"] = df["Epitope - Description"].apply(lambda x: len(x))
df = df[(df["Epitope - Length"] >= min_peptide_length) & (df["Epitope - Length"] <= max_peptide_length)]
# mono-allelic
df = df[df["MHC - Allele Name"].str.match(r'^HLA-[A/B/C]\*\d+\:\d+$')]
df["MHC - Allele Name"] = df["MHC - Allele Name"].apply(lambda x: x.replace("HLA-",""))
return df
# preprocessing for IEDB assay data
def AssayPreprocess(filename, species, min_peptide_length, max_peptide_length):
df = pd.read_csv(filename, sep='\t')
df = df[df["species"] == species]
df = df[df["mhc"].str.contains("HLA-[ABC]\*\d+\:\d+")]
df["mhc"] = df["mhc"].apply(lambda x: x.replace("HLA-",""))
df = df[(df["peptide_length"] >= min_peptide_length) & (df["peptide_length"] <= max_peptide_length)]
df["value"] = df["meas"].apply(lambda x: max(1 - np.log10(x)/np.log10(50000), 0))
df["bind"] = (df["meas"] <= 500).astype(int)
df["source"] = "assay"
return df
# build hit dataframe
def BuildHit(df):
hit_df = df[[
"MHC - Allele Name",
"Epitope - Parent Protein Accession",
"Epitope - Starting Position",
"Epitope - Length",
"Epitope - Description"
]]
hit_df = hit_df.rename(columns={
"MHC - Allele Name": "mhc",
"Epitope - Parent Protein Accession": "protein",
"Epitope - Starting Position": "start_pos",
"Epitope - Length": "peptide_length",
"Epitope - Description": "sequence"
})
hit_df["meas"] = 1
hit_df["value"] = 1
hit_df["bind"] = 1
hit_df["source"] = "MS"
return hit_df
# build decoy from the same protein of the hit sample
def BuildProtDecoy(prot_dict, prot_len_dict, df, len_dict):
decoy_list = list()
alleles = list(df['mhc'].unique())
for allele in tqdm(alleles):
temp_df = df[(df['mhc'] == allele) & (df['bind'] == 1)]
prots = list(temp_df['protein'].unique())
for prot in prots:
pos_num = temp_df[temp_df['protein'] == prot].shape[0]
start_pos_list = list(temp_df[temp_df['protein'] == prot]['start_pos'].unique())
for length, multiple in len_dict.items():
decoy_num = multiple * pos_num
try:
candidate_pos = [i for i in range(prot_len_dict[prot] - length)
if i not in start_pos_list]
except:
continue
candidate_pos = random.sample(candidate_pos, min(len(candidate_pos), decoy_num))
for pos in candidate_pos:
d = {'mhc': allele,
'protein': prot,
'start_pos': pos,
'peptide_length': length,
'sequence': prot_dict[prot][pos: pos+length]}
decoy_list.append(d)
decoy_df = | pd.DataFrame(decoy_list) | pandas.DataFrame |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
import altair as alt
import vega_datasets
import pandas as pd
import dash_bootstrap_components as dbc
app = dash.Dash(__name__, assets_folder='assets', external_stylesheets=[dbc.themes.CERULEAN])
app.config['suppress_callback_exceptions'] = True
server = app.server
app.title = 'Group112 Dash app: Unemployment'
def make_plot1(year_range=[2003,2005], stat = 'rate'): #Add in a default value to start with
#THEME
def mds_special():
font = "Arial"
axisColor = "#000000"
gridColor = "#DEDDDD"
return {
"config": {
"title": {
"fontSize": 24,
"font": font,
"anchor": "start", # equivalent of left-aligned.
"fontColor": "#000000"
},
'view': {
"height": 300,
"width": 400
},
"axisX": {
"domain": True,
#"domainColor": axisColor,
"gridColor": gridColor,
"domainWidth": 1,
"grid": False,
"labelFont": font,
"labelFontSize": 12,
"labelAngle": 0,
"tickColor": axisColor,
"tickSize": 5, # default, including it just to show you can change it
"titleFont": font,
"titleFontSize": 16,
"titlePadding": 10, # guessing, not specified in styleguide
"title": "X Axis Title (units)",
},
"axisY": {
"domain": False,
"grid": True,
"gridColor": gridColor,
"gridWidth": 1,
"labelFont": font,
"labelFontSize": 14,
"labelAngle": 0,
#"ticks": False, # even if you don't have a "domain" you need to turn these off.
"titleFont": font,
"titleFontSize": 16,
"titlePadding": 10, # guessing, not specified in styleguide
"title": "Y Axis Title (units)",
# titles are by default vertical left of axis so we need to hack this
#"titleAngle": 0, # horizontal
#"titleY": -10, # move it up
#"titleX": 18, # move it to the right so it aligns with the labels
},
}
}
# register the custom theme under a chosen name
alt.themes.register('mds_special', mds_special)
# enable the newly registered theme
alt.themes.enable('mds_special')
#alt.themes.enable('none') # to return to default
#READ IN DATA
df_raw = pd.read_csv('../data/unemply_df_year.csv', index_col=0)
df = df_raw.drop(columns = ['count', 'rate'])
df = df_raw.pivot(index = 'industry', columns = 'year', values = 'total').reset_index()
new_df = pd.DataFrame(df["industry"])
if stat == "rate":
new_df["rate"] = round((df[year_range[1]] - df[year_range[0]]) / df[year_range[0]], 2)
cb = alt.Chart(new_df).mark_bar(size = 2).encode(
alt.X("rate:Q", title = "Percentage Change",
axis = alt.Axis(tickCount=10, format = '%')),
alt.Y("industry:O", title = ''),
color = alt.condition(alt.datum.rate > 0, alt.value("forestgreen"), alt.value("red")),
tooltip = ["rate"])
cp = alt.Chart(new_df).mark_point(size = 70, filled = True, opacity = 1).encode(
alt.X("rate:Q", title = "Percentage Change",
axis = alt.Axis(tickCount=10, format = '%')),
alt.Y("industry:O", title = ''),
color = alt.condition(alt.datum.rate > 0, alt.value("forestgreen"), alt.value("red")),
tooltip = ["rate"])
if stat == "count":
new_df["count"] = round(df[year_range[1]] - df[year_range[0]])
cb = alt.Chart(new_df).mark_bar(size = 2).encode(
alt.X("count:Q", title = "Absolute Change"),
alt.Y("industry:O", title = ''),
color = alt.condition(alt.datum.count > 0, alt.value("forestgreen"), alt.value("red")),
tooltip = ["count"])
cp = alt.Chart(new_df).mark_point(size = 70, filled = True, opacity = 1).encode(
alt.X("count:Q", title = "Absolute Change"),
alt.Y("industry:O", title = ''),
color = alt.condition(alt.datum.count > 0, alt.value("forestgreen"), alt.value("red")),
tooltip = ["count"])
return (cb + cp).properties(
width = 575,
height = 450
)
def make_plot2(industries = ["Agriculture", "Construction"], stat = "rate"): #Add in a default value to start with
#THEME
def mds_special():
font = "Arial"
axisColor = "#000000"
gridColor = "#DEDDDD"
return {
"config": {
"title": {
"fontSize": 24,
"font": font,
"anchor": "start", # equivalent of left-aligned.
"fontColor": "#000000"
},
'view': {
"height": 500,
"width": 1000
},
"axisX": {
"domain": True,
#"domainColor": axisColor,
"gridColor": gridColor,
"domainWidth": 1,
"grid": False,
"labelFont": font,
"labelFontSize": 12,
"labelAngle": 0,
"tickColor": axisColor,
"tickSize": 5, # default, including it just to show you can change it
"titleFont": font,
"titleFontSize": 16,
"titlePadding": 10, # guessing, not specified in styleguide
"title": "X Axis Title (units)",
},
"axisY": {
"domain": False,
"grid": True,
"gridColor": gridColor,
"gridWidth": 1,
"labelFont": font,
"labelFontSize": 14,
"labelAngle": 0,
#"ticks": False, # even if you don't have a "domain" you need to turn these off.
"titleFont": font,
"titleFontSize": 16,
"titlePadding": 10, # guessing, not specified in styleguide
"title": "Y Axis Title (units)",
# titles are by default vertical left of axis so we need to hack this
#"titleAngle": 0, # horizontal
#"titleY": -10, # move it up
#"titleX": 18, # move it to the right so it aligns with the labels
},
}
}
# register the custom theme under a chosen name
alt.themes.register('mds_special', mds_special)
# enable the newly registered theme
alt.themes.enable('mds_special')
#alt.themes.enable('none') # to return to default
#READ IN DATA
df_raw = | pd.read_csv('../data/unemply_df_year.csv', index_col=0) | pandas.read_csv |
from calendar import monthrange
from datetime import datetime
import pandas as pd
from flask import Blueprint, jsonify, abort, g
from gatekeeping.api.budget import get_budget
from gatekeeping.api.position import get_positions
from gatekeeping.api.function import get_functions, get_function
from gatekeeping.api.user import get_user_function
def get_line_chart(function=None):
positions = get_positions(check_submitter=False)
budget = get_budget()
columns = [row.keys() for row in positions]
positions = pd.DataFrame(positions, columns=columns[0])
budget = pd.DataFrame(budget, columns=columns[0])
if function:
if function != 'All':
positions = positions.loc[positions['function'] == function]
budget = budget.loc[budget['function'] == function]
if g.user['type'] != 'ADMIN' and function == 'All':
functions = get_user_function(g.user['id'])
function_names = [get_function(function['function_id'])['name'] for function in functions]
positions = positions.loc[positions['function'].isin(function_names)]
budget = budget.loc[budget['function'].isin(function_names)]
positions['FTE'] = pd.to_numeric(positions['hours'], errors='coerce') / 40
budget['FTE'] = pd.to_numeric(budget['hours'], errors='coerce') / 40
positions['salary'] = pd.to_numeric(positions['salary'], errors='coerce')
positions['fringe_benefit'] = pd.to_numeric(positions['fringe_benefit'], errors='coerce')
positions['social_security_contribution'] = pd.to_numeric(positions['social_security_contribution'], errors='coerce')
budget['salary'] = pd.to_numeric(budget['salary'], errors='coerce')
budget['fringe_benefit'] = pd.to_numeric(budget['fringe_benefit'], errors='coerce')
budget['social_security_contribution'] = pd.to_numeric(budget['social_security_contribution'], errors='coerce')
positions['total_cost'] = positions['salary'].add(positions['fringe_benefit'], fill_value=0).add(positions['social_security_contribution'], fill_value=0)
budget['total_cost'] = budget['salary'].add(budget['fringe_benefit'], fill_value=0).add(budget['social_security_contribution'], fill_value=0)
positions['start_date'] = pd.to_datetime(positions['start_date'], errors='coerce')
positions['end_date'] = | pd.to_datetime(positions['end_date'], errors='coerce') | pandas.to_datetime |
from collections import OrderedDict
import contextlib
from datetime import datetime, time
from functools import partial
import os
from urllib.error import URLError
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.util.testing as tm
@contextlib.contextmanager
def ignore_xlrd_time_clock_warning():
"""
Context manager to ignore warnings raised by the xlrd library,
regarding the deprecation of `time.clock` in Python 3.7.
"""
with warnings.catch_warnings():
warnings.filterwarnings(
action="ignore",
message="time.clock has been deprecated",
category=DeprecationWarning,
)
yield
read_ext_params = [".xls", ".xlsx", ".xlsm", ".ods"]
engine_params = [
# Add any engines to test here
# When defusedxml is installed it triggers deprecation warnings for
# xlrd and openpyxl, so catch those here
pytest.param(
"xlrd",
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param(
"openpyxl",
marks=[
td.skip_if_no("openpyxl"),
pytest.mark.filterwarnings("ignore:.*html argument"),
],
),
pytest.param(
None,
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param("odf", marks=td.skip_if_no("odf")),
]
def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool:
"""
Filter out invalid (engine, ext) pairs instead of skipping, as that
produces 500+ pytest.skips.
"""
engine = engine.values[0]
if engine == "openpyxl" and read_ext == ".xls":
return False
if engine == "odf" and read_ext != ".ods":
return False
if read_ext == ".ods" and engine != "odf":
return False
return True
def _transfer_marks(engine, read_ext):
"""
engine gives us a pytest.param objec with some marks, read_ext is just
a string. We need to generate a new pytest.param inheriting the marks.
"""
values = engine.values + (read_ext,)
new_param = pytest.param(values, marks=engine.marks)
return new_param
@pytest.fixture(
autouse=True,
params=[
_transfer_marks(eng, ext)
for eng in engine_params
for ext in read_ext_params
if _is_valid_engine_ext_pair(eng, ext)
],
)
def engine_and_read_ext(request):
"""
Fixture for Excel reader engine and read_ext, only including valid pairs.
"""
return request.param
@pytest.fixture
def engine(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return engine
@pytest.fixture
def read_ext(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return read_ext
class TestReaders:
@pytest.fixture(autouse=True)
def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for read_excel calls.
"""
func = partial(pd.read_excel, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "read_excel", func)
def test_usecols_int(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["A", "B", "C"])
# usecols as int
msg = "Passing an integer for `usecols`"
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols=3)
# usecols as int
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=3
)
def test_usecols_list(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["B", "C"])
df1 = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=[0, 2, 3]
)
df2 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=[0, 2, 3]
)
# TODO add index to xls file)
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_usecols_str(self, read_ext, df_ref):
df1 = df_ref.reindex(columns=["A", "B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A:D"
)
# TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C,D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C,D"
)
# TODO add index to xls file
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C:D"
)
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
@pytest.mark.parametrize(
"usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]]
)
def test_usecols_diff_positional_int_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["A", "C"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=usecols
)
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.parametrize("usecols", [["B", "D"], ["D", "B"]])
def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["B", "D"]]
expected.index = range(len(expected))
result = pd.read_excel("test1" + read_ext, "Sheet1", usecols=usecols)
tm.assert_frame_equal(result, expected, check_names=False)
def test_read_excel_without_slicing(self, read_ext, df_ref):
expected = df_ref
result = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str(self, read_ext, df_ref):
expected = df_ref[["C", "D"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols="A,D:E"
)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str_invalid(self, read_ext):
msg = "Invalid column name: E1"
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, "Sheet1", usecols="D:E1")
def test_index_col_label_error(self, read_ext):
msg = "list indices must be integers.*, not str"
with pytest.raises(TypeError, match=msg):
pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=["A"], usecols=["A", "C"]
)
def test_index_col_empty(self, read_ext):
# see gh-9208
result = pd.read_excel("test1" + read_ext, "Sheet3", index_col=["A", "B", "C"])
expected = DataFrame(
columns=["D", "E", "F"],
index=MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [None, 2])
def test_index_col_with_unnamed(self, read_ext, index_col):
# see gh-18792
result = pd.read_excel("test1" + read_ext, "Sheet4", index_col=index_col)
expected = DataFrame(
[["i1", "a", "x"], ["i2", "b", "y"]], columns=["Unnamed: 0", "col1", "col2"]
)
if index_col:
expected = expected.set_index(expected.columns[index_col])
tm.assert_frame_equal(result, expected)
def test_usecols_pass_non_existent_column(self, read_ext):
msg = (
"Usecols do not match columns, "
"columns expected but not found: " + r"\['E'\]"
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E"])
def test_usecols_wrong_type(self, read_ext):
msg = (
"'usecols' must either be list-like of "
"all strings, all unicode, all integers or a callable."
)
with pytest.raises(ValueError, match=msg):
| pd.read_excel("test1" + read_ext, usecols=["E1", 0]) | pandas.read_excel |
# -*- coding: utf-8 -*-
"""
Homogeneous Surface Diffusion Model (HSDM) for ion exchange (IX)
For an overview of ion exchange column modeling, consult:
<NAME>., 2013. Principles of ion exchange technology. Butterworth-Heinemann.
and
<NAME>. (1995). Ion exchange. Courier Corporation.
For details on the numerical method of solution (Orthogonal Collocation), consult:
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (1986).
Transport of organic compounds with saturated groundwater flow: Model development and
parameter sensitivity. Water Resources Research, 22(3), 271-284.
Assumptions:
Constant selectivity.
Plug flow.
Fickian diffusion.
Common mass transport parameters for all species.
TODO: Option to calculate film transfer coefficient from correlation.
TODO: Clean, test, double check bicarb/alka output
XXX: Needs a way to specify max_step in solve_ivp to avoid missing influent features
(NOTE: t_eval doesn't solve this problem. It just interpolates . . .)
@authors: <NAME>, <NAME>, <NAME>
"""
import timeit
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy.integrate import solve_ivp
from .colloc import build_collocation, advect_operator
from .paramsheets import conv_time, conv_database, conv_conc, conv_params_data
bicarbMW = 61.02
def approx_Jac_struc(nr, NION, nz):
"""
Find approximate Jacobian structure to speed up BDF or Radau calculations
in solve_ivp
...
nr: Number of radial collocation points
NION: Number of species
nz: Number of axial collocation points
...
Because the order of the concentration array is (nr, NION, nz),
and the order of r is [C, q_r=0, ..., q_r=rb], the ion exchange zones
will be in the (NION*nz x NION*nz) corners of the Jacobian.
They have band structure, but we will assume they are dense for simplicity.
...
The Jacobian is dominated by a sparse banded structure from diffusion in the
solid phase. There are 2*nr - 1 diagonals.
...
Returns Jac_struc: an array of ones and zeros.
"""
NEQ = (nr+1) * NION * nz
nzni = NION * nz
Jac_struc = np.zeros((NEQ, NEQ))
# Diffusion zone
Jac_struc[nzni:, nzni:] += np.eye(NEQ-nzni, NEQ-nzni, k=0)
for ii in range(1, nr):
Jac_struc[nzni:, nzni:] += np.eye(NEQ-nzni, NEQ-nzni, k=(ii*nzni))
Jac_struc[nzni:, nzni:] += np.eye(NEQ-nzni, NEQ-nzni, k=-(ii*nzni))
# Block off corners (ion exchange zones)
Jac_struc[0:nzni, 0:nzni] = 1.0
Jac_struc[0:nzni, (nr)*nzni:] = 1.0
Jac_struc[(nr)*nzni:, 0:nzni] = 1.0
Jac_struc[(nr)*nzni:, (nr)*nzni:] = 1.0
return Jac_struc
class HSDMIX:
""" HSDM ion exchange: column process. Plug flow."""
def __init__(self, inp_file):
""" """
self.load_data(inp_file)
def load_data(self, inp_file):
"""
JBB: Unit options. Flow Rate, Diameter. ALKALINITY
TODO: Deal with case sensitivity: something that capitalizes everything...
"""
xls = pd.ExcelFile(inp_file)
self.params = pd.read_excel(xls, \
sheet_name='params',\
header = [0],\
index_col = [0])
self.params = conv_params_data(self.params)
self.params = self.params.drop('units', axis=1)['value'] #drops unused column
self.ions = pd.read_excel(xls, \
sheet_name='ions',\
header=[0],\
index_col=[0])
self.valences = self.ions['valence'].values
self.Cin_t = pd.read_excel(xls, \
sheet_name='Cin',\
header=[0], \
index_col = [0],
dtype=np.float64)
self.Cin_temp = self.Cin_t.copy(deep=False)
self.time_mult = self.params['time']
self.Cin_dict = self.ions.to_dict('index')
self.u_Cin2 = {}
self.val2 = {}
self.MW2 = {}
self.u_Cout2 = {}
for c in self.Cin_dict.keys():
self.u_Cin2[c] = self.Cin_dict[c]['units']
self.val2[c] = self.Cin_dict[c]['valence']
self.MW2[c] = self.Cin_dict[c]['mw']
self.u_Cout2[c] = 'meq'
self.C_out2, self.u_Cin2, self.u_C_out2 = conv_database(self.Cin_temp, \
self.u_Cin2, \
self.u_Cout2, \
conv_conc, \
self.MW2, \
self.val2)
if 'BICARBONATE' not in self.C_out2.columns:
if 'ALKALINITY' in self.C_out2.columns:
# initialize a column that is the same size as other columns
self.C_out2['BICARBONATE'] = self.C_out2['ALKALINITY'] * 1.
self.ions.loc['BICARBONATE'] = self.ions.loc['ALKALINITY']
self.ions.at['BICARBONATE','mw'] = bicarbMW
self.ions.at['BICARBONATE','units'] = 'mg'
pH_exp = self.C_out2['PH'] - 10. #convenience
self.C_out2['BICARBONATE'] = (self.C_out2['ALKALINITY'] - 5. * 10 **pH_exp)/\
(1. + 0.94 * 10**pH_exp)
else:
print('Warning: No BICARBONATE or ALKALINITY concentration defined.')
#clean up un needed columns
if 'ALKALINITY' in self.C_out2.columns:
self.C_out2 = self.C_out2.drop('ALKALINITY', axis=1)
self.ions = self.ions.drop('ALKALINITY')
if 'PH' in self.C_out2.columns:
self.C_out2 = self.C_out2.drop('PH', axis=1)
self.names = self.ions.index.values
def save_results(self, output_file_name, **kwargs):
'''
Returns:
generate and write a *.xlsx file in the parent directory;
sheet_name = Cout;
*** convert results from solver() to the requested uints; ***
*** convert results from solver() to the input units if units are not specified; ***
Parameters:
output_file_name : file name as string;
period : string;
units : string;
*** takes units from the input file if units are not specified; ***
'''
period = kwargs.get('period', 'hours')
units = kwargs.get('units', None)
u_Cout2 = {}
if units == None:
u_Cout2 = self.u_Cin2
else:
for c in self.Cin_dict.keys():
u_Cout2[c] = units
u_Cin2 = {}
for c in self.Cin_dict.keys():
u_Cin2[c] = 'meq'
temp_t = pd.Series(self.result.t * self.timeback)
tmp_u = self.u_result[0,:,-1,:]
if period == 'BV':
bv = temp_t / (self.params['L'] / self.params['v'])
idx = | pd.Index(bv, name=period) | pandas.Index |
import numpy as np
import pandas as pd
import pytest
from rnanorm.normalization import fpkm
def test_fpkm_normalization():
"""Test FPKM formula implementation.
FPKM expressions of a minimal example have been manually computed and
compared against implementation.
Expression data:
S1 S2
ENSG00000136807 1000 400
ENSG00000176903 2000 24000
ENSG00000241490 17000 600
Gene lengths:
ENSG00000136807 1000
ENSG00000176903 2000
ENSG00000241490 5000
Manually computed FPKM (rounded):
S1 S2
ENSG00000136807 1 2
ENSG00000176903 2 1.5
ENSG00000241490 1 1
"""
genes = ["ENSG00000136807", "ENSG00000176903", "ENSG00000241490"]
gene_lengths = [1000, 2000, 5000]
expressions = [[1000, 400], [2000, 24000], [17000, 600]]
manually_computed_FPKM = [[50000, 16000], [50000, 480000], [170000, 4800]]
X = pd.DataFrame(expressions, index=genes, columns=["S1", "S2"])
y = pd.DataFrame(gene_lengths, index=genes, columns=["GENE_LENGTHS"])
FPKM = fpkm(X, y)
assert np.all(np.asarray(FPKM, dtype=int) == manually_computed_FPKM) # Test Pandas array
FPKM = fpkm(X.to_numpy(), y.to_numpy())
assert np.all(FPKM.astype(int) == manually_computed_FPKM) # Test Numpy array
def test_devision_by_zero():
genes = ["ENSG00000136807", "ENSG00000176903", "ENSG00000241490"]
gene_lengths = [1000, 2000, 3000]
expressions = [[0], [0], [0]]
expected_FPKM = [[0], [0], [0]]
X = pd.DataFrame(expressions, index=genes, columns=["S1"])
y = pd.DataFrame(gene_lengths, index=genes, columns=["GENE_LENGTHS"])
FPKM = fpkm(X.to_numpy(), y.to_numpy())
print(FPKM)
assert np.all(FPKM.astype(int) == expected_FPKM)
def test_geneset_mismatch():
genes = ["ENSG00000136807", "ENSG00000176903", "ENSG00000241490"]
gene_lengths = [3000, 2000]
expressions = [[0], [0], [0]]
expected_FPKM = [[0], [0]]
X = | pd.DataFrame(expressions, index=genes, columns=["S1"]) | pandas.DataFrame |
"""
Copyright (c) 2021, Electric Power Research Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of DER-VET nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
EnergyTimeShift.py
This Python class contains methods and attributes specific for service analysis within StorageVet.
"""
from storagevet.ValueStreams.ValueStream import ValueStream
import numpy as np
import cvxpy as cvx
import pandas as pd
from storagevet.Finances import Financial
import storagevet.Library as Lib
from storagevet.ErrorHandling import *
SATURDAY = 5
class EnergyTimeShift(ValueStream):
""" Retail energy time shift. A behind the meter service.
"""
def __init__(self, params):
""" Generates the objective function, finds and creates constraints.
Args:
params (Dict): input parameters
"""
ValueStream.__init__(self, 'retailETS', params)
self.price = params['price']
self.tariff = params['tariff']
self.growth = params['growth']/100
self.billing_period_bill = pd.DataFrame()
self.monthly_bill = pd.DataFrame()
def grow_drop_data(self, years, frequency, load_growth):
""" Adds data by growing the given data OR drops any extra data that might have slipped in.
Update variable that hold timeseries data after adding growth data. These method should be called after
add_growth_data and before the optimization is run.
Args:
years (List): list of years for which analysis will occur on
frequency (str): period frequency of the timeseries data
load_growth (float): percent/ decimal value of the growth rate of loads in this simulation
"""
data_year = self.price.index.year.unique()
no_data_year = {pd.Period(year) for year in years} - {pd.Period(year) for year in data_year} # which years do we not have data for
if len(no_data_year) > 0:
for yr in no_data_year:
source_year = pd.Period(max(data_year))
years = yr.year - source_year.year
# Build Energy Price Vector based on the new year
new_index = Lib.create_timeseries_index([yr.year], frequency)
temp = pd.DataFrame(index=new_index)
weekday = (new_index.weekday < SATURDAY).astype('int64')
he = (new_index + pd.Timedelta('1s')).hour + 1
temp['price'] = np.zeros(len(new_index))
for p in range(len(self.tariff)):
# edit the pricedf energy price and period values for all of the periods defined
# in the tariff input file
bill = self.tariff.iloc[p, :]
mask = Financial.create_bill_period_mask(bill, temp.index.month, he, weekday)
current_energy_prices = temp.loc[mask, 'price'].values
if np.any(np.greater(current_energy_prices, 0)):
# More than one energy price applies to the same time step
TellUser.warning('More than one energy price applies to the same time step.')
# Add energy prices
temp.loc[mask, 'price'] += bill['Value']
# apply growth to new energy rate
new_p_energy = temp['price']*(1+self.growth)**years
self.price = pd.concat([self.price, new_p_energy], sort=True) # add to existing
def objective_function(self, mask, load_sum, tot_variable_gen, generator_out_sum, net_ess_power, annuity_scalar=1):
""" Generates the full objective function, including the optimization variables.
Args:
mask (DataFrame): A boolean array that is true for indices corresponding to time_series data included
in the subs data set
tot_variable_gen (Expression): the sum of the variable/intermittent generation sources
load_sum (list, Expression): the sum of load within the system
generator_out_sum (list, Expression): the sum of conventional generation within the system
net_ess_power (list, Expression): the sum of the net power of all the ESS in the system. [= charge - discharge]
annuity_scalar (float): a scalar value to be multiplied by any yearly cost or benefit that helps capture the cost/benefit over
the entire project lifetime (only to be set iff sizing)
Returns:
A dictionary with expression of the objective function that it affects. This can be passed into the cvxpy solver.
"""
size = sum(mask)
price = cvx.Parameter(size, value=self.price.loc[mask].values, name='energy_price')
load_price = cvx.multiply(price, load_sum)
ess_net_price = cvx.multiply(price, net_ess_power)
variable_gen_prof = cvx.multiply(-price, tot_variable_gen)
generator_prof = cvx.multiply(-price, generator_out_sum)
cost = cvx.sum(load_price + ess_net_price + variable_gen_prof + generator_prof)
return {self.name: cost * self.dt * annuity_scalar}
def timeseries_report(self):
""" Summaries the optimization results for this Value Stream.
Returns: A timeseries dataframe with user-friendly column headers that summarize the results
pertaining to this instance
"""
report = | pd.DataFrame(index=self.price.index) | pandas.DataFrame |
import numpy as np
import pandas as pd
from package import dataHandler as dh
from package import featureHandler as fh
def get_effect_sizes(participants,features,visual_features=None,return_dataframe=False):
"""
features: df sampled at a specific fs
"""
PD_participants = dh.df_retrieve(participants,{'is PD': True})
PD_sway_files = PD_participants['EC sway file'].dropna().tolist() + PD_participants['EO sway file'].dropna().tolist()
control_participants = dh.df_retrieve(participants,{'is PD': False})
control_sway_files = control_participants['EC sway file'].dropna().tolist() + control_participants['EO sway file'].dropna().tolist()
feature_names, visual_feature_names = fh.get_feature_names()
effect_size = | pd.DataFrame(columns=feature_names) | pandas.DataFrame |
import pandas as pd
import os
from oonipipeline.config import UPLOAD_PATH
from flask import request, send_from_directory, after_this_request
from flask_restful import Resource
from oonipipeline.models.models import MetaTableSchema, MetaTable, DnsConsistency, DnsTestKeys, Errors, Failed, Inconsistent,\
DnsQueries, DnsAnswers, DnsConsistencySchema, DnsTestKeysSchema, ErrorsSchema, FailedSchema, InconsistentSchema, \
DnsQueriesSchema, DnsAnswersSchema, db
class DnsConsistencyResource(Resource):
"""
GET: Downloads specified tables through path parameters
"""
def get(self, table=None):
set_id = request.args.get('set_id')
api_download = request.args.get('api_download')
if not table:
if not set_id:
# /api/dns_consistency
# returns all dns_consistency data sets from meta_table
query = db.session.query(MetaTable).filter_by(test_name='dns_consistency').all()
result = MetaTableSchema(many=True).dump(query).data
return {'status': 'success', 'data': result}
else:
# /api/dns_consistency?set_id={set_id}
# returns specified data set from dns_consistency table
query = db.session.query(DnsConsistency).filter_by(set_id=set_id).all()
result = DnsConsistencySchema(many=True).dump(query).data
return {'status': 'success', 'data': result}
if not set_id:
return {'status': 'error', 'message': 'Please provide a set_id'}, 400
else:
if not db.session.query(MetaTable).filter_by(set_id=set_id).all():
return {'status': 'error', 'message': 'Please provide a valid set_id'}, 400
if table == 'dns_consistency':
query = db.session.query(DnsConsistency).filter_by(set_id=set_id).all()
result = DnsConsistencySchema(many=True).dump(query).data
df = pd.DataFrame(result)
filename = 'set_' + set_id + '-dns_consistency.csv'
df.to_csv(os.path.join(UPLOAD_PATH, filename))
if api_download != 'true':
@after_this_request
def remove_csv(response):
os.remove(os.path.join(UPLOAD_PATH, filename))
return response
return send_from_directory(UPLOAD_PATH, filename, mimetype="text/csv", as_attachment=True)
return {'status': 'success', 'message': 'Please check your home directory to see the downloaded csv '
'file'}
elif table == 'dns_answers':
query = db.session.query(DnsAnswers).join(DnsQueries).join(DnsTestKeys).join(DnsConsistency).\
filter_by(set_id=set_id).all()
result = DnsAnswersSchema(many=True).dump(query).data
df = pd.DataFrame(result)
filename = 'set_' + set_id + '-dns_answers.csv'
df.to_csv(os.path.join(UPLOAD_PATH, filename))
if api_download != 'true':
@after_this_request
def remove_csv(response):
os.remove(os.path.join(UPLOAD_PATH, filename))
return response
return send_from_directory(UPLOAD_PATH, filename, mimetype="text/csv", as_attachment=True)
return {'status': 'success', 'message': 'Please check your home directory to see the downloaded csv '
'file'}
elif table == 'dns_queries':
query = db.session.query(DnsQueries).join(DnsTestKeys).join(DnsConsistency).\
filter_by(set_id=set_id).all()
result = DnsQueriesSchema(many=True).dump(query).data
df = pd.DataFrame(result)
filename = 'set_' + set_id + '-dns_queries.csv'
df.to_csv(os.path.join(UPLOAD_PATH, filename))
if api_download != 'true':
@after_this_request
def remove_csv(response):
os.remove(os.path.join(UPLOAD_PATH, filename))
return response
return send_from_directory(UPLOAD_PATH, filename, mimetype="text/csv", as_attachment=True)
return {'status': 'success', 'message': 'Please check your home directory to see the downloaded csv '
'file'}
elif table == 'dns_inconsistent':
query = db.session.query(Inconsistent).join(DnsTestKeys).join(DnsConsistency).filter_by(set_id=set_id).\
all()
result = InconsistentSchema(many=True).dump(query).data
df = pd.DataFrame(result)
filename = 'set_' + set_id + '-dns_inconsistent.csv'
df.to_csv(os.path.join(UPLOAD_PATH, filename))
if api_download != 'true':
@after_this_request
def remove_csv(response):
os.remove(os.path.join(UPLOAD_PATH, filename))
return response
return send_from_directory(UPLOAD_PATH, filename, mimetype="text/csv", as_attachment=True)
return {'status': 'success', 'message': 'Please check your home directory to see the downloaded csv '
'file'}
elif table == 'dns_failed':
query = db.session.query(Failed).join(DnsTestKeys).join(DnsConsistency).filter_by(set_id=set_id).all()
result = FailedSchema(many=True).dump(query).data
df = pd.DataFrame(result)
filename = 'set_' + set_id + '-dns_failed.csv'
df.to_csv(os.path.join(UPLOAD_PATH, filename))
if api_download != 'true':
@after_this_request
def remove_csv(response):
os.remove(os.path.join(UPLOAD_PATH, filename))
return response
return send_from_directory(UPLOAD_PATH, filename, mimetype="text/csv", as_attachment=True)
return {'status': 'success', 'message': 'Please check your home directory to see the downloaded csv '
'file'}
elif table == 'dns_errors':
query = db.session.query(Errors).join(DnsTestKeys).join(DnsConsistency).filter_by(set_id=set_id).all()
result = ErrorsSchema(many=True).dump(query).data
df = pd.DataFrame(result)
filename = 'set_' + set_id + '-dns_errors.csv'
df.to_csv(os.path.join(UPLOAD_PATH, filename))
if api_download != 'true':
@after_this_request
def remove_csv(response):
os.remove(os.path.join(UPLOAD_PATH, filename))
return response
return send_from_directory(UPLOAD_PATH, filename, mimetype="text/csv", as_attachment=True)
return {'status': 'success', 'message': 'Please check your home directory to see the downloaded csv '
'file'}
elif table == 'dns_test_keys':
query = db.session.query(DnsTestKeys).join(DnsConsistency).filter_by(set_id=set_id).all()
result = DnsTestKeysSchema(many=True).dump(query).data
df = | pd.DataFrame(result) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sun May 22 10:30:01 2016
SC process signups functions
@author: tkc
"""
#%%
import pandas as pd
import numpy as np
from datetime import datetime, date
import re, glob, math
from openpyxl import load_workbook # writing to Excel
from PIL import Image, ImageDraw, ImageFont
import tkinter as tk
import pkg.SC_config as cnf # _OUTPUT_DIR and _INPUT_DIR
#%%
def combinephrases(mylist):
''' Combine list of phrases using commas & and '''
if len(mylist)==1:
return str(mylist[0])
elif len(mylist)==2:
tempstr=str(mylist[0])+ ' and ' +str(mylist[1])
return tempstr
else:
rest=mylist[:-1]
rest=[str(i) for i in rest]
last=mylist[-1]
tempstr=', '.join(rest) +' and ' + str(last)
return tempstr#%%
def writetoxls(df, sheetname, xlsfile):
''' Generic write of given df to specified tab of given xls file '''
book=load_workbook(xlsfile)
writer=pd.ExcelWriter(xlsfile, engine='openpyxl', datetime_format='mm/dd/yy', date_format='mm/dd/yy')
writer.book=book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
df.to_excel(writer,sheet_name=sheetname,index=False) # this overwrites existing file
writer.save() # saves xls file with all modified data
return
def loadtransfers(df, signups):
''' Load transferred players and add to signups (then run player ID);
transfers added as normal players but need fake billing entries
'''
df=df.rename(columns={'Fname':'First','Lname':'Last','Street':'Address','Parish':'Parish of Registration'})
df=df.rename(columns={'Phone':'Phone1','Birthdate':'DOB','Sex':'Gender','Open/Closed':'Ocstatus'})
# Replace Girl, Boy with m f
df.loc[:,'Gender']=df.Gender.replace('F','Girl')
df.loc[:,'Gender']=df.Gender.replace('M','Boy')
# Manually enter sport
print('Enter sport for transferred players')
sport=input()
df.loc[:,'Sport']=sport
df=df.dropna(subset=['First']) # remove blank rows if present
mycols=[col for col in df if col in signups]
df=df[mycols]
df=formatnamesnumbers(df)
# place date/transfer in timestamp
mystamp=datetime.strftime(datetime.now(),'%m/%d/%y')+' transfer'
df.loc[:,'Timestamp']=mystamp
mycols=signups.columns
signups=signups.append(df, ignore_index=True)
signups=signups[mycols]
return signups
def packagetransfers(teams, Mastersignups, famcontact, players, season, year, acronyms, messfile):
''' Package roster and contact info by sport- school and save as separate xls files
also generate customized e-mails in single log file (for cut and paste send to appropriate persons)
args:
teams - loaded team list
mastersignups - signups w/ team assignment
players -player DB
famcontact - family contact db
season - Fall, Winter or Spring
year - starting sports year (i.e. 2019 for 2019-20 school year)
acronyms - school/parish specific abbreviations
messfile - e-mail message template w/ blanks
returns:
'''
teams=teams[pd.notnull(teams['Team'])]
transferteams=np.ndarray.tolist(teams[teams['Team'].str.contains('#')].Team.unique())
transSU=Mastersignups[Mastersignups['Team'].isin(transferteams)]
# ensure that these are from correct season/year
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
transSU=transSU.loc[(transSU['Sport'].isin(sportlist)) & (transSU['Year']==year)] # season is not in mastersignups... only individual sports
# get family contact info from famcontacts
transSU=pd.merge(transSU, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# get school from players.csv
transSU=pd.merge(transSU, players, how='left', on=['Plakey'], suffixes=('','_r2'))
# get division from Teams xls (for roster)
transSU=pd.merge(transSU, teams, how='left', on=['Team'], suffixes=('','_r3')) # effectively adds other team info for roster toall players
transSU.loc[:,'Role']='Player' # add column for role
# transSU['Open/Closed']='Closed'
# Sort by grade pre-split
transSU.loc[:,'Grade']=transSU.Grade.replace('K',0)
transSU.loc[:,'Grade']=transSU.Grade.apply(int)
transSU=transSU.sort_values(['Grade'], ascending=True)
transSU.loc[:,'Grade']=transSU.Grade.replace(0,'K') # replace K with zero to allow sorting
# Column for sorting by transferred to school
transSU.loc[:,'Transchool']=transSU['Team'].str.split('#').str[0]
grouped=transSU.groupby(['Sport','Transchool'])
for [sport, school], group in grouped:
# prepare roster tab
xlsname=cnf._OUTPUT_DIR+'\\Cabrini_to_'+school+'_'+sport+'_'+str(year)+'.xlsx'
writer=pd.ExcelWriter(xlsname, engine='openpyxl')
Transferroster=organizeroster(group)
Transferroster=Transferroster.sort_values(['Team', 'Sex', 'Grade'], ascending=True)
Transferroster=replaceacro(Transferroster,acronyms)
Transferroster.to_excel(writer,sheet_name='roster',index=False)
# prep contacts tab
mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
'Email2', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Team']
Transfercontacts=group[mycols]
Transfercontacts.to_excel(writer, sheet_name='contacts', index=False)
writer.save()
# Now generate list of e-mails for all schools/directors
logfile='transfers_director_emails_log.txt'
with open(logfile,'w+') as emaillog:
# Read generic file to sport director
with open(messfile, 'r') as file:
blankmessage=file.read()
for [sport, school], group in grouped:
plagroup=group.groupby(['Grade', 'Gender'])
platypes=[] # list of # of players by grade, gender
gradedict={'K':'K', 1:'1st', 2:'2nd',3:'3rd',4:'4th',5:'5th',6:'6th', 7:'7th',8:'8th'}
genderdict={'f':'girls', 'F':'girls','m':'boys','M':'boys'}
for [grade, gender], group in plagroup:
numplays=str(int(group['Grade'].count()))
grname=gradedict.get(grade)
genname=genderdict.get(gender)
platypes.append(numplays+' '+grname+' '+genname)
plalist=combinephrases(platypes)
thismess=blankmessage.replace('$SCHOOL', school)
thismess=thismess.replace('$SPORT', sport)
thismess=thismess.replace('$PLALIST', plalist)
emaillog.write(thismess)
emaillog.write('\n\n')
return
def findcards():
'''Search ID cards folder and return player # and file link
cards resized to 450x290 pix jpg in photoshop (scripts-image processor)
keys are either player number as string or coach CYC ID, vals are links to files'''
cardlist=glob.glob('%s\\IDcards\\*.jpg' %cnf._OUTPUT_DIR, recursive=True)
# construct list of [card #, filename]
nums=[i.split('\\')[-1] for i in cardlist]
nums=[i.split('_')[0] if '_' in i else i.split('--')[0] for i in nums ]
cards={} # dict for card numbers/filenames
for i,num in enumerate(nums):
cards.update({num: cardlist[i]})
return cards
def makethiscard(IDlist, team):
''' Passes link to ID card or player name (if missing) From team's list of player numbers (in alphabetical order), find/open card links, and create single image'''
# make the master image and determine image array size
margin=10 # pix on all sides
if len(IDlist)<11: # use 2 x 5 array (horiz)
wide=2
high=5
elif len(IDlist)<13: # 4w x 3 h (vert)
wide=4
high=3
elif len(IDlist)<22: # 3x by 5-7 high (horiz); max 21
wide=3
high=math.ceil(len(IDlist)/3)
else: # more than 21 ... yikes
wide=3
high=math.ceil(len(IDlist)/3)
cardimage = Image.new('RGB', (450*wide+2*margin, 300*high+2*margin), "white") # blank image of correct size
draw=ImageDraw.Draw(cardimage) # single draw obj for adding missing card names
ttfont=ImageFont.truetype('arial.ttf', size=36)
for i,fname in enumerate(IDlist):
row=i//high # remainder is row
col=i%high # mod is correct column
xpos=margin+row*450
ypos=margin+col*300
try:
thiscard=Image.open(fname)
thiscard=thiscard.resize((450, 300), Image.ANTIALIAS)
cardimage.paste(im=thiscard, box=(xpos, ypos)) # paste w/ xpos,ypos as upper left
except: # occurs when "first last" present instead of file name/path
# blankcard=Image.new('RGB', (450, 300)) # make blank image as placeholder
draw.text((xpos+50,ypos+100),fname,font=ttfont, fill="red")
return cardimage
''' TESTING
i=0 team=teamlist[i]
'''
def makeCYCcards(df, players, teams, coaches, season, year, **kwargs):
''' From mastersignups and teams, output contact lists for all teams/all sports separately
team assignments must be finished
args:
df -- mastersignups dataframe
players - player info dataframe
teams - this year's teams csv
coaches - full coach CYC info list
season - Fall, Winter or Spring
kwargs:
showmissing - True (shows missing player's name); False- skip missing player
otherSchools - default False (also make card sheets for transferred teams/players)
kwargs={'showmissing':False}
missing = makeCYCcards(Mastersignups, players, teams, coaches, season, year, **{'showmissing':True} )
missing = makeCYCcards(Mastersignups, players, teams, coaches, season, year, **{'showmissing':False} )
'''
# Slice by sport: Basketball (null for winter?), Soccer, Volleyball, Baseball, T-ball, Softball, Track)
cards=findcards() # dictionary with number: filename combo for existing CYC cards
df=df[(df['Year']==year)]
df=df.reset_index(drop=True)
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
df=df[df['Sport'].isin(sportlist)] # season is not in mastersignups... only individual sports
# Make list of teams that need cards (all track and others >1st grade)
def processGrade(val):
if val=='K':
return 0
else:
return int(val)
teams.loc[:,'Grade'] = teams['Grade'].apply(lambda x:processGrade(x))
if not kwargs.get('otherSchools', False):
# all transfer teams with contain # (e.g. SMOS#3G) so remove these
# dropped by default
teams = teams[~teams['Team'].str.contains('#')]
# need track teams or any team from grades 2+
cardTeamList= teams[ (teams['Grade']>1) | (teams['Sport']=='Track') ]['Team'].unique()
df=df[ df['Team'].isin(cardTeamList) ]
df=df.sort_values(['Last'])
# plakeys as string will be easiest for below matching
df.loc[:,'Plakey']=df['Plakey'].astype(int)
df.loc[:,'Plakey']=df['Plakey'].astype(str)
def getName(gr, pk):
# get name from plakey as string
match=gr[gr['Plakey']==pk]
name=match.iloc[0]['First'] + ' ' + match.iloc[0]['Last']
return name
teamgrouped = df.groupby(['Team'])
missinglist=[] # list of plakeys with missing card
for team, gr in teamgrouped:
# keys in card dict are strings
IDlist = [str(int(i)) for i in gr.Plakey.unique()]
missinglist.extend([i for i in gr.Plakey.unique() if i not in cards.keys() ])
if not kwargs.get('showmissing', False):
# Shows only valid cards, drops missing names
IDlist = [ cards.get(i) for i in IDlist if i in cards.keys() ]
filename='Cards_'+ team +'.jpg'
else: # show cards and missing name when card image not in IDcards folder
IDlist = [cards.get(i) if i in cards.keys() else getName(gr, i) for i in IDlist ]
filename='Cards_'+ team +'_all.jpg'
# get team's coaches
IDlist.extend(getcoachIDs(team, teams, coaches, cards)) # add coach ID image file or first/last if missing
cardimage =makethiscard(IDlist, team) # directly saved
# save the card file
cardimage.save(cnf._OUTPUT_DIR+'\\'+filename)
missingcards=players[players['Plakey'].isin(missinglist)]
missingcards=missingcards.sort_values(['Grade','Last'])
return missingcards
def getcoachIDs(team, teams, coaches, cards):
''' Returns CYC IDs for all team's coaches '''
thisteam=teams[teams['Team']==team]
IDlist=[]
if len(thisteam)!=1:
print(team, 'not found in current teams list')
return IDlist # blank list
thisteam=thisteam.dropna(subset=['Coach ID'])
if len(thisteam)!=1:
print('Coach ID not found for', team)
return IDlist # blank list
if thisteam.iloc[0]['Coach ID']!='': # possibly blank
thisID=thisteam.iloc[0]['Coach ID'].strip()
if thisID in cards:
IDlist.append(cards.get(thisID,'')) # file path to this coach's ID
else: # get first/last
thiscoach=coaches[coaches['Coach ID']==thisID]
if len(thiscoach)==1:
IDlist.append(thiscoach.iloc[0]['Fname']+' '+thiscoach.iloc[0]['Lname'])
else:
print("Couldn't find coach ", thisID)
thisteam=thisteam.dropna(subset=['AssistantIDs'])
if len(thisteam)==1: # grab asst IDs if they exist
asstIDs=thisteam.iloc[0]['AssistantIDs']
asstIDs=[str(s).strip() for s in asstIDs.split(",")]
for i, asstID in enumerate(asstIDs):
if asstID in cards:
IDlist.append(cards.get(asstID,'')) # found assistant coaches ID card image
else: # can't find ... get assistant first last
thisasst=coaches[coaches['Coach ID']==asstID] # matching asst coach row
if len(thisasst)==1:
IDlist.append(thisasst.iloc[0]['Fname']+' '+thisasst.iloc[0]['Lname'])
else:
print("Couldn't find coach ", asstID)
return IDlist
def autocsvbackup(df, filename, newback=True):
''' Pass df (i.e players for backup and basename (i.e. "family_contact" for file.. finds list of existing backups and keeps ones of
certain ages based on targetdates list;
can't remember why was newback=False was needed (always true here to make new backup)
'''
# TODO fix this!
pass
return
def parseDate(val):
'''
Conversion of date string to datetime.date (always header line 2 40:60)
Possible date formats: 20180316 (newer style) or 03/15/2018 (older style)
For NGA files Date format changed from 03/15/2018 to 20180316 (on jday 75 in 2018)
time format: 221100 or 22:11:00 (sometimes w/ UTC)
not terribly concerned w/ time
possible date formats: 0) 03/01/2018, 3/1/2018, 3/1/18 or 03/01/18
2) 1/1/19 2) 2019-1-1 3) 2019-01-01
'''
if not isinstance(val, str):
return val
else:
if ' ' in val: # Remove time substring (but will fail for 3 Oct 2019)
val=val.split(' ')[0] # strip time substring if present
patterns=['\d{1,2}/\d{1,2}/\d{2,4}', '\d{4}-\d{1,2}-\d{1,2}', '\d{1,2}-\d{1,2}-\d{4}']
for i, patt in enumerate(patterns):
match=re.search(r'%s' %patt, val)
if match:
if i==0: # Extract 03/16/2018 (or rarely 28/10/2019 style)
try:
(mo,dy,yr)=[int(i) for i in val.split('/')]
if yr<100 and len(str(yr))==2: # handle 2 digit year
yr=int('20'+str(yr))
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return None
if i==1: # extract 2017-01-01 style (year first)
try:
(yr,mo,dy)=[int(i) for i in val.split('-')]
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return val
if i==2: # extract 01-01-2019 style (year last)
try:
(mo,dy,yr)=[int(i) for i in val.split('-')]
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return val
def loadProcessPlayerInfo():
'''Loads and processes players & family contacts (but not signup file)
args:
gsignups -- google signups
season - 'Fall', 'Winter', 'Spring
year - 4 digit int (uses fall value all school year.. ie. 2018-19 year is always
2018)
'''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
if not isinstance(players.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x))
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
famcontact=formatnamesnumbers(famcontact)
return players, famcontact
def loadProcessGfiles(gsignups, season, year):
'''Loads and processes players, family contacts and signup file, gets active
season and year
args:
gsignups -- google signups
season - 'Fall', 'Winter', 'Spring
year - 4 digit int (uses fall value all school year.. ie. 2018-19 year is always
2018)
'''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
if not isinstance(players.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x))
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
if season=='Winter':
gsignups['Sport']='Basketball'
# TODO determine where multiple sports converted to separate lines
duplicated=gsignups[gsignups.duplicated(subset=['First', 'Last','Grade','Sport'])]
if len(duplicated)>0:
print('Remove duplicate signups for %s' %", ".join(duplicated.Last.unique().tolist()))
gsignups=gsignups.drop_duplicates(subset=['First', 'Last','Grade','Sport'])
gsignups.loc[:,'Sport']=gsignups['Sport'].str.replace('Volleyball','VB')
#gsignups.loc[:,'Sport']=gsignups.loc[:,'Sport'].str.replace('Volleyball','VB').copy()
#gsignups.loc[:,'Sport']=gsignups['Sport'].replace({'Volleyball':'VB'}, regex=True).copy()
missing=[i for i in ['Famkey','Plakey'] if i not in gsignups.columns]
for col in missing: # add blank vals
gsignups.loc[gsignups.index, col]=np.nan
# convert assorted DOB strings to datetime.date
if not isinstance(gsignups.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
gsignups.loc[:,'DOB']=gsignups.DOB.apply(lambda x: parseDate(x))
# Get year from signup file name
outputduplicates(gsignups) # quick check of duplicates output in console window (already removed from signups)
gsignups=formatnamesnumbers(gsignups) # format phone numbers, names to title case, standardize schools, etc.
famcontact=formatnamesnumbers(famcontact)
def processGkey(val):
''' Some plakey/famkey copied to drive... must convert nan(float), whitespace or
number as string to either nan or int
'''
if isinstance(val, str):
val=''.join(val.split(' '))
if val=='':
return np.nan
else:
try:
return int(val)
except:
return np.nan
else:
return np.nan
# ensure gsignups has only int or nan (no whitespace)
gsignups.loc[:,'Plakey']=gsignups['Plakey'].apply(lambda x: processGkey(x))
gsignups.loc[:,'Famkey']=gsignups['Famkey'].apply(lambda x: processGkey(x))
return players, famcontact, gsignups
def loadprocessfiles(signupfile):
'''Loads and processes players, family contacts and signup file, gets active
season and year '''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
if type(players.DOB[0])!=pd.Timestamp: # sometimes direct import to pd timestamp works, other times not
try:
players.loc[:'DOB']=parseDate(players.DOB) # return properly converted date columns series
except:
print('Failure converting player DOB to datetime/timestamp')
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
# read this season's sports signup file and rename columns
if signupfile.endswith('.csv'):
SUraw=pd.read_csv(signupfile)
elif 'xls' in signupfile:
try:
SUraw=pd.read_excel(signupfile, sheetname='Raw') # may or may not have plakey/famkey
except:
SUraw=pd.read_excel(signupfile)
if SUraw.shape[1]==30 and 'Plakey' in SUraw.columns:
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School','Grade',
'Address','Zip','Parish','Sport','AltPlacement','Ocstatus','Pfirst1',
'Plast1','Phone1','Text1','Email','Othercontact','Coach','Pfirst2','Plast2',
'Phone2','Text2','Email2','Coach2','Unisize','Unineed','Plakey','Famkey']
elif SUraw.shape[1]==28 and 'Plakey' in SUraw.columns:
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School','Grade',
'Address','Zip','Parish','Sport','AltPlacement','Ocstatus','Pfirst1',
'Plast1','Phone1','Text1','Email','Othercontact','Coach','Pfirst2','Plast2',
'Phone2','Text2','Email2','Coach2','Plakey','Famkey']
elif SUraw.shape[1]==26 and 'Plakey' not in SUraw.columns: # Raw value without plakey and famkey
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School',
'Grade','Address','Zip','Parish','Sport','AltPlacement','Ocstatus',
'Pfirst1','Plast1','Phone1','Text1','Email','Othercontact','Coach',
'Pfirst2','Plast2','Phone2','Text2','Email2','Coach2']
elif SUraw.shape[1]==28 and 'Plakey' not in SUraw.columns: # Raw value without plakey and famkey
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School',
'Grade','Address','Zip','Parish','Sport','AltPlacement','Ocstatus',
'Pfirst1','Plast1','Phone1','Text1','Email','Othercontact','Coach',
'Pfirst2','Plast2','Phone2','Text2','Email2','Coach2','Unisize','Unineed']
SUraw.loc[SUraw.index,'Plakey']=np.nan # add if absent
SUraw.loc[SUraw.index,'Famkey']=np.nan
signups=SUraw.drop_duplicates(subset=['First', 'Last','Grade','Sport'])
signups['Sport'].replace({'Volleyball':'VB'},inplace=True, regex=True)
# Get year from signup file name
season=re.match(r'(\D+)', signupfile).group(0) # season at string beginning followed by year (non-digit)
if '\\' in season: # remove file path problem
season=season.split('\\')[-1]
year=int(re.search(r'(\d{4})', signupfile).group(0)) # full year should be only number string in signups file
outputduplicates(SUraw) # quick check of duplicates output in console window (already removed from signups)
signups=formatnamesnumbers(signups) # format phone numbers, names to title case, standardize schools, etc.
famcontact=formatnamesnumbers(famcontact)
return players, famcontact, signups, season, year
def findavailablekeys(df, colname, numkeys):
'''Pass df and colname, return a defined number of available keys list
used for players, families, signups, etc.
'''
# list comprehension
allnums=[i for i in range(1,len(df))]
usedkeys=df[colname].unique()
usedkeys=np.ndarray.tolist(usedkeys)
availkeys=[i for i in allnums if i not in usedkeys]
if len(availkeys)<numkeys: # get more keys starting at max+1
needed=numkeys-len(availkeys)
for i in range(0,needed):
nextval=int(max(usedkeys)+1) # if no interior vals are available find next one
availkeys.append(nextval+i)
availkeys=availkeys[:numkeys] # truncate and only return the requested number of needed keys
return availkeys
def organizeroster(df):
''' Renaming, reorg, delete unnecessary columns for CYC roster output
already split by sport and year'''
df=df.rename(columns={'First':'Fname','Last':'Lname','Address':'Street','Parish_registration':'Parish of Registration'})
df=df.rename(columns={'Parish_residence':'Parish of Residence','Phone1':'Phone','DOB':'Birthdate','Gender':'Sex'})
df=df.rename(columns={'Email1':'Email'})
# replace Girl, Boy with m f
df.loc[:,'Sex']=df.Sex.replace('Girl','F').replace('Boy','M')
df.loc[:,'Sex']=df.Sex.str.upper() # ensure uppercase
# Convert date format to 8/25/2010 string format
mycols=['Fname', 'Lname', 'Street', 'City', 'State', 'Zip', 'Phone', 'Email', 'Birthdate', 'Sex', 'Role', 'Division', 'Grade', 'Team', 'School', 'Parish of Registration', 'Parish of Residence', 'Coach ID']
df=df[mycols] # put back in desired order
df=df.sort_values(['Team'])
return df
'''TESTING row=tempplay.iloc[7]
signups=signups[signups['Last']=='Elston']
'''
def processdatachanges(signups, players, famcontact, year):
'''Pass SC signups subset from google drive, update address for more up-to-date
contact information, new address, etc.
must start here if troubleshooting
args:
signups -- online signups file (normally google drive)
players - player DOB, grade, etc
famcontact- family contact info
year - sports year (int); e.g. 2019 for 2019-20 school year
'''
# Using all entries from signups (manual and gdrive)
# Updates from paper signups should be done directly to famcontact and players csv files (skip entirely)
'''
signups.Timestamp=pd.to_datetime(signups.Timestamp, errors='coerce') # converts to naT or timestamp
gdsignups=signups.dropna(subset=['Timestamp']) # drops manual entries (no google drive timestamp)
'''
# merge w/ players and update grade, recalc grade adjustment, and school
# must use left merge to keep correct indices from players df (inner causes reindexing)
players=players.reset_index(drop=True)
tempplay=pd.merge(players, signups, how='inner', on=['Plakey'], suffixes=('','_n'))
tempplay=tempplay.dropna(subset=['Gender_n']) # this drops all without a google drive entry
for index, row in tempplay.iterrows():
upkwargs={}
# Skip approval for grade updates
if row.Grade!=row.Grade_n: # grade discrepancy between players.csv and current signup
match=players[players['Plakey']==row.Plakey]
if len(match)==1:
thisind=match.index[0]
# update player grade (no approval)
players.loc[thisind,'Grade']=row.Grade_n # set to new value from current signup file
print (row.First," ",row.Last," grade changed to ", row.Grade_n)
if row.School!=row.School_n and str(row.School_n)!='nan':
upkwargs.update({'school':True})
# Check for DOB inconsistency between google drive and players.csv
if row.DOB!=row.DOB_n: # don't change grade adjustment if DOB discrepancy
if row.DOB_n.year!=year: # skip birthday instead of DOB error
upkwargs.update({'DOB':True})
else: # recalculate grade adjustment
# Direct adjustment to gradeadj in players (if indicated)
players=updategradeadjust(row, players, year)
if 'school' in upkwargs or 'DOB' in upkwargs:
# Interactively approve school or DOB changes
players=updateplayer_tk(row, players, **upkwargs)
autocsvbackup(players,'players', newback=True) # run autobackup script
outname=cnf._OUTPUT_DIR+'\\players.csv'
players.to_csv(outname,index=False) # direct save of changes from google drive info
# now update new info into family contacts
# faminfo=gdsignups.drop_duplicates(subset=['Famkey']) # only process first kid from family
faminfo=signups.drop_duplicates(subset=['Famkey'])
famcontact=prepcontacts(famcontact)
faminfo=prepcontacts(faminfo)
tempfam=pd.merge(famcontact, faminfo, how='inner', on=['Famkey'], suffixes=('','_n')) # same indices as famcontact
tempfam=tempfam.dropna(subset=['Zip_n']) # drops those without timestamped google drive entry
for index,row in tempfam.iterrows():
# Update/reshuffle phone, email, parent list, parish of registration (direct to famcontact)
famcontact=update_contact(row, famcontact) # update/reshuffle phone,text (list of lists)
autocsvbackup(famcontact,'family_contact', newback=True) # run autobackup script
outname=cnf._INPUT_DIR+'\\family_contact.csv'
famcontact.to_csv(outname, index=False)
return players, famcontact
def updatefamcon_tk(row, famcontact, **upkwargs):
''' Interactive approval of family contact changes
changes directly made to famcontacts (but not yet autosaved)
upkwargs: phone, email, address
'''
root = tk.Tk()
root.title('Update family contact info')
choice=tk.StringVar() # must be define outside of event called functions
rownum=0
mytxt='Family: '+row.Family+' # '+str(row.Plakey)
tk.Label(root, text=mytxt).grid(row=rownum, column=0)
tk.Label(root, text='Deselect to remove').grid(row=rownum, column=1)
rownum+=1
# Use listbox of common schools?
if 'parlist' in upkwargs: # indicates new parent found
colnum=0
parlist=upkwargs.get('parlist',[])
# Checkboxes to add new parent
if 'newpar1' in upkwargs:
addpar1=tk.BooleanVar()
addpar1.set(True)
try:
mytext='Add parent: '+ (' '.join(upkwargs.get('newpar1',[]))+'?')
except:
print('Error adding parent 1', )
mytext=''
tk.Checkbutton(root, variable=addpar1, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
if 'newpar2' in upkwargs:
addpar2=tk.BooleanVar()
addpar2.set(True)
try:
mytext='Add parent: '+ (' '.join(upkwargs.get('newpar2',[]))+'?')
except:
mytext=''
tk.Checkbutton(root, variable=addpar2, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
# Checkbutton and boolvar for each parent (default true)
pbools=[] # List of bools for parent inclusion
for i in range(0,len(parlist)):
pbools.append(tk.BooleanVar())
pbools[i].set(True)
tempstr=parlist[i]
tk.Checkbutton(root, variable=pbools[i], text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
rownum+=1
if 'emails' in upkwargs: # indicates new parent found
emaillist=upkwargs.get('emails',[])
# Checkboxes to add new parent
colnum=0
if 'email1' in upkwargs:
addemail1=tk.BooleanVar()
addemail1.set(True)
email1=tk.StringVar()
email1.set(upkwargs.get('email1',''))
tk.Checkbutton(root, variable=addemail1, text='Add new email1').grid(row=rownum, column=colnum)
rownum+=1
tk.Entry(root, textvariable=email1).grid(row=rownum, column=colnum)
rownum+=1
if 'email2' in upkwargs:
addemail2=tk.BooleanVar()
addemail2.set(True)
email2=tk.StringVar()
email2.set(upkwargs.get('email2',''))
tk.Checkbutton(root, variable=addemail2, text='Add new email2').grid(row=rownum, column=colnum)
rownum+=1
tk.Entry(root, textvariable=email2).grid(row=rownum, column=colnum)
colnum+=1
# Checkbutton and boolvar for each email (default true)
ebools=[] # List of bools for parent inclusion
for i in range(0,len(emaillist)):
ebools.append(tk.BooleanVar())
tempstr=emaillist[i]
ebools[i].set(True)
tk.Checkbutton(root, variable=ebools[i], text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
rownum+=1
if 'phones' in upkwargs: # indicates new parent found
phlist=upkwargs.get('phones',[])
# Checkboxes to add new parent
colnum=0
if 'phone1' in upkwargs:
addphone1=tk.BooleanVar()
addphone1.set(True)
try:
mytext='Add phone/text: '+ upkwargs.get('phone1','')
except:
mytext=''
tk.Checkbutton(root, variable=addphone1, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
if 'phone2' in upkwargs:
addphone2=tk.BooleanVar()
addphone2.set(True)
try:
mytext='Add phone/text: '+ ', '.join(upkwargs.get('phone2',[]))
except:
mytext=''
tk.Checkbutton(root, variable=addphone2, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
# Checkbutton and boolvar for each email (default true)
phbools=[] # List of bools for parent inclusion
for i in range(0,len(phlist)):
phbools.append(tk.BooleanVar())
tempstr=phlist[i]
phbools[i].set(True)
tk.Checkbutton(root, variable=phbools[i], text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
if 'address' in upkwargs:
colnum=0
tk.Label(root, text='Possible change of address').grid(row=rownum, column=colnum)
rownum+=1
newaddrbool=tk.BooleanVar()
newaddr=tk.StringVar()
newaddrbool.set(False)
newaddr.set(row.Address_n)
newzip=tk.StringVar()
try:
newzip.set(int(row.Zip_n))
except:
print('Non-standard zip value',str(row.Zip_n))
tk.Checkbutton(root, variable=newaddrbool, text='Change address?').grid(row=rownum, column=colnum)
colnum+=1
tk.Label(root, text='Current address').grid(row=rownum, column=colnum)
colnum=0
rownum+=1
tk.Entry(root, textvariable=newaddr).grid(row=rownum, column=colnum)
rownum+=1
tk.Entry(root, textvariable=newzip).grid(row=rownum, column=colnum)
colnum+=1
tempstr=str(row.Address)+' '+str(row.Zip)
tk.Label(root, text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
# Now set up select/close buttons
def skip(event):
choice.set('skip')
root.destroy()
def change(event):
choice.set('change')
root.destroy()
f=tk.Button(root, text='Skip')
f.bind('<Button-1>', skip)
f.grid(row=rownum, column=0)
g=tk.Button(root, text='Change')
g.bind('<Button-1>', change)
g.grid(row=rownum, column=1)
root.mainloop()
mychoice=choice.get()
if mychoice=='change':
# Find matching row for family (needed for all changes below)
famkey=row.Famkey
match=famcontact[famcontact['Famkey']==famkey]
if len(match)==1:
thisind=match.index[0]
else:
print('Problem finding unique entry for famkey', str(famkey))
return famcontact # return unaltered
# Reconstruct parent list
if 'parlist' in upkwargs:
newparlist=[] # constructing entirely new parent list from checkbox choices
if 'newpar1' in upkwargs:
if addpar1.get():
newparlist.append(upkwargs.get('newpar1',[np.nan,np.nan]))
#TODO fix nan error
print('Added parent',' '.join(upkwargs.get('newpar1')),' to ',str(row.Family))
for i, val in enumerate(pbools):
if pbools[i].get():
newparlist.append(parlist[i]) # [first, last] format
if 'newpar2' in upkwargs:
if addpar2.get():
newparlist.append(upkwargs.get('newpar2',[np.nan,np.nan]))
print('Added parent 2',' '.join(upkwargs.get('newpar2')),' to ',str(row.Family))
# Now direct update of parents in this family's famcontact entry
newparlist=newparlist[0:3] # limit to 3 entries
while len(newparlist)<3:
newparlist.append([np.nan,np.nan]) # pad with nan entries if necessary
# now reset parent name entries
for i in range(1,4): # reset 3 existing parents entries
fname='Pfirst'+str(i)
lname='Plast'+str(i)
famcontact.loc[thisind, fname] = newparlist[i-1][0]
famcontact.loc[thisind, lname] = newparlist[i-1][1]
# Reconstruct email list
if 'emails' in upkwargs:
newemaillist=[]
if 'email1' in upkwargs:
if addemail1.get():
newemaillist.append(email1.get())
print('Added email1', email1.get(), ' to ', str(row.Family))
for i, val in enumerate(ebools):
if ebools[i].get():
newemaillist.append(emaillist[i])
if 'email2' in upkwargs:
if addemail2.get():
# insert in 2nd position
newemaillist.insert(1, email2.get())
print('Added email2', email2.get(), ' to ', str(row.Family))
# Now update emails in famcontact entry
# Direct update of parent list
newemaillist=newemaillist[0:3] # limit to 3 entries
while len(newemaillist)<3:
newemaillist.append(np.nan) # pad with nan entries if necessary
# now reset parent name entries
for i in range(1,4): # reset 3 existing parents entries
colname='Email'+str(i)
famcontact.loc[thisind, colname]= newemaillist[i-1]
# Reconstruct phone list
if 'phones' in upkwargs:
newphlist=[]
if 'phone1' in upkwargs:
if addphone1.get():
newphlist.append(upkwargs.get('phone1', [np.nan,np.nan]))
print('Added phone1', ','.join(upkwargs.get('phone1',[])), ' to ', str(row.Family))
for i, val in enumerate(phbools):
if phbools[i].get():
newphlist.append(phlist[i])
# added at end... probably should go
if 'phone2' in upkwargs:
if addphone2.get():
# insert in 2nd position
newphlist.insert(1, upkwargs.get('phone2',[np.nan,np.nan]))
print('Added phone2', ','.join(upkwargs.get('phone2',[])), ' to ', str(row.Family))
# Now update phone, text in famcontact entry
newphlist=newphlist[0:4] # limit to 4 entries
while len(newphlist)<4:
newphlist.append([np.nan, np.nan]) # pad with nan entries if necessary
# now reset parent name entries
for i in range(1,5): # reset max 4 phone entries
phname='Phone'+str(i)
textname='Text'+str(i)
famcontact.loc[thisind, phname] = newphlist[i-1][0]
famcontact.loc[thisind, textname] = newphlist[i-1][1]
# Handle change of address (direct change if approved)
# Also change associated zip code and reset parish of residence
if 'address' in upkwargs:
if newaddrbool:
print('Address changed for ', str(row.Family))
famcontact.loc[thisind, 'Address'] = newaddr.get()
# Reset parish of residence to nan (manually find and replace)
famcontact.loc[thisind, 'Parish_residence'] = np.nan
try:
famcontact.loc[thisind,'Zip']=int(newzip.get())
except:
print('Problem converting zip code ', newzip.get())
# TODO ... handle parish of registration
return famcontact
def update_contact(row, famcontact):
'''Update phone and textable list from google drive entries;
google drive raw entries first processed in process_data_changes (then update
contacts is called)
row is a merge of existing famcontact info and new signup info
existing entries from fam_contact listed first;
pass/modify/return series for family; reorder/replace numbers
has fairly long list of changes made w/o interactive approval:
1) changing order of email or phone numbers (e.g. swap phone1 and phone2)
2) add phone2 (or email2) if current phone2(email2) is nan
3) change order of parents (new parent1)
All other changes done w/ interactive approval using update_famcon_tk
'''
# [phone, text, order]
thisfam=row.Family
match=famcontact[famcontact['Famkey']==row.Famkey]
if len(match)==1:
thisind=match.index[0] # correct index for updating this family in famcontacts
else:
print(str(row.Family), " not found in famcontacts.. shouldn't happen")
return famcontact
upkwargs={} # empty dict for monitoring all changes
# check for possible change in address (housenum as trigger)
match1=re.search(r'\d+', row.Address)
match2=re.search(r'\d+', row.Address_n)
if match1 and match2:
num1=match1.group(0)
num2=match2.group(0)
if num1!=num2: # change in address number strongly suggestive of actual change
upkwargs.update({'address':True})
else:
print('No address # found for', str(thisfam))
phonelist=[] # list of lists with number and textable Y/N
for i in range(1,5): # get 4 existing phone entries (phone1, phone2, etc.)
phname='Phone'+str(i)
txtname='Text'+str(i)
if str(row[phname])!='nan':
phonelist.append([row[phname],row[txtname]]) # as phone and text y/N
# New google drive entries will be Phone1_n.. look for phone/text pair in existing list
if str(row.Phone1_n)!='nan' and [row.Phone1_n,row.Text1_n] in phonelist: # new ones phone is required entry
# default move of phone1, text1 to top of list - no confirmation
if [row.Phone1_n,row.Text1_n]!=phonelist[0]: # move if not in first position
phonelist.insert(0,phonelist.pop(phonelist.index([row.Phone1_n,row.Text1_n])))
print('Phone 1 changed for ', str(thisfam))
upkwargs.update({'phchange':True})
if str(row.Phone1_n)!='nan' and [row.Phone1_n,row.Text1_n] not in phonelist: # new ones phone is required entry
if [row.Phone1_n, np.nan] in phonelist: # remove if # present but w/o text indication (no confirm)
phonelist.remove([row.Phone1_n,np.nan])
phonelist.insert(0,[row.Phone1_n,row.Text1_n]) # insert in first position
print('Updated phone 1 to', row.Phone1_n,' for ',str(thisfam))
upkwargs.update({'phchange':True})
else:
# phone1 change to be confirmed
upkwargs.update({'phone1':[row.Phone1_n,row.Text1_n]})
upkwargs.update({'phones': phonelist})
if str(row.Phone2_n)!='nan': # check for phone2 entry (with _n suffix)
if [row.Phone2_n,row.Text2_n] not in phonelist: # add second phone to 2nd position if not present
if [row.Phone2_n,np.nan] in phonelist: # remove if # present but w/o text indication
phonelist.remove([row.Phone2_n,np.nan])
phonelist.insert(1,[row.Phone2_n,row.Text2_n])
print ('Updated phone 2 to ', str(row.Phone2_n), 'for ', str(thisfam))
upkwargs.update({'phchange':True})
else: # get approval for phone 2 addition
upkwargs.update({'phone2':[row.Phone2_n,row.Text2_n]})
upkwargs.update({'phones': phonelist})
# Construct existing list of known email addresses
emaillist=[]
for i in range(1,4): # get 3 existing email entries
emailname='Email'+str(i)
if str(row[emailname])!='nan':
emaillist.append(row[emailname].lower())
# Find new email1 entry in google drive data
if str(row.Email)!='nan' and '@' in row.Email: # real primary gd named email
if row.Email.lower() in emaillist: # add in first position if not present (no confirmation)
if row.Email.lower()!=emaillist[0]: # check if in first position already
emaillist.insert(0,emaillist.pop(emaillist.index(row.Email)))
upkwargs.update({'emchange':True})
print ('Updated email 1 ', str(row.Email.lower()), 'for family', str(thisfam))
else: # confirm email1 if not present
upkwargs.update({'email1':row.Email})
upkwargs.update({'emails':emaillist})
# look for new email in email2 position and add
if str(row.Email2_n)!='nan' and '@' in row.Email2_n:
if row.Email2_n.lower() not in emaillist: # add second email to 2nd position if not present
upkwargs.update({'email2':row.Email2_n})
upkwargs.update({'emails':emaillist})
# Update list of parent names (max 3 entries)
parlist=[] # construct existing list from family contacts
# skip if all nan for entered parents (non-gd entry)
for i in range(1,4): # construct existing parents list
fname='Pfirst'+str(i)
lname='Plast'+str(i)
if str(row[fname])!='nan':
parlist.append([row[fname],row[lname]]) # list of lists [first, last]
if str(row.Pfirst1_n)!='nan': # skip if parent name is nan
if [row.Pfirst1_n,row.Plast1_n] in parlist: # reorder in list
if [row.Pfirst1_n,row.Plast1_n]!=parlist[0]: # check if already in first
# move to first position (everything else requires approval)
parlist.insert(0,parlist.pop(parlist.index([row.Pfirst1_n,row.Plast1_n])))
parlist.insert(0,[row.Pfirst1_n, row.Plast1_n]) # insert in first position
upkwargs.update({'parchange':True})
else: # parent not in list (confirm)
upkwargs.update({'newpar1':[row.Pfirst1_n,row.Plast1_n]})
upkwargs.update({'parlist':parlist})
# inserts in first position while simultaneously removing other entry
if str(row.Pfirst2_n)!='nan': # Check for parent 2 entry
if [row.Pfirst2_n,row.Plast2_n] not in parlist: # add second phone to 2nd position if not present
upkwargs.update({'newpar2':[row.Pfirst2_n,row.Plast2_n]})
upkwargs.update({'parlist':parlist})
# Save auto-changes in phone to family contacts
if 'phchange' in upkwargs: # Record altered phonelist in famcontacts
if 'phones' in upkwargs: # if present in upkwargs, update list
upkwargs.update({'phones': phonelist}) # ensure most current copy
phonelist=phonelist[0:3] # construct proper list
while len(phonelist)<4:
phonelist.append([np.nan,np.nan]) # pad with nan entries if necessary
for i in range(1,5): # reset 4 existing phone entries
phname='Phone'+str(i)
txtname='Text'+str(i)
famcontact.loc[thisind, phname] = phonelist[i-1][0] # first of tuple is phone
famcontact.loc[thisind, txtname] = phonelist[i-1][1] # 2nd of tuple is text y/n
del upkwargs['phchange']
print('automatic phone changes for', thisfam)
# Save auto-changes in emails to family contacts
if 'emchange' in upkwargs: # Record altered phonelist in famcontacts
if 'emails' in upkwargs: # if present in upkwargs, update list
upkwargs.update({'emails': emaillist}) # ensure most current copy
emaillist=emaillist[0:2] # construct proper list
while len(emaillist)<3:
emaillist.append(np.nan) # pad with nan entries if necessary
for i in range(1,4): # reset 4 existing phone entries
emname='Email'+str(i)
famcontact.loc[thisind, emname] =emaillist[i-1]
del upkwargs['emchange']
print('automatic email changes for', thisfam)
if 'parchange' in upkwargs: # Record altered parents list in famcontacts
if 'parlist' in upkwargs: # if present in upkwargs, update list
upkwargs.update({'parlist': parlist}) # ensure most current copy
parlist=parlist[0:2] # construct proper list
while len(parlist)<3:
parlist.append(np.nan) # pad with nan entries if necessary (3 total)
for i in range(1,4): # reset 4 existing phone entries
fname='Pfirst'+str(i)
lname='Plast'+str(i)
try:
famcontact.loc[thisind, fname] =parlist[i-1][0]
famcontact.loc[thisind, lname] =parlist[i-1][1]
except:
print('Error updating parents for', thisfam)
del upkwargs['parchange']
print('automatic parent changes for', thisfam)
# now check for any changes needing interactive approval
if len(upkwargs)>0: # something needs interactive approval
famcontact=updatefamcon_tk(row, famcontact, **upkwargs)
return famcontact
def updateplayer_tk(row, players, **upkwargs):
''' Interactive approval of player info updates (except date)
changes directly made to players (but not yet autosaved)
called by processdatachanges
'''
commonschools=['Cabrini','Soulard','SLPS','Charter','Private']
root = tk.Tk()
root.title('Update player info')
choice=tk.StringVar() # must be define outside of event called functions
rownum=0
mytxt='Player:'+row.First+' '+row.Last+' # '+str(row.Plakey)
tk.Label(root, text=mytxt).grid(row=rownum, column=0)
rownum+=1
# Use listbox of common schools?
if 'DOB' in upkwargs: # indicates discrepancy
DOB1=date(row.DOB)
DOB2=date(row.DOB_n)
# create and display DOB variables
def ChooseDOB1(event):
DOB.set(datetime.strftime(DOB1,'%m/%d/%y'))
def ChooseDOB2(event):
DOB.set(datetime.strftime(DOB2,'%m/%d/%y'))
DOB=tk.StringVar()
DOB.set(datetime.strftime(DOB1,'%m/%d/%y')) # defaults to original
tk.Label(root, text='Update date of birth?').grid(row=rownum, column=0)
mytxt='current DOB:'+datetime.strftime(DOB1,'%m/%d/%y')
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', ChooseDOB1)
b.grid(row=rownum, column=1)
mytxt='New DOB:'+datetime.strftime(DOB2,'%m/%d/%y')
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', ChooseDOB2)
b.grid(row=rownum, column=2)
tk.Entry(master=root, textvariable=DOB).grid(row=rownum, column=3)
rownum+=1
if 'school' in upkwargs:
school=tk.StringVar()
school.set(row.School) # default to existing value
tk.Label(root, text='Update school?').grid(row=rownum, column=0)
rownum+=1
def newschool(event):
school.set(row.School_n)
def oldschool(event):
school.set(row.School)
def pickschool(event):
# double-click to pick standard school choice
items=lb.curselection()[0] # gets selected position in list
school.set(commonschools[items])
tk.Entry(root, textvariable=school).grid(row=rownum, column=2)
mytxt='new school:'+str(row.School_n)
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', newschool)
b.grid(row=rownum, column=1)
mytxt='existing school:'+str(row.School)
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', oldschool)
b.grid(row=rownum, column=0)
# also include selectable listbox of common school choices
lb=tk.Listbox(master=root, selectmode=tk.SINGLE)
lb.bind("<Double-Button-1>", pickschool)
lb.grid(row=rownum, column=3)
for i,sch in enumerate(commonschools):
lb.insert(tk.END, sch)
rownum+=1
# Now set up select/close buttons
def skip(event):
choice.set('skip')
root.destroy()
def change(event):
choice.set('change')
root.destroy()
f=tk.Button(root, text='Skip')
f.bind('<Button-1>', skip)
f.grid(row=rownum, column=0)
g=tk.Button(root, text='Change')
g.bind('<Button-1>', change)
g.grid(row=rownum, column=1)
root.mainloop()
mychoice=choice.get()
if mychoice=='change':
try:
# make changes directly to players after finding correct index using plakey
plakey=row.Plakey
match=players[players['Plakey']==plakey]
thisind=match.index[0]
if 'school' in upkwargs:
players.loc[thisind,'School']= school.get()
if 'DOB' in upkwargs:
newDOB=datetime.strptime(DOB.get(),'%m/%d/%y')
players.loc[thisind,'DOB']= newDOB
except:
print('Error updating info for', row.Plakey, row.First, row.Last)
return players
def prepcontacts(df):
''' Prepare for update contacts/ matching with google drive info
avoids possible problems/spaces in manually entered info '''
mycols=['Pfirst1', 'Plast1','Pfirst2', 'Plast2', 'Pfirst3', 'Plast3',
'Phone1', 'Text1','Phone2', 'Text2', 'Phone3', 'Text3', 'Phone4',
'Text4', 'Email1','Email2', 'Email3']
for i, col in enumerate(mycols):
try:
df.loc[:,col]=df[col].str.strip()
except: # maybe only nan or not present (i.e. in signups)
pass
mycols=['Text1','Text2','Text3']
for i, col in enumerate(mycols):
try:
df.loc[:,col]=df[col].str.replace('No','N', case=False)
df.loc[:,col]=df[col].str.replace('Yes','Y', case=False)
except:
pass
return df
def findyearseason(df):
''' Pass raw signups and determine year and sports season '''
# get year from system clock and from google drive timestamp
now=datetime.now()
val=df.Timestamp[0] # grab first timestamp
if val!=datetime: # if not a timestamp (i.e. manual string entry find one
while type(val)!=datetime:
for index, row in df.iterrows():
val=df.Timestamp[index]
year=val.year # use year value from signup timestamps
if now.year!=val.year:
print ('Possible year discrepancy: Signups are from ',str(val.year))
# now find sports season
mask = np.column_stack([df['Sport'].str.contains("occer", na=False)])
if len(df.loc[mask.any(axis=1)])>0:
season='Fall'
mask = np.column_stack([df['Sport'].str.contains("rack", na=False)])
if len(df.loc[mask.any(axis=1)])>0:
season='Spring'
mask = np.column_stack([df['Sport'].str.contains("asket", na=False)])
if len(df.loc[mask.any(axis=1)])>0:
season='Winter'
return season, year
def outputduplicates(df):
'''Prints out names of players with duplicated entries into console... can then delete from google drive signups '''
tempdf=df[df.duplicated(['First','Last','Sport'])] # series with 2nd of duplicated entries as True
firsts=tempdf.First.tolist()
lasts=tempdf.Last.tolist()
for f,l in zip(firsts, lasts):
print('Duplicated signup for player: {} {}'.format(f,l))
return
def formatphone(df):
''' Convert all entered phone numbers in dfs phone columns to 314-xxx-xxxx string and standardize text field '''
def phoneFormat(val):
# lambda function phone number reformatting
if not isinstance(val, str):
return val
# replace/remove any white space
val="".join(val.split(' '))
if val=='': # blank phone causes problems
return np.nan
if not re.search(r'(\d+-\d+-\d+)', val):
val=re.sub("[^0-9]", "", val) # substitute blank for non-number
if len(val)==7:
return '314'+val
elif len(val)==11 and val.startswith('1'): # remove starting 1 if present
return val[1:11]
elif len(val)!=10: # sometimes has ---
# print('Bad number: ',val)
return val
else:
return val[0:3]+'-'+val[3:6]+'-'+val[6:10]
else:
return val # already good
# find phone columns (named phone, phone2, etc.)
phlist=[str(s) for s in df.columns if 'Phone' in s]
for col in phlist:
df.loc[:,col]=df[col].apply(lambda x: phoneFormat(x))
# now change yes in any text field to Y
txtlist=[str(s) for s in df.columns if 'Text' in s]
for col in txtlist:
df.loc[:,col]=df[col].replace('yes','Y')
df.loc[:,col]=df[col].replace('Yes','Y')
return df
def standardizeschool(df):
''' can pass any frame with school column and standardize name as Cabrini and Soulard'''
schstr='frances' + '|' + 'cabrini' + '|' + 'sfca' # multiple school matching string
tempdf=df[df['School'].str.contains(schstr, na=False, case=False)]
df.loc[tempdf.index,'School']='Cabrini'
tempdf = df[df['School'].str.contains('soulard', na=False, case=False)]
df.loc[tempdf.index,'School']='Soulard'
tempdf = df[df['School'].str.contains('public', na=False, case=False)]
df.loc[tempdf.index,'School']='Public'
schstr='city garden' + '|' + 'citygarden' # multiple school matching string
tempdf = df[df['School'].str.contains(schstr, na=False, case=False)]
df.loc[tempdf.index,'School']='City Garden'
return df
def formatnamesnumbers(df):
'''Switch names to title case, standardize gender, call phone/text reformat and standardize school name'''
def titleStrip(val):
try:
return val.title().strip()
except:
return val
processCols=['First','Last','Family','Pfirst1','Plast1','Pfirst2','Plast2','Email','Email2']
processCols=[i for i in processCols if i in df.columns]
for col in processCols:
df.loc[:, col]=df[col].apply(lambda x: titleStrip(x))
if 'Gender' in df:
df.loc[:,'Gender']=df.Gender.replace('Girl','f')
df.loc[:,'Gender']=df.Gender.replace('Boy','m')
if 'Grade' in df:
df.loc[:,'Grade']=df.Grade.replace('K',0)
df.loc[:,'Grade']=df.Grade.replace('pK',0)
try:
df.loc[:,'Grade']=df.Grade.astype(int)
except:
print('Player grade likely missing from raw signup file... enter manually')
df=formatphone(df) # call phone reformatting string
if 'School' in df:
df=standardizeschool(df) # use "Cabrini" and "Soulard" as school names
return df
def graduate_players(players, year):
''' Recalc grade based on grade adjustment, school year (run once per year in fall) and age.
some player grades will already have been updated (generally google drive entries)... however recalc shouldn't
change grade '''
players.loc[:,'Grade']=players.Grade.replace('K',0)
for index,row in players.iterrows():
# replace K with zero
grade=int(players.iloc[index]['Grade']) # get currently listed grade
gradeadj=players.iloc[index]['Gradeadj']
dob=players.iloc[index]['DOB']
if str(gradeadj)=='nan' or str(dob)=='NaT': # skip grade update if info is missing
continue
dob=date(dob)
# calculate current age at beginning of this school on 8/1
age=date(year,8,1)-dob
age = (age.days + age.seconds/86400)/365.2425
# assign grade based on age (and grade adjustment)
newgrade=int(age)+int(gradeadj)-5
if grade!=newgrade:
first=players.iloc[index]['First']
last=players.iloc[index]['Last']
print('Grade changed from',grade,'to',newgrade,'for', first, last)
players.loc[index, 'Grade'] = newgrade
players.loc[:,'Grade']=players.Grade.replace(0,'K')
return players
def removeEmptyFams(players, famcontact):
'''
Remove empty families (with no remaining players)
'''
# Remove families with no active players
plaset=[int(i) for i in list(players.Famkey.unique())]
famset=[int(i) for i in list(famcontact.Famkey.unique())]
# Empty families
emptykey=[i for i in famset if i not in plaset]
empty=famcontact[famcontact['Famkey'].isin(emptykey)]
print('Remove empty families:')
for ind, row in empty.iterrows():
print(row.Family, ':',row.Pfirst1, row.Plast1)
choice=input("Remove empty families (Y,N)?\n")
if choice.upper()=='Y':
famcontact=famcontact[~famcontact['Famkey'].isin(emptykey)]
outname=cnf._INPUT_DIR+'\\family_contact.csv'
famcontact.to_csv(outname, index=False)
return famcontact
def removeHSkids(players):
''' Drop graduated players (9th graders) from list '''
grlist=[i for i in range(0,9)]
grlist.append('K')
Hs=players.loc[~(players.Grade.isin(grlist))]
for ind, row in Hs.iterrows():
print(row.First, row.Last)
choice=input('Remove above HS players (Y/N)?\n')
if choice.upper()=='Y':
players=players.loc[(players.Grade.isin(grlist))]
print('HS Players removed but not autosaved')
return players
def estimategrade(df, year):
'''Estimate grade for this sports season based on DOB.. not commonly used '''
for index, row in df.iterrows():
grade=df.loc[index]['Grade']
if str(grade)=='nan': # skips any players who already have assigned grade
dob=df.loc[index]['DOB']
dob=date(dob) # convert to datetime date from timestamp
first=df.loc[index]['First']
last=df.loc[index]['Last']
if str(dob)=='nan':
print ('DOB missing for ', first,' ', last)
continue # skip to next if dob entry is missing
currage=date(year,8,1) - dob
currage = (currage.days + currage.seconds/86400)/365.2425 # age on first day of school/ sports season
gradeest=int(currage-5)
if gradeest==0:
gradeest='K'
print(first, last, 'probably in grade', gradeest)
df.loc[index,'Grade']=gradeest
return df
def updateoldteams(teams, year):
''' Load old teams after copy to teams tab in teams_coaches, then auto-update year-grade
must be manually saved with saveteams... then any adjustments made manually in Excel'''
# check to ensure teams are not already updated
if teams.iloc[0]['Year']==year:
print('Teams already updated for ', year,' school year')
return teams # pass back unaltered
# temporarily make the K to 0 replacements
teams.Grade=teams.Grade.replace('K',0)
teams.loc[:'Graderange']=teams['Graderange'].astype(str) # convert all to string
teams.loc[:,'Year']=year
teams.loc[:,'Grade']+=1
for index, row in teams.iterrows():
grade=teams.loc[index]['Grade']
div=teams.loc[index]['Division'] # division must match grade
div=div.replace('K','0') # replace any Ks in string
newdiv=''.join([s if not s.isdigit() else str(grade) for s in div]) # find replace for unknown # w/ new grade
teams.loc[index,'Division'] = newdiv
cycname=teams.loc[index]['Team'] # update grade portion of team name
if cycname.startswith('K'):
newcycname='1'+ cycname[1:]
teams.loc[index,'Team'] = newcycname
elif cycname[0].isdigit(): # now update teams beginning w/ numbers
newcycname=str(grade)+ cycname[1:]
teams.loc[index,'Team']= newcycname
# update grade ranges
grrange=teams.loc[index]['Graderange'] # should be all numbers
grrange=grrange.replace('K','0')
newrange=''.join([str(int(i)+1) for i in grrange])
teams.loc[index,'Graderange'] = newrange # grade range stored as string, right?
# no auto-save... save with saveteams after checking for proper changes
return teams
def splitcoaches(df):
''' Pass CYC teams list, split and duplicate rows with comma separated vals in colname for extra coaches'''
df.loc[:,'Role']='Coach' # add col for head or asst (first entry for head coach)
# df['Open/Closed']='Closed'
assistants=df.dropna(subset=['AssistantIDs']) # drop teams w/ no asst coaches
for index, rows in assistants.iterrows():
val=assistants.loc[index,'AssistantIDs']
asstcoaches=[str(s) for s in val.split(',')] #list of assistants for single team
for i,asst in enumerate(asstcoaches):
newrow=assistants.loc[index] # duplicate entry as series
asst=asst.strip() # strip leading, trailing blanks
newrow.loc['Coach ID'] = asst # set this asst coaches ID
newrow.loc['Role'] = 'Assistant Coach'
df=df.append(newrow)
df=df.sort_values(['Team'],ascending=True)
return df
def addcoachestoroster(teams, coaches):
'''Creates roster entries for coaches for each CYC team
pass teams and coaches (with coach roster info)
needed roster cols are all below (except sport used in output parsing)
args: teams -- team table w/ head and asst coach CYC ids
coaches - coaches table with CYC Id (key) and associated info
returns: coachroster --separate df to be appended to main player roster
'''
# Add team coaches (match by CYC-IDs)
thismask = teams['Team'].str.contains('-', case=False, na=False) # finds this season's CYC level teams
CYCcoach=teams.loc[thismask] # also has associated sport
CYCcoach=splitcoaches(CYCcoach) # makes new row for all assistant coaches on CYC teams
CYCcoach=pd.merge(CYCcoach, coaches, how='left', on=['Coach ID'], suffixes=('','_r'))
mycols=['Sport','Fname', 'Lname', 'Street', 'City', 'State', 'Zip', 'Phone', 'Email', 'Birthdate', 'Sex', 'Role', 'Division', 'Grade', 'Team', 'School', 'Parish of Registration', 'Parish of Residence', 'Coach ID']
for col in [col for col in mycols if col not in CYCcoach.columns]:
CYCcoach[col]='' # birthdate generally missing
CYCcoach=CYCcoach[mycols] # put back in desired order
# drop duplicates on CYC ID, team (sometimes occurs during merge)
CYCcoach=CYCcoach.drop_duplicates(['Coach ID','Team'])
return CYCcoach
def countteamplayers(df, teams, season, year):
''' For each team, summarize number of players (subset those that are younger or older) and list of names
passing mastersignups'''
df=df[df['Year']==year] # removes possible naming ambiguity
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season,[])
df=df[df['Sport'].isin(sportlist)] # only this sports season
df.Grade=df.Grade.replace('K',0)
df.Grade=df.Grade.astype('int')
teams.loc[:,'Grade']=teams.Grade.replace('K',0)
teams.loc[:,'Grade']=teams.Grade.astype('int')
teams.loc[:,'Playerlist']=teams.Playerlist.astype('str')
for index, row in teams.iterrows():
teamname=teams.loc[index]['Team']
match=df[df['Team']==teamname] # all players on this team from master_signups
teams.loc[index,'Number'] = len(match) # total number of players
# compose player list (First L.) and add to teams
playerlist=[]
for ind, ro in match.iterrows():
first=match.loc[ind]['First']
last=match.loc[ind]['Last']
strname=first+' ' +last[0]
playerlist.append(strname)
players=", ".join(playerlist)
teams.loc[index,'Playerlist'] = players
# count players above or below grade level
thisgrade=int(teams.loc[index]['Grade'])
teams.loc[index,'Upper'] = (match.Grade > thisgrade).sum()
teams.loc[index,'Lower'] = (match.Grade < thisgrade).sum()
writetoxls(teams, 'Teams', 'Teams_coaches.xlsx')
return teams
def writecontacts(df, famcontact, players, season, year):
''' From mastersignups and teams, output contact lists for all teams/all sports separately '''
# Slice by sport: Basketball (null for winter?), Soccer, Volleyball, Baseball, T-ball, Softball, Track)
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
df=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year)] # season is not in mastersignups... only individual sports
'''# for transfers to same school (but different grades), combine all into single list for given school
for index,row in df.iterrows():
if str(df.loc[index]['Team'])!='nan': # avoids nan team screwups
if '#' in df.loc[index]['Team']: # this combines Ambrose#2B, Ambrose#3G to single tab
df=df.set_value(index,'Team',df.loc[index]['Team'].split('#')[0])
'''
# get family contact info from famcontacts
df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# get school from players.csv
df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
# Sort by grade pre-split
df.loc[:,'Grade']=df.Grade.replace('K',0)
df.loc[:,'Grade']=df.Grade.apply(int)
df=df.sort_values(['Grade'], ascending=True)
df.loc[:,'Grade']=df.Grade.replace(0,'K') # replace K with zero to allow sorting
df.loc[:,'Team']=df.Team.replace(np.nan,'None') # still give contacts if team not yet assigned
df.loc[:,'Team']=df.Team.replace('','None')
# Standard sport contacts output for soccer, VB, basketball
if season!='Spring':
for i, sport in enumerate(sportlist):
fname=cnf._OUTPUT_DIR+'\\'+sport+'_'+str(year)+'_contacts.xlsx'
writer=pd.ExcelWriter(fname, engine='openpyxl')
Thissport=df[df['Sport']==sport]
teamlist= Thissport.Team.unique()
teamlist=np.ndarray.tolist(teamlist)
# Combine transfers to same school
transchools=[s.split('#')[0] for s in teamlist if '#' in s]
teamlist=[s for s in teamlist if '#' not in s]
teamlist.extend(transchools) # all to same school as single "team"
# now can organize contacts (and drop sport)
mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
'Email2', 'Team', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Plakey', 'Famkey', 'Family']
Thissport=Thissport[mycols] # drop columns and rearrange
for i, team in enumerate(teamlist):
thisteam=Thissport[Thissport['Team'].str.contains(team)]
thisteam.to_excel(writer,sheet_name=team,index=False) # this overwrites existing file
writer.save()
else: # handle spring special case
Balls=df[df['Sport']!='Track'] # all ball-bat sports together
mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
'Email2', 'Team', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Plakey', 'Famkey', 'Family']
Balls=Balls[mycols]
teamlist= Balls.Team.unique()
teamlist=np.ndarray.tolist(teamlist)
# Combine transfers
transchools=[s.split('#')[0] for s in teamlist if '#' in s]
teamlist=[s for s in teamlist if '#' not in s]
teamlist.extend(transchools) # all to same school as single "team"
fname=cnf._OUTPUT_DIR+'\\'+'Batball'+'_'+str(year)+'_contacts.xlsx'
writer=pd.ExcelWriter(fname, engine='openpyxl')
# create a separate tab for each team and write the contacts
for i, team in enumerate(teamlist):
thisteam=Balls[Balls['Team'].str.contains(team)]
thisteam.to_excel(writer,sheet_name=team,index=False) # this overwrites existing file
writer.save() # overwrites existing
# Entire track team as single file
Track=df[df['Sport']=='Track']
Track=Track[mycols] # drop columns and rearrange
fname=cnf._OUTPUT_DIR+'\\'+'Track'+'_'+str(year)+'_contacts.xlsx'
writer=pd.ExcelWriter(fname, engine='openpyxl')
Track.to_excel(writer,sheet_name='Track',index=False)
writer.save()
return
def makegoogcont(df, famcontact, players, season, year):
'''Create and save a google contacts file for all Cabrini teams
save to csv '''
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
df=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year)] # season is not in mastersignups... only individual sports
'''# for transfers to same school (but different grades), combine all into single list for given school
for index,row in df.iterrows():
if str(df.loc[index]['Team'])!='nan': # avoids nan team screwups
if '#' in df.loc[index]['Team']: # this combines Ambrose#2B, Ambrose#3G to single tab
df=df.set_value(index,'Team',df.loc[index]['Team'].split('#')[0])
'''
# get family contact info from famcontacts
df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# get school from players.csv
df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
# Drop any players not yet assigned
df=df.dropna(subset=['Team'])
# Full contacts list format for android/google
for i, sport in enumerate(sportlist):
Thissport=df[df['Sport']==sport]
teamlist= Thissport.Team.unique()
teamlist=np.ndarray.tolist(teamlist)
# drop if team is not yet assigned
teamlist=[s for s in teamlist if str(s) != 'nan']
# drop if team is 'drop'
teamlist=[s for s in teamlist if str(s) != 'drop']
# Drop all non-Cabrini transferred teams (which must contain #)
teamlist=[s for s in teamlist if '#' not in s]
# Combine track subteams to single team
teamlist=[s[0:5] if 'Track' in s else s for s in teamlist]
teamlist=set(teamlist)
teamlist=list(teamlist)
# now create google contacts list for each Cabrini team and save
for j, team in enumerate(teamlist):
thisteam=Thissport[Thissport['Team'].str.contains(team)]
# Drop duplicate from same family
thisteam=thisteam.drop_duplicates('Phone1')
thisteam.loc[:,'Name']=thisteam['First']+' '+thisteam['Last']
thisteam.loc[:,'Group']=sport+str(year)
mycols=['Name','Pfirst1','Last','Phone1','Phone2','Email1','Email2','Group']
newcols=['Name','Additional Name','Family Name','Phone 1 - Value','Phone 2 - Value',
'E-mail 1 - Value','E-mail 2 - Value','Group Membership']
thisteam=thisteam[mycols]
thisteam.columns=newcols
thisteam=thisteam.replace(np.nan,'')
fname=cnf._OUTPUT_DIR+'\\google'+team+'.csv'
thisteam.to_csv(fname, index=False)
return
def createsignups(df, Mastersignups, season, year):
''' pass signups and add signups to master list, also returns list of current
player keys by sport; typically use writesignupstoExcel instead
args:
df - signup (dataframe)
Mastersignups - existing all signups db-like file
season - ['Fall','Winter','Spring']
year- 4 digit year as int
returns:
Mastersignups - same with new unique entries
'''
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
# Use comma sep on multiple sport entries??
now=datetime.now()
thisdate=date.strftime(now,'%m/%d/%Y') # for signup date
df.loc[:,'SUdate']=thisdate # can do this globally although might also add to signups
startlen=len(Mastersignups) # starting number of signups
intcols=['SUkey','Year']
for i, col in enumerate(intcols):
if col not in df:
df.loc[df.index, col]=np.nan
mycols=Mastersignups.columns.tolist() # desired column order
for i, col in enumerate(mycols):
if col not in df:
df.loc[df.index,col]=np.nan
# TODO one option here would be to clone comma-separated sport entries (i.e. track and softball)
for i, sport in enumerate(sportlist):
# Use caution here due to Tball in Softball string problem (currently set to T-ball)
thissport=df.loc[df['Sport'].str.contains(sport, na=False, case=False)] # also handles multi-sports
# Prepare necessary columns
for index, row in thissport.iterrows():
thissport.loc[index,'Sport'] = sport # set individually to formal sport name
thissport.loc[index,'Year'] = int(year)
thissport.loc[index,'SUkey'] = 0 # assigned actual key below
# Now organize signups and add year
Mastersignups=pd.concat([thissport,Mastersignups], ignore_index=True)
Mastersignups=Mastersignups[mycols] # put back in original order
# drop duplicates and save master signups file (keep older signup if present... already assigned SUkey)
Mastersignups=Mastersignups.sort_values(['Plakey', 'Sport','Year','SUkey'], ascending=False) # keeps oldest signup
Mastersignups=Mastersignups.drop_duplicates(subset=['Plakey', 'Sport','Year']) # drop duplicates (for rerun with updated signups)
newsignups=len(Mastersignups)-startlen # number of new signups added this pass
print('Added ', str(newsignups),' new ', season, ' signups to master list.')
# add unique SUkey (if not already assigned)
neededkeys = Mastersignups[(Mastersignups['SUkey']==0)] # filter by year
availSUkeys=findavailablekeys(Mastersignups, 'SUkey', len(neededkeys)) # get necessary # of unique SU keys
keycounter=0
for index, row in neededkeys.iterrows():
Mastersignups.loc[index,'SUkey'] = availSUkeys[keycounter] # reassign SU key in source master list
keycounter+=1 # move to next available key
Mastersignups.loc[:,'Grade']=Mastersignups.Grade.replace('K',0)
Mastersignups=Mastersignups.sort_values(['Year', 'Sport', 'Gender','Grade'], ascending=False)
Mastersignups.loc[:,'Grade']=Mastersignups.Grade.replace(0,'K')
# autocsvbackup(Mastersignups,'master_signups', newback=True)
Mastersignups.to_csv(cnf._INPUT_DIR + '\\master_signups.csv', index=False, date_format='mm/dd/yy') # automatically saved
return Mastersignups
def replaceacro(df, acronyms):
''' Pass df column and return with acronyms replaced with full translations (parishes and schools
currently used only for CYC rosters '''
for index, row in acronyms.iterrows():
acro=acronyms.loc[index]['acronym']
transl=acronyms.loc[index]['translation']
# TODO only for parish columns
df.loc[:,'Parish of Registration']=df['Parish of Registration'].replace(acro, transl)
df.loc[:,'Parish of Residence']=df['Parish of Residence'].replace(acro, transl)
df.loc[:,'School']=df['School'].replace(acro, transl)
return df
def createrosters(df, season, year, players, teams, coaches, famcontact, acronyms):
''' From Mastersignups of this season creates Cabrini CYC roster and transfers (for separate sports)
and all junior sports (calculates ages for Judge Dowd); pulls info merged from famcontact, players, teams, and coaches
teams should already be assigned using teams xls and assigntoteams function
returns: None ... direct save to OUTPUT_DIR
'''
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
specials=['Chess','Track']
sports=sportsdict.get(season)
sportlist=[sport for sport in sports if sport not in specials]
speciallist=[sport for sport in sports if sport in specials] # for track, chess, other oddballs
Specials=df[(df['Year']==year) & (df['Sport'].isin(speciallist))] # deal with these at bottom
# Proceed with all normal South Central sports
df = df[(df['Year']==year) & (df['Sport'].isin(sportlist))] # filter by year
# make duplicate entry row for double-rostered players (multiple team assignments)
thismask = df['Team'].str.contains(',', na=False) # multiple teams are comma separated
doubles=df.loc[thismask]
for index, rows in doubles.iterrows():
team=doubles.loc[index,'Team']
team=team.split(',')[1] # grab 2nd of duplicate teams
doubles.loc[index, 'Team'] = team
df=pd.concat([df,doubles], ignore_index=True) # adds duplicate entry for double-rostered players with 2nd team
thismask = df['Team'].str.contains(',', na=False) # multiple teams are comma separated
for index, val in thismask.iteritems():
if val:
team=df.loc[index]['Team']
team=team.split(',')[0] # grab 1st of duplicate teams
df.loc[index, 'Team'] = team # removes 2nd team from first entry
# now grab all extra info needed for CYC rosters
# Street, City, State, Zip, Phone, email, Parishreg, parishres from fam-contact
df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# Get division from Teams xls
df=pd.merge(df, teams, how='left', on=['Team'], suffixes=('','_r2')) # effectively adds other team info for roster toall players
# DOB, School from players.csv
df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
df.loc[:,'Role']='Player' # add column for role
# df['Open/Closed']=np.nan
df.loc[:,'Coach ID']=''
def formatDOB(val):
# Pat moore date format is 4/4/19.. reformat as string for csv output
try:
return datetime.strftime(val, "%m/%d/%y")
except:
# print('Problem converting %s of type %s to date string format' %(val, type(val)) )
return ''
# Find Cabrini CYC names (containing hyphen)
thismask = df['Team'].str.contains('-', case=False, na=False)
CabriniCYC=df.loc[thismask] # all players on Cabrini CYC teams all sports this season
# Finds info for CYC coaches (all sports) and generate roster entries
coachroster=addcoachestoroster(teams, coaches) # coaches roster already in correct format + sport column
if len(CabriniCYC)>1: # skip if all transfers or junior (i.e. in spring)
# Split by sport
for i, sport in enumerate(sportlist):
Sportroster=CabriniCYC[CabriniCYC['Sport']==sport]
# reformat this mess as single CYC roster
Sportroster=organizeroster(Sportroster)
# Add coaches from this sport to roster
Rostercoaches=coachroster[coachroster['Sport']==sport]
Rostercoaches=organizeroster(Rostercoaches)
Sportroster=pd.concat([Sportroster,Rostercoaches], ignore_index=True) # adds coaches and players together
Sportroster=Sportroster.sort_values(['Team','Role','Grade','Lname'])
fname=cnf._OUTPUT_DIR+'\\Cabrini_'+sport+'roster'+str(year)+'.csv'
Sportroster=replaceacro(Sportroster, acronyms) # replace abbreviations
Sportroster.loc[:,'Birthdate']=Sportroster['Birthdate'].apply(lambda x: formatDOB(x))
Sportroster.to_csv(fname, index=False)
# done with Cabrini CYC rosters
# Break out all other types of teams (transfers, junior teams, Chess/Track)
thismask = df['Team'].str.contains('-', case=False, na=False)
Others=df.loc[~thismask] # no hyphen for all non Cabrini CYC level (Cabrini junior and transfers)
# Cabrini transferred players to CYC teams with # (i.e. Ambrose#8B, OLS#3G)
# Non-CYC cabrini junior teams start with number
thismask = Others['Team'].str.contains('#', na=True) # flag nans and set to true (usually jr teams w/o assignment)
# Transferred teams contain # such as OLS#3G
Transfers=Others.loc[thismask] # transferred teams have # but no hyphen
for i, sport in enumerate(sportlist): # output roster for all transfers (all grades in case of CYC)
Transferroster=Transfers[Transfers['Sport']==sport]
Transferroster=organizeroster(Transferroster)
Transferroster=Transferroster.sort_values(['Team', 'Sex', 'Grade'], ascending=True)
fname=cnf._OUTPUT_DIR+'\\CYC'+sport+'transfers.csv'
Transferroster=replaceacro(Transferroster,acronyms)
Transferroster.loc[:,'Birthdate']=Transferroster['Birthdate'].apply(lambda x: formatDOB(x))
Transferroster.to_csv(fname, index=False)
# Now deal with junior cabrini (should be only thing left after Cabrini CYC<
# transfers, special sports
Juniorteams=Others.loc[~thismask] # remove transfers
Juniorteams=Juniorteams[Juniorteams['Team']!='drop'] # remove dropped players
# now output all junior teams in same format (sometimes needed by <NAME>)
# also calculate current age
if len(Juniorteams)>0:
Juniorteams=organizeroster(Juniorteams) # put in standard South Central roster format
# Calculate current age from DOBs (renamed to Birthdate for roster only)
Juniorteams.loc[:,'Age']=calcage(Juniorteams['Birthdate'])
fname=cnf._OUTPUT_DIR+'\\Cabrini_junior_teams_'+str(year)+'.csv'
Juniorteams=replaceacro(Juniorteams, acronyms)
Juniorteams.loc[:,'Birthdate']=Juniorteams['Birthdate'].apply(lambda x: formatDOB(x))
Juniorteams.to_csv(fname, index=False)
# Deal with special cases -Track and Chess
# Get DOB/school from players.. anything else needed by <NAME>?
Specials=pd.merge(Specials, players, how='left', on='Plakey', suffixes=('','_r'))
# needs address
Specials=pd.merge(Specials, famcontact, how='left', on='Famkey', suffixes=('','_r2'))
for i, sport in enumerate(speciallist): # output roster for all transfers (all grades in case of CYC)
Specials=Specials[Specials['Sport']==sport]
Specials=Specials.rename(columns={'DOB':'Birthdate'})
mycols=['First', 'Last','Gender','Team','Grade','Birthdate','School','Address','Zip']
Specials=Specials[mycols]
Specials=Specials.sort_values(['Gender', 'Birthdate', 'Grade'], ascending=True)
Specials.loc[:,'Birthdate']=Specials['Birthdate'].apply(lambda x: formatDOB(x))
fname= cnf._OUTPUT_DIR+'\\'+ sport+'_'+str(year)+'_rosters.csv'
Specials.to_csv(fname, index=False)
return
def makemultiteam(df):
'''Small utility called by assigntoteams to make temp teams df that has separate entry for each grade if team is mixed grade
then merge to assign teams is straightforward
twoteams- '''
# TODO annoying problem with combining teams due to K1 (string but not int)
mycols=df.dtypes.index
# Deal with K1, K2 and such teams
kteams=[str(s) for s in np.ndarray.tolist(df.Graderange.unique()) if 'K' in str(s)]
kteams=[s for s in kteams if len(s)>1] # combo teams only
kteams=df[df['Graderange'].isin(kteams)]
xtrateams=pd.DataFrame(index=np.arange(0,0),columns=mycols) # empty df
# clones rows to match lower grades in range
for index, row in kteams.iterrows():
tempstr= kteams.loc[index]['Graderange']
gr1=0 # 0 for grade K
gr2=int(tempstr[1])
for gr in range(gr1,gr2):
newrow=kteams.loc[index] # grabs row as series
newrow.loc['Grade'] = gr # set to correct grade
xtrateams=xtrateams.append(newrow) # add single row to temp df
df.loc[:,'Grade']=df.Grade.replace('K','0', regex=True)
# get rid of K string problem
df.loc[:,'Graderange']=df.Graderange.replace('K','0', regex=True)
df.loc[:,'Graderange']=df.Graderange.astype('int')
# now handle numbered multiteams (e.g. 45 78 two digit ints)
multiteams=df.loc[df['Graderange']>9] # subset of teams comprised of multiple grades
for index, row in multiteams.iterrows(): # check for 3 or more grades
# TODO make sure it's not 3 grades (i.e. K-2)
tempstr= str(multiteams.loc[index]['Graderange'])
gr1=int(tempstr[0])
gr2=int(tempstr[1])
for gr in range(gr1,gr2):
newrow=multiteams.loc[index] # grabs row as series
newrow.loc['Grade'] = gr # set to correct grade
xtrateams=xtrateams.append(newrow) # add single row to temp df
# Detect gender-grade-sport w/ two teams
# now combine with original df
df=pd.concat([df,xtrateams], ignore_index=True) # complete team set
df=df[mycols] # back in original order
df=df.sort_values(['Gender','Grade'], ascending=True)
# After cloning by grade, look for two teams per grade options
twoteams=df[df.duplicated(['Sport','Gender','Grade'])]
return df, twoteams
def detectrosterchange(PMroster, myroster):
'''Compare submitted and returned rosters to look for unique rows (altered by <NAME>)
first row is <NAME> version (presumably correct to match CYC database) and second row is my
submitted version... make any corrections to appropriate source data files
datetime format conversions can be problematic '''
# all columns by default, false drops both duplicates leaving unique rows
bothrosters=pd.concat([PMroster,myroster])
mycols=bothrosters.columns
nanrows=bothrosters[pd.isnull(bothrosters['Birthdate'])]
nanrows=nanrows.drop_duplicates(keep=False)
# ensure player rows are both in correct format
myroster=myroster[pd.notnull(myroster['Birthdate'])]
PMroster=PMroster[pd.notnull(PMroster['Birthdate'])]
def removeLeadZero(val):
if val.startswith('0'):
return val[1:]
else:
return val
myroster.loc[:,'Birthdate']=myroster['Birthdate'].apply(lambda x:pd.to_datetime(x).strftime('%m/%d/%Y'))
PMroster.loc[:,'Birthdate']=PMroster['Birthdate'].apply(lambda x:pd.to_datetime(x).strftime('%m/%d/%Y'))
myroster.loc[:,'Birthdate']=myroster['Birthdate'].apply(lambda x:removeLeadZero(x))
PMroster.loc[:,'Birthdate']=PMroster['Birthdate'].apply(lambda x:removeLeadZero(x))
bothrosters=pd.concat([PMroster,myroster])
bothrosters=bothrosters.sort_values(['Fname','Lname'])
# Fix date string differences
alteredrows=bothrosters.drop_duplicates(keep=False)
alteredrows=alteredrows.append(nanrows)
alteredrows=alteredrows[mycols]
alteredrows=alteredrows.sort_values(['Lname','Fname'])
return alteredrows
def saveteams(teams):
'''Save teams tab into teams_coaches.xlsx after changes have been made '''
book=load_workbook('Teams_coaches.xlsx')
writer=pd.ExcelWriter('Teams_coaches.xlsx', engine='openpyxl')
writer.book=book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
teams.to_excel(writer,sheet_name='Teams',index=False) # this overwrites existing file
writer.save() # saves xls file with all modified data
def assigntoteams(df, season, year, teams, overwrite=False):
'''From mastersignups finds CYC team name based on year, grade, gender and sport from teams tab
(which only contains names from this season/year to avoid screwing up old custom team assignments'''
# teamsmult has multi grade range teams with duplicates for merge matching
# twoteams is multiple teams for same grade
Teamsmult, Twoteams =makemultiteam(teams) # makes duplicates team entries to match both grades
# compare grades as ints with K=0
df.loc[:,'Grade']=df.Grade.replace('K','0', regex=True) # convert Ks to zeros
df.loc[:,'Grade']=df['Grade'].astype('int')
Teamsmult.loc[:,'Grade']=Teamsmult['Grade'].astype('int') # ensure these are ints
# left merge keeps all master_signups oentries
df=pd.merge(df, Teamsmult, how='left', on=['Year','Grade','Gender','Sport'], suffixes=('','_r'))
# need to drop SUkey duplicates (keeping first)... occurs if >1 team per grade
df=df.drop_duplicates(subset=['SUkey']) # drops any duplicates by unique SUkey
# Consider all sports except Track (team assignment done separately by DOB)
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
# this is post-merge so no chance of getting indices screwed up
# select current sports & year and subset with new team assignment
CurrentSU=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year) & (pd.notnull(df['Team_r']))]
if overwrite==False: # if no overwrite, keep only those with nan for team
CurrentSU=CurrentSU.loc[pd.isnull(CurrentSU['Team'])]
# Never overwrite team assignment for known drops
CurrentSU=CurrentSU[CurrentSU['Team']!='drop']
counter=0
for index, row in CurrentSU.iterrows():
# all remaining can be overwritted (those w/ existing team dropped above)
match=df[df['SUkey']==CurrentSU.loc[index]['SUkey']]
if len(match)==1:
thisind=match.index[0]
# add new team assignment to correct index in original master signups
df.loc[thisind, 'Team'] = CurrentSU.loc[index]['Team_r']
counter+=1
print(str(counter),' player(s) newly assigned to teams')
# now drop extra columns and sort
mycols=['SUkey','First', 'Last', 'Grade', 'Gender', 'Sport', 'Year', 'Team', 'Plakey','Famkey', 'Family',
'SUdate', 'Issue date', 'Uniform#','UniReturnDate']
df.loc[:,'Grade']=df.Grade.replace('K',0)
df=df.sort_values(['Year','Sport', 'Gender', 'Grade'], ascending=True)
df.loc[:,'Grade']=df.Grade.replace('0','K', regex=True) # make sure any 0 grades are again replaced with K
df=df[mycols]
autocsvbackup(df,'master_signups', newback=True) # autobackup of master signups
df.to_csv(cnf._INPUT_DIR + '\\master_signups.csv', index=False) # save/overwrite existing csv
return df
def assigntrackgroup(df, year, players):
'''Assign to different track team based on age on May 31 of this year (school year+1)
'''
Track=df[(df['Sport']=='Track') & (df['Year']==year)]
Track=pd.merge(Track,players, how='left', on=['Plakey'], suffixes=('','2'))
numunassigned=len(Track[pd.isnull(Track['Team'])])
for index, row in Track.iterrows():
DOB=Track.loc[index]['DOB'] # merged from players.csv
if isinstance(DOB,str):
DOB=datetime.strptime(DOB,"%m/%d/%Y").date() # convert string to datetime
elif isinstance(DOB, pd.tslib.Timestamp):
DOB=DOB.date() # convert timestamp to datetime
trackage=date(year+1,5,31)-DOB # age on prior year's May 31st (same as school year in current convention)
trackage=(trackage.days + trackage.seconds/86400)/365.2425 # as decimal
trackage=math.floor(trackage)
if trackage <=7:
team='Track7'
elif 8 <= trackage <=9:
team='Track89'
elif 10 <= trackage <=11:
team='Track1011'
elif 12 <= trackage <=13:
team='Track1213'
elif 14 <= trackage <=15:
team='Track1415'
else: # probably some entry error
mystr=Track.loc[index]['First']+' '+Track.loc[index]['Last']+' Grade:'+Track.loc[index]['Grade']
print('Suspected DOB error for',mystr, 'DOB:', datetime.strftime(DOB, "%m/%d/%y") )
team=''
# Now write back altered subset to mastersignups (index is lost so use SUkey)
SUkey=int(Track.loc[index]['SUkey'])
match=df[df['SUkey']==SUkey] # This gives correct index
df.loc[match.index[0], 'Team'] = team # alter/assign team for this signup
newlyassigned=numunassigned-len(Track[ | pd.isnull(Track['Team']) | pandas.isnull |
#!/usr/bin/env python
# coding: utf-8
# ### Packages required for the webscrapping
#When we visit a web page, our web browser makes a request to a web server.
import requests
#Beautiful Soup is a Python library for pulling data out of HTML and XML files
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import time
# ### Get the URLs
#This function helps us create the link for each publication on the page
def url_obt(x):
# Extract href attribute and paste 'https://urbania.pe' to it.
#So, we get the URLs of each publication
return("https://urbania.pe"+x.attrs["href"])
#With this code we can get all the URLs, dynamically, on the webpage
i=1
#This is an empty list that will contain all the URLs
urls_pag = []
#While let us iterate the process of getting the URLs until it is FALSE
while True :
#From webpage to webpage, the only change we noticed is the number of the page. So we need to change the i
#that respresents the number of the page.
#We used the horizontal crawling for getting the URL from the first webpage to the last one.
urlpag = 'https://urbania.pe/buscar/venta-de-propiedades?page='+str(i)
#we fetch the content from the url, using the requests library
resp = requests.get(urlpag)
##we use the html parser to parse the url content and store it in a variable.
soup = BeautifulSoup(resp.text, 'html.parser')
#We use the tag article and then select its children 'a'. soup.select() returns a list of all possible matches,
#so you can easily iterate over it and call each element. This will help us to get the URLs
etiquetas_a = soup.select('article > a')
#Every page has publications, when we don´t find any publication. That means the page doesn´t exist and we stop the
#horizontal scrawling
if len(etiquetas_a)==0:
#The break statement terminates the loop containing it
break
#'map(url_obt,etiquetas_a)' obtains the href of each element on the list etiquetas_a using the function url_obt.
#So,we get the URLs of each publication.
#'Map' applies a function to all the items in an input_list.
#Extends list by appending elements from the iterable
urls_pag.extend(list(map(url_obt,etiquetas_a)))
#The counter used by while.
i = i+1
#urls_pag
#Save the URLs with pandas
#pd.to_pickle(urls_pag, 'urls_pag.pkl') #to save the dataframe
#urls_pag = pd.read_pickle("urls_pag.pkl")
# ## Get the URL, price in USD, area and coordinates.
#This function will help us when we don´t find a value to consider this as an empty value
def fill_na(fun):
#If an error is encountered, a try block code execution is stopped and transferred
#down to the except block.
try:
x = fun('')
except Exception as e:
x = ''
return x
# ### Get the price in USD
#This funtion gets the price in US$ of a property
#We need one parameter, a soup object.
def obtener_precio_dolares(posi_soup):
try :
#We select the tag 'div.b-leading-price-property.u-flex-wrap' and all the 'p' tag inside.
lista_p1 = posi_soup.select_one('div.b-leading-price-property.u-flex-wrap').select('p')
#We look for '$' in order to find the price in $
precio_obt = [p.text for p in lista_p1 if p.text.find('$')>0][0]
#As the structure of the page changes, this step should be considered to generalize the code.
inicio = precio_obt.find('US$')
precio_esp = precio_obt[inicio:]
precio_esp = precio_esp.replace('\n','')
#Sometimes the price is between parentheses. We want all the prices to have the same format.
#That is why we must eliminate parentheses.
precio_1 = precio_esp.replace('(','')
precio_propiedad = precio_1.replace(')','')
except:
precio_propiedad = ''
return(precio_propiedad)
#We keep the US$, just to show that the price is in dollars.
# ### Get the area
#this function selects the text where the area of the property is indicated
#You need one parameter, a soup object.
#The position of the CSS selector change, so we consider this code for finding the area.
def obt_posicion_area(posi_soup):
try :
#'We select 'div.b-ubication' and then we look for the 'p' tags. Then, we use a 'for' looking for 'm2'. That
#indicates where the area is stored.
lista_p = posi_soup.select_one('div.b-ubication').select('p')
#We get the text where we find the area.
area_propiedad = [p.text for p in lista_p if p.text.find('m2')>0][0]
except :
#When we don´t find 'm2'. That means the publications doesn´t have a value for the area.
area_propiedad=''
return(area_propiedad)
# In[37]:
#This function gets the area in the text that contains it.
#The parameter we need is the text where the area is found.
def definir_area(datos_area):
try :
#We separate the list into strings, using a white space ' '. Then we look for 'm2' since the previous values indicate the area.
separados = datos_area.split(' ')
#We look for the position of 'm2'.
find_m2 = separados.index('m2')
#If 'm2' is in position 1, then the value in position 0 is the area of the property.
if(find_m2==1):
area_propiedad = separados[:(find_m2)]
#If m2 is not in position 1, then:
else:
# If separados[find_m2-2] is equal to '-'. Then, it means that there is a range of areas in the property.
if(separados[find_m2-2]=='-'):
#So the strings are taken considering 3 previous positions to the position of find_m2.
area_propiedad = separados[(find_m2-3):find_m2]
#In case the above is not fulfilled, then, only a position prior to m2 is considered.
else:
area_propiedad = separados[(find_m2-1)]
#We join the strings
area = ''.join(area_propiedad)
except :
#When m2 is not ound, tht means there isn´t a value
area = ''
return(area)
# ### Get the coordinates
#This function gets the position of the coordinates
#The parameter we need is the object 'soup'.
def obt_posicion_coor(posi_soup):
try :
#We create a list of all the tags 'script' on the page.
lista_script = posi_soup.select('script')
#Create a list that contains only the text that has the word 'longitud'.
#This code was made due to the position of the coordinates varied day by day.
script = [i.text for i in lista_script if i.text.find('longitud')>0][0]
except :
script = ''
return(script)
#This function gets the longitude of the property
#The parameter you need is the text where the we find the longitude value
def get_longitud(coordenadas):
try :
#coordinates
#Between these two strings is the value of the longitude.
#We delete the spaces in the text
coordenadas = coordenadas.replace(' ','')
#We get the position where the value longitude is
inicio_longitud = coordenadas.find('longitud":"')
inicio_longitud
fin_longitud = coordenadas.find('",\n\t\t"address"')
fin_longitud
#We get the value of longitude
longitude = coordenadas[inicio_longitud+11:fin_longitud]
except :
longitude = ''
return(longitude)
#This function gets the latitude of the property
#The parameter you need is the text where the we find the latitude value
def get_latitud(coordenadas):
try :
#coordinates
#Between these two strings is the value of the longitude.
#We delete the spaces in the text
coordenadas = coordenadas.replace(' ','')
#We get the position where the value latitude is
inicio_latitud=coordenadas.find('latitud":"')
fin_latitud=coordenadas.find('",\n\t"longitud"')
fin_latitud
latitude = coordenadas[inicio_latitud+10:fin_latitud]
except :
latitude = ''
return(latitude)
# ### Get all the data
#An empty list that stores the URL, price, area and coordinates.
lista_datos = []
for information in urls_pag:
try:
#The 'for' gets the information of each publication. We get the URL, price, area and coordinates.
#From webpage to webpage, the only change we noticed is the number of the page. So we need to move from page to page
#in order to get the information of all the publications.
resp = requests.get(information)
print(resp.url)
#Sometimes the URL in resp.url is different from the one in information, that's why we use this 'if'.
if resp.url != information :
#If there is a difference, the code won´t stop. It just will skip to the next URL
continue
#create an instance of the BeautifulSoup class to parse our document
soup = BeautifulSoup(resp.text, 'html.parser')
# PRICE
#precio contains the price in dollars
precio = obtener_precio_dolares(soup)
#AREA
#We obtain the text that contains area
posi_area=obt_posicion_area(soup)
#We select just the value for the area
str_area = definir_area(posi_area)
#COORDINATES
#We get the text that contains the coordinates
coor = obt_posicion_coor(soup)
#We get the longitude and latitude
longi = get_longitud(coor)
lat = get_latitud(coor)
#We store the information in the list 'conjunto'
conjunto = [resp.url,precio,str_area,longi,lat]
except :
conjunto = []
#We add the information in the list 'lista_datos'
lista_datos.append(conjunto)
# ### List with URL, price, area and coordinates.
#We get a list with all the information
#The URL is complete here
#lista_datos
# ### Table with the information
#We make a table with the information
tabla_lista_datos = | pd.DataFrame(lista_datos,columns=['URL','Precio','Area','Latitud','Longitud']) | pandas.DataFrame |
import abc
import datetime
import itertools
import os
import time
import warnings
from operator import attrgetter
import imageio
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
import sklearn.metrics
import sympy as sym
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.spatial import ConvexHull
from sklearn.base import clone
from sklearn.externals import joblib
from sklearn.metrics.scorer import make_scorer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
import dill as pickle
from analyze_panel_data.preprocessing.dimension_reduction import \
reduce_dim_of_panel
from analyze_panel_data.utils import split_pipeline_at_step
from ..model_selection.split_data_target import \
split_panel_into_data_and_target_and_fill_missing
from ..utils import hash_string, multiindex_to_panel
from ..visualization import inferred_model as vis_model
from ..visualization.inferred_model import (
aggregate_dimensions_of_grid_points_and_velocities,
bounding_grid, make_axis_labels,
mask_arrays_with_convex_hull)
from ..visualization.utils import (convert_None_to_empty_dict_else_copy,
create_fig_ax, maybe_save_fig,
shifted_color_map)
regression_metrics = [
sklearn.metrics.r2_score,
sklearn.metrics.mean_squared_error,
sklearn.metrics.mean_absolute_error]
scoring_values_sklearn = [
'neg_mean_absolute_error',
'neg_mean_squared_error',
'neg_median_absolute_error',
'r2']
MAX_LENGTH_FILE_NAME = 200
class DimensionReducingModel(metaclass=abc.ABCMeta):
"""An estimator that optionally reduces dimensions as an intermediate step.
"""
@abc.abstractmethod
def __init__(self, model, dim_reducer, predictor_from_reduced_dimensions):
"""
model : estimator with `fit` and `predict` methods
dim_reducer : estimator
The part of the model that reduces dimensions.
predictor_from_reduced_dimensions : estimator
The part of the model that predicts the target from inputs that
are the output of `dim_reducer`.
"""
self.model = model
self.dim_reducer = dim_reducer
self.predictor_from_reduced_dimensions = (
predictor_from_reduced_dimensions)
@abc.abstractmethod
def reduce_dimensions(self, X):
"""Reduce dimensions of the matrix X."""
def __str__(self):
template = ('{cls}(dim_reducer={dim_reducer}, '
'predictor_from_reduced_dimensions={pred})')
return template.format(
cls=self.__class__.__name__, dim_reducer=self.dim_reducer,
pred=self.predictor_from_reduced_dimensions)
def predict_from_reduced_dimensions(self, X_dim_reduced):
"""Predict the target from a matrix of dimension-reduced data."""
return self.predictor_from_reduced_dimensions.predict(
np.array(X_dim_reduced))
def predict_and_reduce_from_reduced_dimensions(self, X_dim_reduced):
return self.reduce_dimensions(
self.predict_from_reduced_dimensions(np.array(X_dim_reduced)))
class ModelThatDoesNotReduceDimensions(DimensionReducingModel):
"""A model that does not reduce dimensions as an intermediate step.
Its dimension reducer is the identity function.
"""
def __init__(self, model):
super(ModelThatDoesNotReduceDimensions, self).__init__(
model=model, dim_reducer=FunctionTransformer(None),
predictor_from_reduced_dimensions=model)
def reduce_dimensions(self, X):
"""Reduce dimensions of the matrix X.
This model does not reduce dimensions as an intermediate step."""
return X
class DimensionReducingPipeline(DimensionReducingModel):
"""An sklearn Pipeline that may reduce dimensions as an intermediate step.
"""
def __init__(self, pipeline, step_of_dimension_reducer):
"""Create a dimension-reducing Pipeline model.
Parameters
----------
pipeline : sklearn Pipeline
The model
step_of_dimension_reducer : str
The name of the step in the pipeline that reduces dimensions
"""
reducer, predictor = split_pipeline_at_step(
pipeline, step_of_dimension_reducer)
super(DimensionReducingPipeline, self).__init__(
model=pipeline, dim_reducer=reducer,
predictor_from_reduced_dimensions=predictor)
def reduce_dimensions(self, X):
return self.dim_reducer.transform(X)
class _BaseDimensionReducingPanelModel(metaclass=abc.ABCMeta):
"""Train a model to predict a panel dataset, and analyze those predictions.
Note: do not use this class. Use a class that inherits from it, such as
`DimensionReducingPanelModelSKLearn` or `DimensionReducingPanelModelKeras`.
"""
def __init__(
self,
panel,
model_with_hyperparameter_search,
model_predicts_change,
validate_on_full_dimensions=False,
num_lags=1, as_sequence=False,
dim_reducer_preprocessing=None,
fit_preprocessing_dimension_reduction_to_train=True,
target_transformer=None,
fill_missing_value=0.0,
metric=sklearn.metrics.mean_squared_error,
metric_greater_is_better=False,
results_directory='results', overwrite_existing_results=False,
already_fitted_model_in_directory=None):
"""A panel dataset and model of it, with dimension reduction optionally
as preprocessing step, a part of the model, and as a postprocessing
step.
Parameters
----------
panel : pandas Panel
The time-series data we want to train a model to predict.
* items denote different trajectories;
* major_axis is time;
* minor_axis is features of those times at those times.
model_with_hyperparameter_search : a model wrapped with a
hyperparameter search such as GridSearchCV or RandomizedGridSearchCV,
or `None`
The model we want to train to predict the panel data (i.e., to
predict the next time step given `num_lags` many previous
time steps). This must be a model wrapped with a hyperparameter
search object such as `sklearn.model_selection.RandomizedSearchCV`.
It should have a `cv` attribute that is a cross validator.
If None, then a filename in which a hyperparameter search's best
model has been saved should be given in the input
`already_fitted_model_in_directory`.
model_predicts_change : bool
Whether the model predicts the change between one time step and the
next (`model_predicts_change == True`) or predicts the value at the
next time step.
validate_on_full_dimensions : bool, default: False
Whether the validation score should be the score on the
original, full-dimensionsal data and target (without being
transformed by `dim_reducer_preprocessing`) or on the data and
target after they are transformed by `dim_reducer_preprocessing`.
num_lags : int, default: 1
The number of time lags to use for prediction. For example, if
`num_lags` is `1`, then the model predicts year `t` using the state
at year `t - 1`.
as_sequence : bool, default: False
Whether the data should have shape
`(num_samples, num_lags, num_features)` instead of shape
`(num_samples, num_lags * num_features)`. Set this to `True`
for recurrent neural networks, for example.
dim_reducer_preprocessing : transformer or None, default: None
A dimension reducer such as sklearn.decomposition.PCA. This is
applied as a preprocessing step in an unsupervised way. If None,
then no dimension reduction is done as a preprocessing step.
fit_preprocessing_dimension_reduction_to_train : bool, default: True
Whether to fit `dim_reducer_preprocessing` to just the first
training split of `cv`. It is a good idea to set this to `True` in
order to not cheat by letting the dimension reduction "see" the
data in the future compared to the first training set.
target_transformer : None or function
Function to apply to the target before fitting. This should be
an `sklearn.preprocessing.FunctionTransformer` with an inverse
function provided (`inverse_func`) so that predictions can be made
by calling the inverse function on the model's predictions.
This transformer should have a 'filename' attribute with a
descriptive string, for use in saving the data to the hard disk.
fill_missing_value : float, default: 0.0
The value to use to fill missing values.
metric : a metric for evaluating model performance
A scoring or loss function whose signature is
`y_true, y_pred, sample_weight=None` plus other optional keyword
arguments. E.g., `sklearn.metrics.mean_squared_error`.
This metric is used both for fitting the model and for evaluating
the model on out-of-sample data.
If `validate_on_full_dimensions` is true, then in cross
validation the scorer is changed so that it measures the score
on the full dataset (without being transformed by the
preprocessing dimension reduction).
metric_greater_is_better : bool, default: False
Whether the metric is a score function (higher output is better)
or a loss function (lower output is better; this is the default).
results_directory : name of the directory in which to put results
The results will be put into 'results_directory/{self.filename}/'
where `self.filename` is created by combining the `filename`
attributes of `panel`, `model_with_hyperparameter_search`, and
`dim_reducer_preprocessing`, as well as `num_lags` and
`validate_on_full_dimensions` (see Notes below).
overwrite_existing_results : bool, default: False
Whether to overwrite existing results if a
`DimensionReducingPanelModel` with this results_path and filename
(see note below) already exists.
already_fitted_model_in_directory : None or str
If a string, then a fitted model is found in the path
`os.path.join(results_directory, already_fitted_model_in_directory,
'best_model')`
This model is loaded into `self.best_model`. In this case, the
input for `model_with_hyperparameter_search` should be None.
Notes
-----
A file name for this object is created using the `filename` attribute
of `panel`, `model_with_hyperparameter_search`, `cv`,
`model_predicts_change`, `dim_reducer_preprocessing` (as well as
`num_lags` and `validate_on_full_dimensions`).
If those objects do not have a `filename` attribute, then no error is
thrown, and a generic fallback string is used (such as 'model' if
`model_with_hyperparameter_search` does not have a `filename`
attribute).
"""
self.panel = panel
self.model = model_with_hyperparameter_search
self.model_predicts_change = model_predicts_change
self.validate_on_full_dimensions = validate_on_full_dimensions
self.reduces_dim_preprocessing = dim_reducer_preprocessing is not None
self.dim_reducer_preprocessing = (
FunctionTransformer(None) if dim_reducer_preprocessing is None
else dim_reducer_preprocessing)
self.target_transformer = (
FunctionTransformer(None) if target_transformer is None
else target_transformer)
self.fill_missing_value = fill_missing_value
self.as_sequence = as_sequence
self.num_lags = num_lags
self.fit_preprocessing_dimension_reduction_to_train = (
fit_preprocessing_dimension_reduction_to_train)
self.metric = metric
self.metric_greater_is_better = metric_greater_is_better
self.overwrite_existing_results = overwrite_existing_results
self.attributes_to_join_to_make_filename = [
'model', 'panel',
'dim_reducer_preprocessing', 'target_transformer']
self._results_directory = results_directory
if already_fitted_model_in_directory is not None:
# The model has already been fitted and is located in the path
# results_path/already_fitted_model_in_directory/best_model
self._filename = already_fitted_model_in_directory
if not os.path.exists(os.path.join(self.results_path)):
m = ('A directory `{d}` was given, but this directory is'
'not found in the results directory `{res}`.')
raise ValueError(m.format(d=already_fitted_model_in_directory,
res=results_directory))
os.makedirs(self.results_path, exist_ok=True)
os.makedirs(self.animations_path, exist_ok=True)
os.makedirs(self.plot_path, exist_ok=True)
os.makedirs(self.path_to_best_model_directory, exist_ok=True)
self.load_best_model()
else:
self._filename = self._create_filename()
self.create_results_paths()
self.best_model = None
self.fit_time = None
self.reduce_dimensions_preprocessing_and_fill_missing_values()
self.compute_data_target()
# self.set_up_times_in_cv_and_model_cv_if_needed()
# self._set_times_attribute_if_needed(self.cv)
if hasattr(self.model, 'cv'):
self._set_times_attribute_if_needed(self.model.cv)
set_scoring_to_evaluate_on_full_dimensions = (
self.validate_on_full_dimensions and
hasattr(self.model, 'scoring') and self.reduces_dim_preprocessing)
if set_scoring_to_evaluate_on_full_dimensions:
print('Setting scoring to validate on unreduced dimensions')
self.model.scoring = self._create_scorer_for_full_dimensions()
elif hasattr(self.model, 'scoring'):
self.model.scoring = make_scorer(
self.metric, greater_is_better=metric_greater_is_better)
# These are set to not-None values by methods:
self.model_dim_reducer = None
self.model_predictor_from_reduced_dim = None
self.dimension_reducing_model = None
self._n_reduced_dimensions_in_model = (
'The model has not yet been fitted or not yet split into a '
'dimension reducer and predictor. First fit the model using the '
'method `fit_model_to_entire_dataset_and_save_best_model`. Then '
'run `split_best_model_into_dimension_reducer_and_predictor` '
'with the appropriate parameters.')
def _get_filename_of_attribute(self, attribute):
"""Get the `filename` attribute of an attribute of `self`.
This method is used to create a brief filename using the method
`_create_filename`."""
attribute_value = self.__getattribute__(attribute)
if (attribute_value is None or
(isinstance(attribute_value, FunctionTransformer) and
attribute_value.func is None)):
return 'None'
else:
return getattr(attribute_value, 'filename', attribute)
def _create_filename(self):
"""Return a string that summarizes this panel, model, and other
parameters."""
filename = '__'.join(
'{a}={val}'.format(a=a, val=self._get_filename_of_attribute(a))
for a in self.attributes_to_join_to_make_filename)
extra = ('__validate_on_full_dimensions={val_full}__num_lags={lag}'
'__model_predicts_change={change}__metric={metric}')
filename += extra.format(
val_full=self.validate_on_full_dimensions, lag=self.num_lags,
change=self.model_predicts_change,
metric=getattr(self.metric, '__name__', str(self.metric)))
return filename
@property
def filename(self):
"""Return a string to use in file names, hashed if it is too long."""
if len(self._filename) > MAX_LENGTH_FILE_NAME:
return hash_string(self._filename)
else:
return self._filename
@property
def results_path(self):
return os.path.join(self._results_directory, self.filename)
@property
def animations_path(self):
return os.path.join(self.results_path, 'animations')
@property
def plot_path(self):
return os.path.join(self.results_path, 'plots')
@property
def path_to_best_model_directory(self):
"""Return the path to the directory where the best model is saved."""
return os.path.join(self.results_path, 'best_model')
@property
def best_model_filename(self):
return 'best_model'
@property
def path_to_best_model(self):
return os.path.join(self.path_to_best_model_directory,
self.best_model_filename)
@property
def cv_results_path(self):
return os.path.join(self.path_to_best_model_directory, 'cv_results')
@property
def fit_time_path(self):
return os.path.join(self.path_to_best_model_directory, 'fit_time')
@property
def n_reduced_dimensions_in_model(self):
"""The number of dimensions after the model's internal dimension
reducer is applied."""
return self._n_reduced_dimensions_in_model
def create_results_paths(self):
if os.path.exists(self.results_path):
msg = 'A directory already exists at the results path {}.'.format(
self.results_path)
if self.overwrite_existing_results:
msg += (' Continuing anyway (and overwriting results) '
'because `overwrite_existing_results` is `True`. '
' If you fit the model, it will overwrite the '
'previously saved fit.')
warnings.warn(msg)
else:
raise RuntimeError(
msg + ' Stopping now because `overwrite_existing_results`'
' is `False`. To load the existing results, set '
'`overwrite_existing_results=True` but do not fit the'
' model.')
os.makedirs(self.results_path, exist_ok=True)
os.makedirs(self.animations_path, exist_ok=True)
os.makedirs(self.plot_path, exist_ok=True)
os.makedirs(self.path_to_best_model_directory, exist_ok=True)
with open(os.path.join(self.results_path,
'description.txt'), 'w') as f:
f.write(self._filename)
def __str__(self):
"""Return a shorthand description of just the most important inputs.
"""
return '{cls_name}({inputs})'.format(
cls_name=self.__class__.__name__,
inputs=', '.join(
'\n\n\t{a}={val}'.format(a=a, val=self.__getattribute__(a))
for a in self.attributes_to_join_to_make_filename))
def reduce_dimensions_preprocessing_and_fill_missing_values(self):
"""Preprocess the panel data by reducing dimensions in an unsupervised
way."""
if (self.fit_preprocessing_dimension_reduction_to_train and
hasattr(self.model, 'cv')):
cross_validators = [self.model.cv]
else:
cross_validators = None # fit `dim_reducer` to the entire data
self.df_dim_reduced = reduce_dim_of_panel(
self.panel, self.dim_reducer_preprocessing,
fill_missing_value=self.fill_missing_value,
cross_validators=cross_validators)
self.n_reduced_dimensions_after_preprocessing = (
self.df_dim_reduced.shape[1])
self.panel_dim_reduced = multiindex_to_panel(self.df_dim_reduced)
assert not pd.isnull(
self.df_dim_reduced.dropna(axis=0, how='all')).any().any()
def compute_data_target(self):
"""Split the panel into data (what we predict from) and target (what
we predict).
Do this for both the original data before its dimensions are reduced
as a preprocessing step and for the panel after its dimensions are
reduced."""
self.X, self.y = split_panel_into_data_and_target_and_fill_missing(
self.panel_dim_reduced, num_lags=self.num_lags,
as_sequence=self.as_sequence,
target_is_difference=self.model_predicts_change)
self.X_full_dim, self.y_full_dim = (
split_panel_into_data_and_target_and_fill_missing(
self.panel, num_lags=self.num_lags,
target_is_difference=self.model_predicts_change))
self.y_full_dim = pd.DataFrame(
self.target_transformer.transform(self.y_full_dim),
index=self.y_full_dim.index, columns=self.y_full_dim.columns)
self.y = pd.DataFrame(
self.target_transformer.transform(self.y),
index=self.y.index, columns=self.y.columns)
assert not | pd.isnull(self.X) | pandas.isnull |
'''
Created on Aug 5, 2021
@author: paepcke
'''
import csv
import json
import os
from pathlib import Path
import shutil
import struct
import tempfile
import unittest
import zlib
import skorch
import torch
from experiment_manager.experiment_manager import ExperimentManager, AutoSaveThread, Datatype, \
JsonDumpableMixin
from experiment_manager.neural_net_config import NeuralNetConfig
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
TEST_ALL = True
#TEST_ALL = False
'''
TODO:
o test moving the experiment: ensure relative addressing!
'''
class Jsonable(JsonDumpableMixin):
def __init__(self):
self.my_dict = {'key1' : 'The goose',
'key2' : 'is cooked'
}
def json_dump(self, fname):
with open(fname, 'w') as fd:
json.dump(json.dumps(self.my_dict), fd)
@classmethod
def json_load(cls, fname):
my_dict = json.loads(json.load(fname))
obj = Jsonable()
obj.my_dict = my_dict
return obj
class ExperimentManagerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.curr_dir = os.path.dirname(__file__)
cls.exp_fname = 'experiment'
cls.prefab_exp_fname = 'fake_experiment'
cls.exp_root = os.path.join(cls.curr_dir, cls.exp_fname)
cls.prefab_exp_root = os.path.join(cls.curr_dir, cls.prefab_exp_fname)
def setUp(self):
try:
shutil.rmtree(self.prefab_exp_root)
except FileNotFoundError:
pass
os.makedirs(self.prefab_exp_root)
# Create a little torch model and save it:
models_dir = os.path.join(self.prefab_exp_root,'models')
os.makedirs(models_dir)
model_path = os.path.join(models_dir, 'tiny_model.pth')
self.tiny_model = TinyModel()
torch.save(self.tiny_model.state_dict(), model_path)
# Same for a skorch model:
self.tiny_skorch = skorch.classifier.NeuralNetBinaryClassifier(TinyModel)
self.tiny_skorch.initialize()
self.skorch_model_path = os.path.join(models_dir, 'tiny_skorch.pkl')
self.skorch_opt_path = os.path.join(models_dir, 'optimizer.pkl')
self.skorch_hist_path = os.path.join(models_dir, 'history.json')
self.tiny_skorch.save_params(f_params=self.skorch_model_path,
f_optimizer=self.skorch_opt_path,
f_history=self.skorch_hist_path)
# Create two little csv files:
csvs_dir = os.path.join(self.prefab_exp_root,'csv_files')
os.makedirs(csvs_dir)
self.make_csv_files(csvs_dir)
# Create a little json file:
json_dir = os.path.join(self.prefab_exp_root,'json_files')
os.makedirs(json_dir)
self.make_json_file(json_dir)
# Create some untyped files
untyped_dir = os.path.join(self.prefab_exp_root,'untyped_files')
os.makedirs(untyped_dir)
self.make_untyped_files(untyped_dir)
# Create a tiny png file:
figs_dir = os.path.join(self.prefab_exp_root,'figs')
os.makedirs(figs_dir)
with open(os.path.join(figs_dir, "tiny_png.png") ,"wb") as fd:
fd.write(self.makeGrayPNG([[0,255,0],[255,255,255],[0,255,0]]))
hparams_dir = os.path.join(self.prefab_exp_root,'hparams')
os.makedirs(hparams_dir)
self.hparams_path = self.make_neural_net_config_file(hparams_dir)
def tearDown(self):
try:
self.exp.close()
except:
pass
try:
shutil.rmtree(self.exp_root)
except:
pass
try:
shutil.rmtree(self.prefab_exp_root)
except:
pass
# ------------------- Tests --------------
#------------------------------------
# test_creation
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_creation(self):
exp = ExperimentManager(self.exp_root)
self.assertEqual(exp['root_path'], self.exp_root)
self.assertEqual(exp['_models_path'], os.path.join(self.exp_root, 'models'))
self.assertTrue(exp.csv_writers == {})
# Should have a json file in root dir:
self.assertTrue(os.path.exists(os.path.join(self.exp_root, 'experiment.json')))
# Delete and restore the experiment:
exp.close()
del exp
exp1 = ExperimentManager(self.exp_root)
# For cleanup in tearDown():
self.exp = exp1
self.assertEqual(exp1['root_path'], self.exp_root)
self.assertEqual(exp1['_models_path'], os.path.join(self.exp_root, 'models'))
self.assertTrue(exp1.csv_writers == {})
#------------------------------------
# test_dict_addition
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_dict_addition(self):
exp = ExperimentManager(self.exp_root)
# For cleanup in tearDown():
self.exp = exp
tst_dict = {'foo' : 10, 'bar' : 20}
csv_file_path = exp.save('first_dict', tst_dict)
with open(csv_file_path, 'r') as fd:
reader = csv.DictReader(fd)
self.assertEqual(reader.fieldnames, ['foo', 'bar'])
row_dict = next(reader)
self.assertEqual(list(row_dict.values()), ['10','20'])
self.assertEqual(list(row_dict.keys()), ['foo', 'bar'])
writers_dict = exp.csv_writers
self.assertEqual(len(writers_dict), 1)
wd_keys = list(writers_dict.keys())
first_key = wd_keys[0]
self.assertEqual(first_key, Path(csv_file_path).stem)
self.assertEqual(type(writers_dict[first_key]), csv.DictWriter)
# Add second row to the same csv:
row2_dict = {'foo' : 100, 'bar' : 200}
exp.save('first_dict', row2_dict)
# Second row should be [100, 200]:
with open(csv_file_path, 'r') as fd:
reader = csv.DictReader(fd)
row_dict0 = next(reader)
self.assertEqual(list(row_dict0.values()), ['10','20'])
row_dict1 = next(reader)
self.assertEqual(list(row_dict1.values()), ['100','200'])
# Should be able to just write a row, not a dict:
exp.save('first_dict', [1000,2000])
# Look at 3rd row should be ['1000', '2000']:
with open(csv_file_path, 'r') as fd:
reader = csv.DictReader(fd)
row_dict0 = next(reader)
self.assertEqual(list(row_dict0.values()), ['10','20'])
row_dict1 = next(reader)
self.assertEqual(list(row_dict1.values()), ['100','200'])
row_dict2 = next(reader)
self.assertEqual(list(row_dict2.values()), ['1000','2000'])
#------------------------------------
# test_saving_csv
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_saving_csv(self):
exp = ExperimentManager(self.exp_root)
tst_dict = {'foo' : 10, 'bar' : 20}
csv_file_path = exp.save('first_dict', tst_dict)
exp.close()
del exp
# Reconstitute the same experiment:
exp = ExperimentManager(self.exp_root)
# For cleanup in tearDown():
self.exp = exp
# First, ensure that the test dict
# is unharmed without using the ExperimentManager
# instance:
with open(csv_file_path, 'r') as fd:
reader = csv.DictReader(fd)
self.assertEqual(reader.fieldnames, ['foo', 'bar'])
row_dict = next(reader)
self.assertEqual(list(row_dict.values()), ['10','20'])
self.assertEqual(list(row_dict.keys()), ['foo', 'bar'])
# Now treat the experiment
writers_dict = exp.csv_writers
self.assertEqual(len(writers_dict), 1)
#------------------------------------
# test_csv_header_first
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_csv_header_first(self):
exp = ExperimentManager(self.exp_root)
self.exp = exp
csv_file_path = exp.save('header_first', item=None, header=['foo', 'bar'])
tst_list = [[1,2],[3,4]]
exp.save('header_first', tst_list)
exp.close()
with open(csv_file_path, 'r') as fd:
reader = csv.DictReader(fd)
first_row_dict = next(reader)
expected = {'foo' : '1', 'bar' : '2'}
self.assertDictEqual(first_row_dict, expected)
second_row_dict = next(reader)
expected = {'foo' : '3', 'bar' : '4'}
self.assertDictEqual(second_row_dict, expected)
#------------------------------------
# test_adding_to_csv_index_ignored
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_adding_to_csv_index_ignored(self):
exp = ExperimentManager(self.exp_root)
self.exp = exp
df = pd.DataFrame([[1,2,3],[10,20,30]], index=[(3, 0.5, 0.01), (4, 0.6, 0.08)], columns=[100,200,300])
exp.save('df', df)
exp.close()
row_dicts = self.read_csv_file('df')
expected = [{'100': '1', '200': '2', '300': '3'}, {'100': '10', '200': '20', '300': '30'}]
for i, one_dict in enumerate(row_dicts):
self.assertDictEqual(one_dict, expected[i])
exp = ExperimentManager(self.exp_root)
exp.save('df', df)
row_dicts = self.read_csv_file('df')
expected = [{'100': '1', '200': '2', '300': '3'}, {'100': '10', '200': '20', '300': '30'},
{'100': '1', '200': '2', '300': '3'}, {'100': '10', '200': '20', '300': '30'}]
for i, one_dict in enumerate(row_dicts):
self.assertDictEqual(one_dict, expected[i])
#------------------------------------
# test_adding_to_csv_index_included
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_adding_to_csv_index_included(self):
exp = ExperimentManager(self.exp_root)
self.exp = exp
df = pd.DataFrame([[1,2,3],[10,20,30]], index=[(3, 0.5, 0.01), (4, 0.6, 0.08)], columns=[100,200,300])
exp.save('df', df, index_col='the_idx')
exp.close()
row_dicts = self.read_csv_file('df')
expected = [{'the_idx': '(3, 0.5, 0.01)', '100': '1', '200': '2', '300': '3'},
{'the_idx': '(4, 0.6, 0.08)', '100': '10', '200': '20', '300': '30'}]
for i, one_dict in enumerate(row_dicts):
self.assertDictEqual(one_dict, expected[i])
exp = ExperimentManager(self.exp_root)
exp.save('df', df)
row_dicts = self.read_csv_file('df')
expected = [{'the_idx': '(3, 0.5, 0.01)', '100': '1', '200': '2', '300': '3'},
{'the_idx': '(4, 0.6, 0.08)', '100': '10', '200': '20', '300': '30'},
{'the_idx': '(3, 0.5, 0.01)', '100': '1', '200': '2', '300': '3'},
{'the_idx': '(4, 0.6, 0.08)', '100': '10', '200': '20', '300': '30'}
]
for i, one_dict in enumerate(row_dicts):
self.assertDictEqual(one_dict, expected[i])
#------------------------------------
# test_saving_hparams
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_saving_hparams(self):
exp = ExperimentManager(self.exp_root)
exp.add_hparams('my_config', self.hparams_path)
config_obj = exp['my_config']
# Should have a json export of the config instance:
saved_copy_path = os.path.join(exp.hparams_path, 'my_config.json')
with open(saved_copy_path, 'r') as fd:
json_str = fd.read()
other_config_obj = NeuralNetConfig.json_loads(json_str)
# Couple of spot checks that the config instance
# behaves as expected:
self.assertEqual(other_config_obj['Training']['net_name'], 'resnet18')
self.assertEqual(other_config_obj.getint('Parallelism', 'master_port'), 5678)
# The config instance should be available
# via the config key:
self.assertEqual(config_obj, exp['my_config'])
# Couple of spot checks that the config instance
# behaves as expected:
self.assertEqual(config_obj['Training']['net_name'], 'resnet18')
self.assertEqual(config_obj.getint('Parallelism', 'master_port'), 5678)
exp.close()
del exp
# Reconstitute the same experiment:
exp1 = ExperimentManager(self.exp_root)
# For cleanup in tearDown():
self.exp = exp1
config_obj = exp1['my_config']
self.assertEqual(config_obj['Training']['net_name'], 'resnet18')
self.assertEqual(config_obj.getint('Parallelism', 'master_port'), 5678)
#------------------------------------
# test_saving_dataframes
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_saving_dataframes(self):
exp = ExperimentManager(self.exp_root)
df = pd.DataFrame([[1,2,3],
[4,5,6],
[7,8,9]],
columns=['foo','bar','fum'],
index= ['row1','row2','row3'])
# Save without the row labels (i.e w/o the index):
dst_without_idx = exp.save('mydf', df)
self.assertEqual(dst_without_idx, exp.csv_writers['mydf'].fd.name)
df_retrieved_no_idx_saved = pd.read_csv(dst_without_idx)
# Should have:
# foo bar fum
# 0 1 2 3
# 1 4 5 6
# 2 7 8 9
df_true_no_idx = | pd.DataFrame.from_dict({'foo' : [1,4,7], 'bar' : [2,5,8], 'fum' : [3,6,9]}) | pandas.DataFrame.from_dict |
import pytest
from dagster import (
AssetMaterialization,
DagsterInvariantViolationError,
DagsterType,
EventMetadataEntry,
Field,
In,
Out,
Output,
Selector,
check_dagster_type,
dagster_type_loader,
dagster_type_materializer,
graph,
op,
)
from dagster.utils import safe_tempfile_path
from dagster_pandas.constraints import (
ColumnDTypeInSetConstraint,
InRangeColumnConstraint,
NonNullableColumnConstraint,
)
from dagster_pandas.data_frame import _execute_summary_stats, create_dagster_pandas_dataframe_type
from dagster_pandas.validation import PandasColumn
from pandas import DataFrame, read_csv
def test_create_pandas_dataframe_dagster_type():
TestDataFrame = create_dagster_pandas_dataframe_type(
name="TestDataFrame",
columns=[PandasColumn(name="foo", constraints=[ColumnDTypeInSetConstraint({"int64"})])],
)
assert isinstance(TestDataFrame, DagsterType)
def test_basic_pipeline_with_pandas_dataframe_dagster_type():
def compute_event_metadata(dataframe):
return {"max_pid": str(max(dataframe["pid"]))}
BasicDF = create_dagster_pandas_dataframe_type(
name="BasicDF",
columns=[
PandasColumn.integer_column("pid", non_nullable=True),
PandasColumn.string_column("names"),
],
event_metadata_fn=compute_event_metadata,
)
@op(out={"basic_dataframe": Out(dagster_type=BasicDF)})
def create_dataframe(_):
yield Output(
DataFrame({"pid": [1, 2, 3], "names": ["foo", "bar", "baz"]}),
output_name="basic_dataframe",
)
@graph
def basic_graph():
return create_dataframe()
result = basic_graph.execute_in_process()
assert result.success
for event in result.all_node_events:
if event.event_type_value == "STEP_OUTPUT":
mock_df_output_event_metadata = (
event.event_specific_data.type_check_data.metadata_entries
)
assert len(mock_df_output_event_metadata) == 1
assert any([entry.label == "max_pid" for entry in mock_df_output_event_metadata])
def test_create_dagster_pandas_dataframe_type_with_null_event_metadata_fn():
BasicDF = create_dagster_pandas_dataframe_type(
name="BasicDF",
columns=[
PandasColumn.integer_column("pid", non_nullable=True),
PandasColumn.string_column("names"),
],
event_metadata_fn=None,
)
assert isinstance(BasicDF, DagsterType)
basic_type_check = check_dagster_type(BasicDF, DataFrame({"pid": [1], "names": ["foo"]}))
assert basic_type_check.success
def test_bad_dataframe_type_returns_bad_stuff():
with pytest.raises(DagsterInvariantViolationError):
BadDFBadSummaryStats = create_dagster_pandas_dataframe_type(
"BadDF", event_metadata_fn=lambda _: "ksjdkfsd"
)
check_dagster_type(BadDFBadSummaryStats, DataFrame({"num": [1]}))
with pytest.raises(DagsterInvariantViolationError):
BadDFBadSummaryStatsListItem = create_dagster_pandas_dataframe_type(
"BadDF", event_metadata_fn=lambda _: ["ksjdkfsd"]
)
check_dagster_type(BadDFBadSummaryStatsListItem, DataFrame({"num": [1]}))
def test_dataframe_description_generation_just_type_constraint():
TestDataFrame = create_dagster_pandas_dataframe_type(
name="TestDataFrame",
columns=[PandasColumn(name="foo", constraints=[ColumnDTypeInSetConstraint({"int64"})])],
)
assert TestDataFrame.description == "\n### Columns\n**foo**: `int64`\n\n"
def test_dataframe_description_generation_no_type_constraint():
TestDataFrame = create_dagster_pandas_dataframe_type(
name="TestDataFrame",
columns=[PandasColumn(name="foo")],
)
assert TestDataFrame.description == "\n### Columns\n**foo**\n\n"
def test_dataframe_description_generation_multi_constraints():
TestDataFrame = create_dagster_pandas_dataframe_type(
name="TestDataFrame",
columns=[
PandasColumn(
name="foo",
constraints=[
ColumnDTypeInSetConstraint({"int64"}),
InRangeColumnConstraint(0, 100, ignore_missing_vals=False),
NonNullableColumnConstraint(),
],
),
],
)
assert (
TestDataFrame.description
== "\n### Columns\n**foo**: `int64`\n+ 0 < values < 100\n+ No Null values allowed.\n\n"
)
def test_execute_summary_stats_null_function():
assert _execute_summary_stats("foo", DataFrame(), None) == []
metadata_entries = _execute_summary_stats(
"foo",
DataFrame({"bar": [1, 2, 3]}),
lambda value: [EventMetadataEntry.text("baz", "qux", "quux")],
)
assert len(metadata_entries) == 1
assert metadata_entries[0].label == "qux"
assert metadata_entries[0].description == "quux"
assert metadata_entries[0].entry_data.text == "baz"
def test_execute_summary_stats_error():
with pytest.raises(DagsterInvariantViolationError):
assert _execute_summary_stats("foo", DataFrame({}), lambda value: "jajaja")
with pytest.raises(DagsterInvariantViolationError):
assert _execute_summary_stats(
"foo",
DataFrame({}),
lambda value: [EventMetadataEntry.text("baz", "qux", "quux"), "rofl"],
)
def test_execute_summary_stats_metadata_value_error():
with pytest.raises(DagsterInvariantViolationError):
assert _execute_summary_stats(
"foo", | DataFrame({}) | pandas.DataFrame |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.