# -*- coding: utf-8 -*-
"""
Created on Thu May 21 09:55:29 2020
@author: Gary
"""
import pandas as pd
import core.Find_silent_change as fsc
import core.Read_FF as rff
import difflib
#import core.Construct_set as const_set
output = './out/'
tempdir = './tmp/'
arcdir = './arc_testing/'
upload_hash_ref = output+'upload_hash_ref.csv'
change_log = output+'silent_change_log.csv'
exclude_files = ['archive_2018_08_28.zip','sky_truth_final.zip']
skyfn = 'sky_truth_final'
def getDfForCompare(fn,sources='./sources/'):
fn = sources+fn
raw_df = rff.Read_FF(zname=fn).import_raw()
raw_df = raw_df[~(raw_df.IngredientKey.isna())]
return raw_df
def initializeSilentChangeRecords():
"""Careful!!! This does what the name describes!"""
ref = pd.DataFrame({'UploadKey':[],'last_hash':[]})
fsc.saveUpdatedHash(ref)
def startFromScratch():
"""Be aware - this initializes everything before running a LONG process on
all archived files!"""
initializeSilentChangeRecords()
archives = fsc.createInitialCompareList()
new = pd.DataFrame({'UploadKey':None,'rhash':None},index=[])
df = pd.DataFrame()
for arc in archives[:2]:
print(f'\nProcessing archive for silent changes:\n {arc}\n')
olddf = df.copy()
old = new.copy()
old = old.rename({'rhash':'last_hash'},axis=1)
df = getDfForCompare(arc[1],sources=arcdir)
df = df.fillna(-9)
new = fsc.makeHashTable(df)
#print(old.head())
out = fsc.compareHashTables(old,new)
print(f'Number silent changes: {out.silent_change.sum()}')
# finding problems...
if out.silent_change.sum()>0:
print('Silent changes detected...')
ukl = out[out.silent_change].UploadKey.unique().tolist()
print(f'Number of disclosures with silent change detected: {len(ukl)}')
#uk = out[out.silent_change].iloc[0].UploadKey
for uk in ukl[:10]:
if fsc.compareFrameAsStrings(olddf[olddf.UploadKey==uk],
df[df.UploadKey==uk]):
conc = pd.merge(olddf[olddf.UploadKey==uk],df[df.UploadKey==uk],on='IngredientKey',how='outer',
indicator=True)
cols = df.columns.tolist()
cols.remove('IngredientKey')
#print(f'Diff UploadKey: {uk}')
#print(f'length conc: {len(conc)}')
for col in cols:
x = col+'_x'
y = col+'_y'
conc['comp'] = conc[x]==conc[y]
if conc.comp.sum()<len(conc):
print(f'{conc[~conc.comp][[x,y]]}')
print(f'{col}, sum = {conc.comp.sum()}')
# =============================================================================
# conc = conc.reindex(sorted(conc.columns), axis=1)
# conc.to_csv('./tmp/temp.csv')
# =============================================================================
return out
def showDifference(uploadlst,olddf, df):
for uk in uploadlst:
print(f' Differences in {uk}')
if fsc.compareFrameAsStrings(olddf[olddf.UploadKey==uk],
df[df.UploadKey==uk]):
conc = pd.merge(olddf[olddf.UploadKey==uk],df[df.UploadKey==uk],on='IngredientKey',how='outer',
indicator=True)
cols = df.columns.tolist()
cols.remove('IngredientKey')
for col in cols:
x = col+'_x'
y = col+'_y'
conc['comp'] = conc[x]==conc[y]
if conc.comp.sum()<len(conc):
print(f'{conc[~conc.comp][[x,y]]}')
print(f'{col}, sum = {conc.comp.sum()}')
def startFromScratch2():
"""Be aware - this initializes everything before running a LONG process on
all archived files!"""
initializeSilentChangeRecords()
archives = fsc.createInitialCompareList()
#new = pd.DataFrame({'UploadKey':None,'rhash':None},index=[])
df = pd.DataFrame()
for arc in archives[-2:]:
print(f'\nProcessing archive for silent changes:\n {arc}\n')
olddf = df.copy()
df = getDfForCompare(arc[1],sources=arcdir)
if len(olddf)==0: # first run, nothing left to do
continue
oldulk = olddf.UploadKey.unique().tolist()
df = fsc.getNormalizedDF(df)
olddf = fsc.getNormalizedDF(olddf)
ulk = df.UploadKey.unique().tolist()
ukMissingFromNew = []
for uk in oldulk:
if uk not in ulk:
ukMissingFromNew.append(uk)
print(f' Number of UploadKeys gone missing in new set: {len(ukMissingFromNew)}')
# find matching records
mg = pd.merge(olddf,df,on=['UploadKey','IngredientKey'],how='outer',
indicator=True,validate='1:1')
common = mg[mg['_merge']=='both'][['UploadKey','IngredientKey']].copy()
newmg = pd.merge(common,df,on=['UploadKey','IngredientKey'],how='inner')
#print(newmg.columns)
newmg['rhash'] = pd.util.hash_pandas_object(newmg, hash_key='1234')
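# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original fsc module): a minimal illustration
# of the hash-based comparison used above. Column names (UploadKey, rhash,
# last_hash) mirror the ones in this script; the real fsc.makeHashTable /
# fsc.compareHashTables implementations may differ.
def sketch_hash_table(df):
    # one uint64 hash per row of the disclosure data, excluding the key column
    row_hash = pd.util.hash_pandas_object(df.drop(columns=['UploadKey']),
                                          index=False)
    tbl = pd.DataFrame({'UploadKey': df.UploadKey, 'rhash': row_hash})
    # collapse to one value per disclosure by summing its row hashes
    return tbl.groupby('UploadKey', as_index=False)['rhash'].sum()

def sketch_compare(old, new):
    # a disclosure changed "silently" if its hash differs between archives
    m = pd.merge(old.rename(columns={'rhash': 'last_hash'}), new,
                 on='UploadKey', how='inner')
    m['silent_change'] = m.last_hash != m.rhash
    return m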
import numpy as np
import pandas as pd
lt = 'f:/lt/'
region = pd.read_csv(lt + 'region.csv',sep='\t', index_col=0)
# Exclude Inner Mongolia and Tibet
# prvs = ['北京', '天津', '河北', '山东', '辽宁', '江苏', '上海', '浙江', '福建', '广东', '海南', '吉林',
# '黑龙江', '山西', '河南', '安徽', '江西', '湖北', '湖南', '广西', '重庆', '四川', '贵州', '云南',
# '陕西', '甘肃', '青海', '宁夏', '新疆']
prvs = ['北京', '天津', '河北', '山东', '辽宁', '江苏', '上海', '浙江', '福建', '广东', '广西', '海南',
'吉林', '黑龙江', '山西', '河南', '安徽', '江西', '湖北', '湖南', '重庆', '四川', '贵州', '云南',
'陕西', '甘肃', '青海', '宁夏', '新疆', '内蒙古']
years = ['2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012']
worker = pd.read_csv(lt + 'worker.csv', sep='\t', index_col=0).join(region)
capital = pd.read_csv(lt + 'capital.csv', sep='\t', index_col=0).join(region)
energy = pd.read_csv(lt + 'energy.csv', sep='\t', index_col=0).join(region)
gdp = pd.read_csv(lt + 'gdp.csv', sep='\t', index_col=0).join(region)
co2 = pd.read_csv(lt + 'co2.csv', sep='\t', index_col=0)
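# Hedged example (not in the original script): restrict a table to the
# provinces listed in `prvs` and reshape the yearly columns into a long panel.
# It assumes the row index holds the province name and that the year columns
# match the strings in `years`; adjust if the CSV layout differs.
gdp_panel = (gdp.loc[gdp.index.isin(prvs), years]
                .stack()
                .rename('gdp')
                .rename_axis(['province', 'year'])
                .reset_index())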
from matplotlib import pyplot as plt
import pandas as pd
plt.style.use("fivethirtyeight")
#df['py_dev_y']
ages_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
dev_y = [38496, 42000, 46752, 49320, 53200,
56000, 62316, 64928, 67317, 68748, 73752]
fig, (ax, ax1) = plt.subplots(nrows = 2, ncols = 1, sharex = True)
ax.plot(ages_x, dev_y, color="#444444", label="All Devs")
py_dev_y = [45372, 48876, 53850, 57287, 63016,
65998, 70003, 70000, 71496, 75370, 83640]
ax.plot(ages_x, py_dev_y, color="#008fd5", label="Python")
js_dev_y = [37810, 43515, 46823, 49293, 53437,
56373, 62375, 66674, 68745, 68746, 74583]
ax.plot(ages_x, js_dev_y, color="#e5ae38", label="JavaScript")
data = {
"ages" : ages_x,
"Python": py_dev_y,
"JavaScript": js_dev_y
}
df = pd.DataFrame(data)
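# Hedged continuation (not in the original snippet): the figure above creates
# two stacked axes but only draws on the first one; the lines below are one
# way to finish it, plotting the Python/JavaScript salary gap on ax1 and
# adding labels, legends and the final show() call.
ax1.plot(df["ages"], df["Python"] - df["JavaScript"],
         color="#6d904f", label="Python - JavaScript gap")
ax.set_ylabel("Median Salary (USD)")
ax1.set_xlabel("Ages")
ax1.set_ylabel("Salary Gap (USD)")
ax.legend()
ax1.legend()
plt.tight_layout()
plt.show()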
from datetime import time
from bs4 import BeautifulSoup
import json, random, re, requests
import pandas as pd
import csv
page = str(input("Page : "))
url = "https://indeks.kompas.com/?page="+page
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
data_berita = pd.DataFrame()
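# Hedged sketch (the link filter below is an assumption, not verified against
# the live Kompas markup): collect headline links from the index page into
# data_berita and write them to a CSV file.
rows = []
for a in soup.find_all("a", href=True):
    judul = a.get_text(strip=True)
    if judul and "kompas.com" in a["href"]:
        rows.append({"judul": judul, "link": a["href"]})
data_berita = pd.DataFrame(rows)
data_berita.to_csv("berita_kompas_page_" + page + ".csv",
                   index=False, quoting=csv.QUOTE_ALL)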
import os
import re
import pandas as pd
from .message import Message
from datetime import datetime
class KaggleDataSet(Message):
"""
Description
===========
- A class containing a number of methods and attributes (almost all of them
initialized in the constructor) to create a dataset in the form of CSV/JSON,
so that it can either
(a) be used to work with pandas, or
(b) be uploaded/published as our own dataset on Kaggle (https://www.kaggle.com/datasets),
where uploaded data should be excellent and unique
- This class provides attributes that track the state of the dataset being built
- This class provides methods that ask the user to enter data column by column,
row after row, and generate CSV/JSON as the final output
"""
def __init__(self,
path='.',
extension = 'csv'
):
"""
A constructor
=============
- which initializes number of parameters to start the creation of Kaggle
dataset
Parameters
==========
- path: Absolute/relative path of the output file (csv, json)
- extension: Extension to use for the output file (default: csv)
"""
self.__validate_and_set(path, extension)
# Container of entered data (an input to pandas.DataFrame)
self.container = {}
# Used to store the number of entered columns in the CSV
self.total_columns = 0
# Used to store the names of all entered columns (initially empty)
self.columns = []
# Used to store the lengths of all column names
self.collens = []
# Private variable to maintain the calling sequences
self.__states = {}
# If DataFrame is SET
self.df_set = False
# For implementing the feature of printing relevant messages to the console
self.message = Message()
# Used to store the type of data types of all columns
self.data_types = {}
# Used to store the number of entered rows
self.rows = 0
def __validate_and_set(self, path, extension):
"""
Description
===========
- Validates path and returns a tuple => (filedir, filename, extension)
Opeartions
==========
>>> os.path.splitext("C:\\TC\\a.txt")
('C:\\TC\\a', '.txt')
>>>
>>> os.path.exists(".")
True
>>>
>>>
>>> re.match("^\w+(\w+[-_])*\w+$", "dffdfd-ddgg-$")
>>> re.match("^\w+(\w+[-_])*\w+$", "dffdfd-ddgg-dffd")
<_sre.SRE_Match object at 0x00000000029FCD50>
>>>
>>> re.match("^\w+(\w+[-_])*\w+$", "dffdfd-ddgg_dffd")
<_sre.SRE_Match object at 0x00000000029FCDC8>
>>>
>>> re.match("^\w+(\w+[-_])*\w+$", "dffdfd_ddgg_dffd")
<_sre.SRE_Match object at 0x00000000029FCD50>
>>>
>>> re.match("^\w+(\w+[-_])*\w+$", "dffdfd_ddgg+dffd")
>>>
"""
this_filename = datetime.now().strftime("DataSet-%d-%m-%Y-%H%M%S")
this_dir = '.' # os.path.dirname(os.path.abspath(__file__))
if path and type(path) is str:
filedir, file_w_ext = os.path.split(path)
filename, ext = os.path.splitext(file_w_ext)
if ext:
ext = ext.lstrip('.')
if ext in ['json', 'csv']:
extension = ext
else:
extension = 'csv'
else:
extension = "csv"
if not filedir:
filedir = this_dir
if not os.path.exists(filedir):
filedir = this_dir
if not re.match(r"^\w+(\w+[-_])*\w+$", filename):
filename = this_filename
self._Message__warning('Valid file names are: my-data-set, mydataset, my-data_set, mydataset.csv etc, so taking %s.%s' % (filename, extension));
else:
filename = this_filename;
filedir = this_dir
if not extension in ["json", 'csv']:
extension = 'csv';
# Used to store the relative/absolute path of the destination directory
# This value will be used while creating the JSON/CSV file
# If you do not provide file path while instantiation then the current
# directory (.) will be used
self.filedir = filedir
# Used to store the base name of file (A.py => A)
self.filename = filename
# Used to store the extension of the file
self.extension = extension
# Repeatedly check whether the specified filename already exists;
# if it does, do not overwrite it
# and choose another file name by appending numbers like 1, 2, 3 and so on
self.__set_names()
def get_data_type(self, value):
"""
Description
===========
- It returns the type of the data; the result will be either 'numeric' or 'string'
- If the passed data is an int/float, or if it contains only digits (optionally
with a . dot), the returned value will be 'numeric'; otherwise 'string'.
Code
====
+ Below is the logic of getting the type of data
>>> import re
>>>
>>> numeric_regex = r"^(\d+)$|^(\d+\.\d+)$|^(\d*\.\d+)$|^(\d+\.\d*)$"
>>> re.match(numeric_regex, "14-08-1992")
>>> re.match(numeric_regex, "14081992")
<_sre.SRE_Match object; span=(0, 8), match='14081992'>
>>>
>>> re.match(numeric_regex, "140819.92")
<_sre.SRE_Match object; span=(0, 9), match='140819.92'>
>>> re.match(numeric_regex, "140819.")
<_sre.SRE_Match object; span=(0, 7), match='140819.'>
>>> re.match(numeric_regex, ".8855")
<_sre.SRE_Match object; span=(0, 5), match='.8855'>
>>> re.match(numeric_regex, ".")
>>> re.match(numeric_regex, ".2")
<_sre.SRE_Match object; span=(0, 2), match='.2'>
>>> re.match(numeric_regex, "4")
<_sre.SRE_Match object; span=(0, 1), match='4'>
>>>
"""
numeric_regex = r"^(\d+)$|^(\d+\.\d+)$|^(\d*\.\d+)$|^(\d+\.\d*)$"
if re.match(numeric_regex, str(value)):
_type = 'numeric';
else:
_type = 'string';
return _type;
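# Hedged examples of get_data_type (illustrative values only):
#   self.get_data_type(14081992)      -> 'numeric'
#   self.get_data_type("140819.92")   -> 'numeric'
#   self.get_data_type("14-08-1992")  -> 'string'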
def get_value_for(self, rowno, colname, max_col_len):
"""
Description
===========
- Returns the value entered on console
"""
s = "[DATA ENTRY] <row: " + str(rowno) + "> "
l = len(s) + max_col_len + 4
f = ("%-" + str(l) + "s : ") % (s + " " + colname)
value = input(f).strip()
_type = self.get_data_type(value)
if colname in self.data_types:
"""
{
'fullname': 'string',
'age': 'numeric'
}
"""
current_type = self.data_types[colname]
if _type != current_type:
if self.data_types[colname] == "numeric":
self._Message__warning('Previously this column was numeric, now it is of type string')
self.data_types[colname] = _type # Set to string
else:
self.data_types[colname] = _type
return value
def set_container(self):
"""
Description
===========
- Asks the user to enter data for each row, column by column
- Finally sets the container attribute of the class
"""
ask_for_data_entry = True
done = False
if self.__states.get('start'):
if self.__states.get('set_column_names'):
done = True
else:
self._Message__warning("You are directly trying to invoke, set_container() method"
", please call start() => set_column_names() methods first")
else:
self._Message__warning("You are directly trying to invoke, set_container() method"
", please call start() method first")
if done:
satisfied = False
rowno = 1
equals = "=" * 50
msg = "\n" + equals + "\nDo you want to add 1 more row / view data (y/n/v): "
max_col_len = max(self.collens)
while not satisfied:
if ask_for_data_entry:
for colname in self.columns:
value = self.get_value_for(rowno, colname, max_col_len)
if colname in self.container:
self.container[colname].append(value)
else:
self.container[colname] = [value]
inp = (input(msg).strip()).lower()
if inp == 'y' or inp == 'yes':
rowno += 1
print(equals)
ask_for_data_entry = True
continue # To continue with entering data for next row
elif inp.lower() == 'v' or inp.lower() == 'view':
self.__create() # Recreation of DataFrame with newly entered data
self._Message__data(self.df)
viewed = True
ask_for_data_entry = False
continue
else:
# This branch just makes the flow explicit; a break could also be used
nmtc = no_or_mistakenly_typed_confirmation = input("Is this mistakenly typed (y/n): ").strip()
if(nmtc.lower() == "n" or nmtc.lower() == "no"):
self.rows = rowno
satisfied = True
elif not(nmtc.lower() == 'y' or nmtc.lower() == 'yes'):
self._Message__warning("This is for your help, just type proper value to exit/continue")
else:
rowno += 1
print(equals)
ask_for_data_entry = True
self.__states["set_container"] = True
return True # Success
else:
return False # Failure
def set_column_names(self):
"""
Description
===========
- Asks the user to enter the names of the columns that will appear in the CSV
(or as keys in the JSON object)
"""
if self.__states.get('start', None):
cols = self.total_columns # To short the name (value of cols >= 1)
d = {
1: '1st',
2: '2nd',
3: '3rd'
}
f = str(len(str(cols)) + 2) # cols => Total number of columns (extra 2 is for st, nd, rd, th etc.)
s = "Enter the name of %s column: " % ("%-" + f + "s")
i = 1
while i <= cols:
if i <= 3:
colname = input(s % (d[i]))
else:
colname = input(s % (str(i) + 'th'))
if not(re.match(r"^\w*(\w+[-_])*\w+$", colname)):
self._Message__warning("Please do not use characters for column names other than "
"A-Za-z0-9_-")
continue
if colname in self.columns:
self._Message__warning('The entered column name {} has already been chosen '
'(please enter another name)'.format(colname))
continue
self.columns.append(colname)
self.collens.append(len(colname))
i += 1
self.__states["set_column_names"] = True
return True # Success
else:
self._Message__warning("You are directly trying to invoke, set_column_names() method"
", please call start() method first")
return False # Failure
def start(self):
"""
Description
===========
- Initiates the process of creating the dataset and asks for the number of columns
- Validates the entered value (no. of columns), checks whether it is a positive
integer
- Checks if it is >= 1
- Continues to ask the user for a proper value until the requirement is
satisfied
"""
everything_is_ok = False
while not everything_is_ok:
cols = input('Enter number of columns that you want in your dataset: ').strip();
if re.match(r"^\d+$", cols):
cols = int(cols)
if cols == 0:
self._Message__warning("You are looking for 0 column names, please enter >= 1")
continue
everything_is_ok = True
else:
self._Message__warning("The entered value doesn't look like a +ve integer "
"please enter a valid integer number")
self.total_columns = cols
self.__states = {"start": True}
# No need to add \n at the beginning or end when calling message
# functions like success() / warning() / error() etc.
self._Message__success("You are successfully done with no. of columns")
ret = self.set_column_names()
if ret:
self._Message__success("You are successfully done with the column names")
else:
self._Message__error("Something unexpected happened")
ret = self.set_container()
if ret:
self._Message__success("You are successfully done with entering data for your dataset")
else:
self._Message__error("Something unexpected happened")
def status_is_ok(self):
states = self.__states
if states.get("start", None):
if states.get("set_column_names", None):
if states.get("set_container", None):
return True
else:
self._Message__warning("You are directly trying to invoke, view() method"
", please call start() => set_column_names() => set_container() methods first")
else:
self._Message__warning("You are directly trying to invoke, view() method"
", please call start() => set_column_names() methods first")
else:
self._Message__warning("You are directly trying to invoke, view() method"
", please call start() method first")
return False # Failure
def __create(self, **kwargs):
self.df = pd.DataFrame(self.container)
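# Hedged usage example (interactive; console prompts come from the Message
# base class, whose implementation is not shown here):
#
#   ds = KaggleDataSet(path='./my-data-set.csv', extension='csv')
#   ds.start()                  # asks for column count, column names, row data
#   if ds.status_is_ok():
#       print(ds.container)     # dict of column name -> list of entered values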
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
store.get_storer(key).table.description, name
).itemsize, size
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
            # check creation/storage/retrieval of float32 (a bit hacky to
            # actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
                # all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
with pytest.raises(NotImplementedError):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self, setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
# not implemented
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(self, setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[2:7, "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[2:7, "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_read_column(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
with pytest.raises(Exception):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
with pytest.raises(ValueError):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[4:6, "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_coordinates(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame(dict(A=range(5), B=range(5)))
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
            _maybe_remove(store, "df1")
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of an INI configuration file from a CSV file.
Args:
    --inFile: Path to the input CSV file containing the time series data values
    --outFile: Path to the output INI configuration file to be written
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
            self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
            return (None, None, None, None)  # match the four values unpacked by the caller
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
        analysisFrame = analysisFrame.apply(pandas.to_numeric, errors='coerce')  # Fill in bad data with Not-a-Number (NaN); apply() returns a new frame, so assign it
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):  # Series.dtypes is never a str instance; test the dtype itself
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():  # iterating a DataFrame directly yields only column labels
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
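    # Sanity note (illustrative): with uniform weights 1/n the weighted average equals the plain
    # arithmetic mean, e.g. numpy.average([2., 4., 6.], weights=numpy.ones(3) / 3.) == 4.0.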
def _calculateStd(self, data):
"""
        Calculates the standard deviation using a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0.0)  # accumulator for the squared deviations (must start at zero, not at the mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):  # a NaN never compares equal to itself
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
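    # Example (illustrative): meanValue = 10, sigmaValue = 2 and multiplierSigma = 3.0 give
    # sigmaRangeValue = 6 and topValue = 16, i.e. rows above mean + 3*sigma are treated as outliers.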
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)  # filter the original frame; dataAnalysisCleaned is still None at this point
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": | pandas.StringDtype() | pandas.StringDtype |
# -*- coding: utf-8 -*-
'''
Analysis module for analysis of angle-dependence
Author:
<NAME>,
Max Planck Institute of Microstructure Physics, Halle
Weinberg 2
06120 Halle
<EMAIL>
'''
''' Input zone '''
# ____________________________________________________________________________
# SETTINGS
# Data
'''
"selectFileType"
How to select input files:
Mode 0: Select each file seperately through UI
Mode 1: Select file that specifies all file locations
Mode 2: Give file locations file in code (need to know what you are doing)
'''
selectFileType = 2
'''
"analysisMode":
Requirements for different modes:
a) Lineshape analysis (frequency-dependence)
b) AMR calibration
c) Irf calibration
d) PHE and AHE calibration
Mode 0:
Plotting mode. Requires only angle-dependent data
Mode 1:
"c-free" fitting. V_amr is a fitting parameter and Vs and Va are fitted
        simultaneously to ensure Vamr is the same for both fits.
Requirement: a)
Mode 2:
Quantitative fitting. Torques have quantitative meaning.
Requirements: a)-c)
Mode 3:
Semi-quantitative fitting with generalized Karimeddiny artifact description.
Requirements: a)-c)
Mode 4:
        Semi-quantitative fitting with generalized Karimeddiny artifact description
in XX and XY direction.
Requirements: a)-d)
'''
analysisMode = 4
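# Illustrative guard (not in the original script): fail early if an unsupported mode is set,
# mirroring the mode list documented above.
assert analysisMode in (0, 1, 2, 3, 4), f"analysisMode {analysisMode} is not defined"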
'''
"Vset_mode":
Only for analysisMode 4.
Specify which data to use for fitting.
0: Vsxx, Vaxx, Vsxy
1: Vsxx, Vaxx, Vaxy
'''
Vset_mode = 0
voltageMagnitude = 'mu' # V
flipSign = False
fit_phi_offset = False # Only implemented for the c-free mode
fit_comps_list = ['xyz'] # Select assumed torque components
assume_arts = True
norm_to = 'yFL' # Only for mode 1. Specify which torque component to normalize to.
plotPhiMode = 1 # 0: raw angle, 1: shifted angle
delta_phi = 45 # distance between angle tick values (deg)
plotDpi = 600
saveData = True
''' Input zone ends here. '''
# ____________________________________________________________________________
# CODE
import tkinter as tk
from tkinter import filedialog
import pandas as pd
import matplotlib.pyplot as plt
from files import File
from plots import GenPlot, BoxText
from helpers.file_handling import read_csv_Series
import numpy as np
import modules.stfmrAnglePlotFitting as apf
from modules.stfmrAnglePlotFittingCFree import angleDepFittingCFree, get_norm_torques
from modules.stfmrKarimeddinyFitting import V_Karimeddiny_fitting, get_norm_torques_karimed, calc_Ru
from modules.stfmrKarimeddinyHallFitting import V_Karimeddiny_Hall_fitting, get_norm_torques_karimed, calc_Ru
import stfmrHelpers.stfmrAnglePlotFitHelpers as aph
from units import rad2deg
from stfmrHelpers.stfmrAnglePlotUIHelper import get_ipFileLocationsFilesFromUI
if selectFileType == 0:
ipFileLocationsFiles = [get_ipFileLocationsFilesFromUI(analysisMode)]
elif selectFileType == 1:
root = tk.Tk()
root.withdraw()
ipFileLocationsFiles = [File(filedialog.askopenfilename(parent=root,
title='Choose .csv file with input files locations'))]
elif selectFileType == 2:
ipFileLocationsFiles = [
# File(r'D:\owncloud\0_Personal\ANALYSIS\Mn3SnN\ST-FMR\MA2959-2\220131\D1_0deg\02_angle-dependence\fittingOutput\angleDependence\MA2959-2-D1_angleDep_input_files.csv'),
# File(r'D:\owncloud\0_Personal\ANALYSIS\Mn3SnN\ST-FMR\MA2959-2\220131\D3_45deg\01_angle-dependence\fittingOutput\angleDependence\MA2959-2-D3_angleDep_input_files.csv'),
# File(r'D:\owncloud\0_Personal\ANALYSIS\Mn3SnN\ST-FMR\MA2960-2\220202\D1_0deg\003_angle-dependence\fittingOutput\angleDependence\MA2960-2-D1_angleDep_input_files.csv'),
# File(r'D:\owncloud\0_Personal\ANALYSIS\Mn3SnN\ST-FMR\MA2960-2\220203\D4_90deg\002_angle-dependence\pos_field\fittingOutput\angleDependence\MA2960-2-D4_angleDep_input_files.csv')
File(r'D:\owncloud\0_Personal\ANALYSIS\Mn3SnN\ST-FMR\MA2959-2\220131\D1_0deg\02_angle-dependence\fittingOutput\angleDependence\MA2959-2-D1_angleDep_input_files.csv')
]
else:
raise ValueError(f'Select files type "{selectFileType}" not defined')
inputFiles = []
ipFileLocations = []
for ipFileLocationsFile in ipFileLocationsFiles:
# Get input file locations
ipFileLocations = read_csv_Series(ipFileLocationsFile.fileDirName)
ipAngleDepFittingSummaryFile = File(ipFileLocations['angle dependence fitting summary'])
# Get input data
inputData = pd.read_csv(ipAngleDepFittingSummaryFile.fileDirName,index_col=False)
if analysisMode == 4:
# Get additional data from XY measurement
ipAngleDepFittingXYSummaryFile = File(ipFileLocations['angle dependence fitting summary transversal'])
inputDataXY = pd.read_csv(ipAngleDepFittingXYSummaryFile.fileDirName,index_col=False)
    # Extract important columns
if voltageMagnitude == 'mu':
y_label = 'V ($\mu$V)'
voltageDivider = 1e-6
if plotPhiMode == 0:
try:
x = inputData['Angle (deg)']
        except KeyError:
            try:
                x = inputData['fieldAngle (deg)']
            except KeyError:
                raise ValueError("Input data contains neither 'Angle (deg)' nor 'fieldAngle (deg)'")
x_label = '$\phi$ (deg)'
Vs = inputData['Vsym (V)']
Vas = inputData['Vas (V)']
if analysisMode == 4:
Vsxx = Vs
Vaxx = Vas
Vsxy = inputDataXY['Vsym (V)']
Vaxy = inputDataXY['Vas (V)']
elif plotPhiMode == 1:
x = inputData.sort_values(by='fieldAngle (deg)')['fieldAngle (deg)']
x_label = '$\phi$ (deg)'
Vs = inputData.sort_values(by='fieldAngle (deg)')['Vsym (V)']
Vas = inputData.sort_values(by='fieldAngle (deg)')['Vas (V)']
# Extract fixed parameters
I = float(inputData['Current (mA)'][0])
P = float(inputData['rf Power (dBm)'][0])
f = float(inputData['Frequency (GHz)'][0])
# Flip sign if defined
if flipSign == True:
Vs *= -1
Vas *= -1
# _________________________________________________________________________
# ANALYSIS MODE 0
if analysisMode == 0:
# Simple data plotting without fit
fig, ax = plt.subplots()
ax.scatter(x, Vs, label='Vs')
ax.scatter(x, Vas, label='Vas')
plt.plot(x, Vs)
plt.plot(x, Vas)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.legend()
ax.set_xticks(np.arange(0, 361, delta_phi))
ax.set_title('I = {} mA, f = {} GHz, P = {} dBm'.format(I, f, P))
outputFileSubdir = ipAngleDepFittingSummaryFile.fileDir + '/angleDependence/plot-only'
outputFile = File(outputFileSubdir, ipAngleDepFittingSummaryFile.fileNameWOExt + '_anglePlot.png')
outputFile.makeDirIfNotExist()
if saveData is True:
fig.savefig(outputFile.fileDirName, bbox_inches="tight", dpi=plotDpi)
# _________________________________________________________________________
# ANALYSIS MODE 1
elif analysisMode == 1:
''' c-free fitting '''
opFileDir = ipAngleDepFittingSummaryFile.fileDir + '/angleDependence/c-free'
opFileParams = File(opFileDir, 'fitparams_summary.csv')
opParamsSum = | pd.DataFrame() | pandas.DataFrame |
import os
import tensorflow as tf
import math
import numpy as np
import pandas as pd
import glob
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from waymo_open_dataset.protos.scenario_pb2 import Scenario
import matplotlib.patches as patches
import matplotlib.patheffects as path_effects
def get_file_list(filepath):
all_files = sorted(glob.glob(filepath))
segs_name_index = []
for file in all_files:
segment_name = os.path.basename(file)
segs_name_index.append(segment_name[-14:-9])
# print(segs_name_all)
#print(segs_name_index)
return all_files, segs_name_index
global point_has_been_pointed
point_has_been_pointed = []
def plot_top_view_single_pic_map(trj_in,file_index,scenario_id_in, scenario,target_left,target_right,length,trafficlight_lane,lane_turn_right_id_real=[]):
global point_has_been_pointed
#plt.figure(figsize=(10, 7))
fig, ax = plt.subplots(1,2,figsize=(14,7))
plt.xlabel('global center x (m)', fontsize=10)
plt.ylabel('global center y (m)', fontsize=10)
plt.axis('square')
plt.xlim([trj_in['center_x'].min() - 1, trj_in['center_x'].max() + 1])
plt.ylim([trj_in['center_y'].min() - 1, trj_in['center_y'].max() + 1])
title_name = 'Scenario ' + str(scenario_id_in)
plt.title(title_name, loc='left')
plt.xticks(np.arange(round(float(trj_in['center_x'].min())), round(float(trj_in['center_x'].max())), 20),fontsize=5)
plt.yticks(np.arange(round(float(trj_in['center_y'].min())), round(float(trj_in['center_y'].max())), 20), fontsize=5)
#ax = plt.subplots(121)
map_features = scenario.map_features
road_edge_count = 0
lane_count = 0
road_line = 0
all_element_count = 0
for single_feature in map_features:
all_element_count += 1
id_ = single_feature.id
#print("id is %d"%id_)
if list(single_feature.road_edge.polyline)!= []:
road_edge_count += 1
single_line_x = []
single_line_y = []
# print("road_edge id is %d"%single_feature.id)
for polyline in single_feature.road_edge.polyline:
single_line_x.append(polyline.x)
single_line_y.append(polyline.y)
            ax[0].plot(single_line_x, single_line_y, color='black', linewidth=0.3)  # road edges in black
if list(single_feature.lane.polyline)!= []:
lane_count += 1
single_line_x = []
single_line_y = []
for polyline in single_feature.lane.polyline:
single_line_x.append(polyline.x)
single_line_y.append(polyline.y)
#z1 = np.polyfit(single_line_x,single_line_y,8)
#p1 = np.poly1d(z1)
#y_hat = p1(single_line_x)
#ax.plot(single_line_x,y_hat,color='green', linewidth=0.5)
if id_ in target_left:
ax[0].plot(single_line_x, single_line_y, color='green', linewidth=0.5)
ax[1].plot(single_line_x, single_line_y, color='green', linewidth=0.5)
elif id_ in target_right:
                if id_ in lane_turn_right_id_real:  # right-turn lane at the target intersection
ax[0].plot(single_line_x, single_line_y, color='red', linewidth=0.5)
ax[1].plot(single_line_x, single_line_y, color='red', linewidth=0.5)
else:
ax[0].plot(single_line_x, single_line_y, color='purple', linewidth=0.5)
ax[1].plot(single_line_x, single_line_y, color='purple', linewidth=0.5) #deeppink
            elif id_ in trafficlight_lane:  # lanes that have traffic-signal data
ax[0].plot(single_line_x, single_line_y, color='deeppink', linewidth=0.5)
ax[1].plot(single_line_x, single_line_y, color='deeppink', linewidth=0.5)
else:
                ax[0].plot(single_line_x, single_line_y, color='blue', linewidth=0.5)  # lane centerlines in blue
if (single_line_x[0],single_line_y[0]) not in point_has_been_pointed:
ax[0].text(single_line_x[0], single_line_y[0], id_, fontsize=1.5)
point_has_been_pointed.append((single_line_x[0],single_line_y[0]))
else:
ax[0].text(single_line_x[0]-5, single_line_y[0]-5, id_,color='red', fontsize=1.5)
point_has_been_pointed.append((single_line_x[0]-5, single_line_y[0]-5))
if list(single_feature.road_line.polyline)!=[]:
road_line += 1
single_line_x = []
single_line_y = []
for polyline in single_feature.road_line.polyline:
single_line_x.append(polyline.x)
single_line_y.append(polyline.y)
            ax[0].plot(single_line_x, single_line_y, color='black', linestyle=':', linewidth=0.3)  # road markings as dotted lines
fig_save_name = 'E:/Result_save/figure_save/intersection_topo_figure_test/top_view_segment_' + str(
file_index) + '_scenario_' + str(
scenario_id_in) + '_trajectory.jpg'
#print(fig_save_name)
plt.savefig(fig_save_name, dpi=600)
#plt.show()
plt.close('all')
return road_edge_count, lane_count, road_line,all_element_count
def get_lane_min_dis(single_scenario_all_feature,map_features_id_list,ego_lane_id,other_lanes,connect_type):
ego_index = map_features_id_list.index(ego_lane_id)
ego_lane_info = single_scenario_all_feature[ego_index]
    lane_inter_dis = []  # distances between the end of the ego lane and the ends of the connected lanes, used to decide whether they lie inside the intersection
ego_lane_point = ()
other_lane_point = []
for other_lane_id in other_lanes:
other_lane_index = map_features_id_list.index(other_lane_id)
other_lane_info = single_scenario_all_feature[other_lane_index]
if connect_type == 'entry':
x1,y1 = ego_lane_info.lane.polyline[0].x,ego_lane_info.lane.polyline[0].y
x2,y2 = other_lane_info.lane.polyline[0].x,other_lane_info.lane.polyline[0].y
            ego_lane_point = (ego_lane_info.lane.polyline[0].x,ego_lane_info.lane.polyline[0].y)  # for an entry connection, use the first point of the lane
other_lane_point.append((other_lane_info.lane.polyline[0].x,other_lane_info.lane.polyline[0].y))
if connect_type == 'exit':
x1, y1 = ego_lane_info.lane.polyline[-1].x, ego_lane_info.lane.polyline[-1].y
x2, y2 = other_lane_info.lane.polyline[-1].x, other_lane_info.lane.polyline[-1].y
            ego_lane_point = (
                ego_lane_info.lane.polyline[-1].x, ego_lane_info.lane.polyline[-1].y)  # for an exit connection, use the last point of the lane
other_lane_point.append((other_lane_info.lane.polyline[-1].x, other_lane_info.lane.polyline[-1].y))
dis = np.sqrt((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2))
lane_inter_dis.append(dis)
return lane_inter_dis,ego_lane_point,other_lane_point
def cal_angle(x1,y1,x2,y2):
if x2!=x1:
angle = (math.atan((y2-y1)/(x2-x1)))*180/np.pi
else:
        angle = 90  # avoid an undefined slope for vertical segments
return angle
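# Example (illustrative): cal_angle(0, 0, 1, 1) -> 45.0 and cal_angle(0, 0, 0, 5) -> 90,
# i.e. the heading of the segment from (x1, y1) to (x2, y2) in degrees.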
def get_lane_angle_chane(polyline_list,ego_lane_id):  # compute the heading change along a lane polyline
angle_start = 0
angle_end = 0
length = len(polyline_list)
x_list = []
y_list = []
turn_type = 'straight'
x1, y1 = polyline_list[0].x, polyline_list[0].y
x4, y4 = polyline_list[-1].x, polyline_list[-1].y
for polyline in polyline_list:
x_list.append(polyline.x)
y_list.append(polyline.y)
try:
# print(polyline_list)
x2, y2 = polyline_list[3].x, polyline_list[3].y
x3, y3 = polyline_list[-3].x, polyline_list[-3].y
angle_start = cal_angle(x1,y1, x2, y2)
angle_end = cal_angle(x3,y3,x4,y4)
        delta_angle = angle_end - angle_start  # > 0 indicates a left turn, < 0 a right turn
except:
angle_start = angle_end = delta_angle = None
    # determine whether the lane turns left or right
index_mid = int(length/2)
x_mid = polyline_list[index_mid].x
y_mid = polyline_list[index_mid].y
# p1 = np.array((x_mid-x1,y_mid-y1))
# p2 = np.array((x4-x_mid,y4-y_mid))
p3 = (x_mid-x1)*(y4-y_mid)-(y_mid-y1)*(x4-x_mid)
#print(p3)
if p3 > 0:
turn_type = 'left'
elif p3<0:
turn_type = 'right'
#print("Turn type is %s"%turn_type)
return angle_start,angle_end,delta_angle,turn_type
def cal_lane_slpoe(polyline):
    x1,y1 = polyline[0].x,polyline[0].y  # xy coordinates of the segment start
    x2,y2 = polyline[-1].x,polyline[-1].y  # xy coordinates of the segment end
if x2-x1 != 0:
slope = (y2-y1)/(x2-x1)
else:
slope = 90
return slope
def map_topo_info_extract(map_features):  # extract lane connectivity and other topology information
    single_feature_all_lane_polyline = []  # all lane information (polyline points only) in one scenario
    lane_id_all = []  # IDs of all lanes in one scenario
    all_lane_entry_exit_info = []  # entry/exit lane information for every lane
    single_map_dict = {}
    lane_turn_left_id = []  # IDs of all left-turn lanes inside the intersection
    lane_turn_right_id = []  # IDs of all right-turn lanes inside the intersection
    for single_feature in map_features:  # first store every lane's information in lists
single_scenario_all_feature.append(single_feature)
map_features_id_list.append(single_feature.id)
for single_feature in map_features:
        single_lane_entry_exit_info = {}  # entry/exit information for one lane, including the distances between its endpoints and those of the connected lanes (checked against the intersection-size threshold)
if list(single_feature.lane.polyline) != []:
ego_lane_id = single_feature.id
entry_lanes = single_feature.lane.entry_lanes
exit_lanes = single_feature.lane.exit_lanes
entry_lanes_dis, ego_lane_point_entry, entry_lane_point = get_lane_min_dis(single_scenario_all_feature,
map_features_id_list,
ego_lane_id, entry_lanes,
'entry')
exit_lanes_dis, ego_lane_point_exit, exit_lane_point = get_lane_min_dis(single_scenario_all_feature,
map_features_id_list, ego_lane_id,
exit_lanes, 'exit')
angle_start, angle_end, delta_angle, turn_type = get_lane_angle_chane(single_feature.lane.polyline,
ego_lane_id) # 该线段的角度变化值
single_lane_entry_exit_info['file_index'] = file_index
single_lane_entry_exit_info['scenario_id'] = scenario_label
single_lane_entry_exit_info['lane_id'] = ego_lane_id
single_lane_entry_exit_info['angle_start'] = angle_start
single_lane_entry_exit_info['angle_end'] = angle_end
single_lane_entry_exit_info['ego_lane_angle_change'] = delta_angle
single_lane_entry_exit_info['is_a_turn'] = ''
if delta_angle:
                if abs(delta_angle) > 35:  # threshold temporarily lowered from 50 to 35 degrees
# if 120>delta_angle >50: #为左转
# if delta_angle > 50: # 为左转
if turn_type == 'left':
single_lane_entry_exit_info['is_a_turn'] = 'left'
lane_turn_left_id.append(ego_lane_id)
# elif -120<delta_angle <-50:
# elif delta_angle < -50:
elif turn_type == 'right':
single_lane_entry_exit_info['is_a_turn'] = 'right'
                        lane_turn_right_id.append(ego_lane_id)  # a whole intersection has only four right-turn lanes
else:
single_lane_entry_exit_info['is_a_turn'] = 'straight'
else:
single_lane_entry_exit_info['is_a_turn'] = 'straight'
if single_lane_entry_exit_info['is_a_turn'] == 'straight':
                single_lane_entry_exit_info['lane_slope'] = cal_lane_slpoe(single_feature.lane.polyline)  # for straight lanes, store the slope for the later intersection-angle calculation
# single_lane_entry_exit_info['is a turn'] = turn_type
            # information on connected lanes
single_lane_entry_exit_info['ego_lane_point_entry'] = ego_lane_point_entry
single_lane_entry_exit_info['ego_lane_point_exit'] = ego_lane_point_exit
single_lane_entry_exit_info['entry_lanes'] = entry_lanes
single_lane_entry_exit_info['entry_lanes_dis'] = entry_lanes_dis
single_lane_entry_exit_info['entry_lane_point'] = entry_lane_point
single_lane_entry_exit_info['exit_lanes'] = exit_lanes
single_lane_entry_exit_info['exit_lanes_dis'] = exit_lanes_dis
single_lane_entry_exit_info['exit_lane_point'] = exit_lane_point
            # information on neighbouring lanes
            lane_index_all = len(list(single_feature.lane.polyline))  # total number of sample points along this lane
            single_lane_entry_exit_info['left_neighbors_id'] = -1  # initialise
single_lane_entry_exit_info['right_neighbors_id'] = -1
            if list(single_feature.lane.left_neighbors) != []:  # there is a lane on the left
left_neighbors_temp = list(single_feature.lane.left_neighbors)
flag1 = 0
for left_neighbor in left_neighbors_temp:
# print('index')
# print(ego_lane_id)
# print(lane_index_all)
# print(left_neighbor.self_end_index)
if abs(left_neighbor.self_end_index - lane_index_all)<2:
                        left_neighbors_id = left_neighbor.feature_id  # record the ID of the left neighbour that satisfies the condition
#print("left_neighbors %d" % left_neighbors_id)
flag1 = 1
break
if flag1 ==1:
single_lane_entry_exit_info['left_neighbors_id'] = left_neighbors_id
#print(left_neighbors_id)
            if list(single_feature.lane.right_neighbors) != []:  # there is a lane on the right
right_neighbors_temp = list(single_feature.lane.right_neighbors)
flag2 = 0
for right_neighbor in right_neighbors_temp:
#print('index222')
#print(lane_index_all)
#print(left_neighbor.self_end_index)
if abs(right_neighbor.self_end_index -lane_index_all)<2:
right_neighbors_id = right_neighbor.feature_id
#print("right_neighbors %d"%right_neighbors_id)
flag2 = 1
break
if flag2 == 1:
single_lane_entry_exit_info['right_neighbors_id'] = right_neighbors_id
            # re-record some information for easier retrieval and processing
lane_id_all.append(single_feature.id)
all_lane_entry_exit_info.append(single_lane_entry_exit_info)
single_feature_all_lane_polyline.append((single_feature.id, single_feature.lane.polyline))
            single_map_dict[single_feature.id] = single_feature.lane.polyline  # dictionary lookup of every lane's polyline points
# print('qqqq')
# print(single_scenario_all_lane_entry_exit_info)
# print(single_map_dict,lane_turn_left_id,lane_turn_right_id)
return all_lane_entry_exit_info,single_map_dict,lane_turn_left_id,lane_turn_right_id
def intersection_angle_cal(df_all_lan_topo_info):
from sklearn.cluster import KMeans
intersection_angle = 0
slope_list = pd.unique(df_all_lan_topo_info[df_all_lan_topo_info['is_a_turn']== 'straight']['lane_slope'].tolist())
#print('slope_list:')
#print(slope_list)
    estimator = KMeans(n_clusters=2)  # build the clustering estimator
    estimator.fit(np.array(slope_list).reshape(-1, 1))  # cluster the slopes
    label_pred = estimator.labels_  # cluster labels
    k1 = np.mean(slope_list[label_pred==0])  # mean slope of the first cluster
k2 = np.mean(slope_list[label_pred==1])
#print('k1:%f,k2:%f'%(k1,k2))
intersection_angle = math.atan(abs((k2-k1)/(1+k1*k2)))*180/np.pi
return intersection_angle
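# The angle between the two straight-lane clusters follows the two-line formula
# theta = atan(|(k2 - k1) / (1 + k1 * k2)|); e.g. k1 = 0 and k2 = 1 give 45 deg (illustrative),
# while nearly perpendicular clusters (k1 * k2 -> -1) approach 90 deg.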
class Point():
def __init__(self,x,y):
self.x = x
self.y = y
def get_point_order(points):  # find the ordering of the points that connects them into a proper polygon
points_new = []
    points_new_plus = []  # slightly enlarged intersection area
    left_point = {}  # nearest point to the left of a given point
    right_point = {}  # nearest point to the right of a given point
for A in points:
angle_vec = 0
for B in points:
for C in points:
if (A.x!=B.x and A.y!=B.y)and (C.x!=B.x and C.y!=B.y) and (C.x!=A.x and C.y!=A.y):
vec_AB = Point(B.x-A.x,B.y-A.y)
vec_AC = Point(C.x-A.x,C.y-A.y)
vec_BA = Point(-vec_AB.x,-vec_AB.y)
#print(vec_AB.x,vec_AB.y,vec_AC.x,vec_AC.y,A.x,A.y,C.x,C.y)
#print(abs(math.sqrt(vec_AB.x**2+vec_AB.y**2)*math.sqrt(vec_AC.x**2+vec_AC.y**2)))
cos_angle = (vec_AB.x*vec_AC.x+vec_AB.y*vec_AC.y)/abs(math.sqrt(vec_AB.x**2+vec_AB.y**2)*math.sqrt(vec_AC.x**2+vec_AC.y**2))
#print(cos_angle)
angle_vec_temp = abs(math.acos(cos_angle)*180/np.pi)
#print(angle_vec_temp)
if angle_vec_temp > angle_vec:
angle_vec = angle_vec_temp
#print(angle_vec)
p5 = vec_BA.x*vec_AC.y - vec_AC.x*vec_BA.y
if p5>0:
left_point[points.index(A)] = points.index(B)
right_point[points.index(A)] = points.index(C)
elif p5<0:
left_point[points.index(A)] = points.index(C)
right_point[points.index(A)] = points.index(B)
A = points[0]
points_new.append((A.x,A.y))
points_new_plus.append((A.x+5,A.y+5))
points_new_index = [0]
for i in range(20):
A = points[left_point[points.index(A)]]
points_new.append((A.x,A.y))
points_new_plus.append((A.x + 5, A.y + 5))
if left_point[points.index(A)] == 0:
break
points_new_index.append(left_point[points.index(A)])
return points_new,points_new_plus
def rayCasting(p, poly):  # test whether a point lies inside a polygon (ray casting)
px,py = p[0],p[1]
flag = -1
i = 0
l = len(poly)
j = l - 1
# for(i = 0, l = poly.length, j = l - 1; i < l; j = i, i++):
while i < l:
sx = poly[i][0]
sy = poly[i][1]
tx = poly[j][0]
ty = poly[j][1]
        # the point coincides with a polygon vertex
if (sx == px and sy == py) or (tx == px and ty == py):
flag = 1
        # check whether the segment endpoints lie on opposite sides of the horizontal ray
if (sy < py and ty >= py) or (sy >= py and ty < py):
            # x coordinate of the point on the segment whose y equals the ray's y
x = sx + (py - sy) * (tx - sx) / (ty - sy)
            # the point lies on a polygon edge
if x == px:
flag = 1
            # the ray crosses the polygon boundary
if x > px:
flag = -flag
j = i
i += 1
    # the point is inside when the ray crosses the boundary an odd number of times
return flag
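# Illustrative usage (assumed demo, not part of the original pipeline): for the unit square
# _square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)],
# rayCasting((0.5, 0.5), _square) returns 1 (inside) and rayCasting((2.0, 0.5), _square) returns -1 (outside).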
def judge_lane_in_intersection(lane_polyline,intersection_range):
    flag = 0  # flag == 1 means the lane lies inside the intersection
point_start = (lane_polyline[0].x,lane_polyline[0].y)
point_end = (lane_polyline[-1].x,lane_polyline[-1].y)
if (rayCasting(point_start, intersection_range) == 1) and (rayCasting(point_end, intersection_range) == 1):
flag = 1
return flag
def get_one_direction_lane_info(right_lane_id,df_all_lan_topo_info,single_map_dict,lane_turn_left_id):
    lane_in_num,lane_out_num = 0,0  # the exit-lane info counted here belongs to the direction obtained by rotating the approach direction 90 degrees counter-clockwise
lane_in_id,lane_out_id = [],[]
entry_lane_id = df_all_lan_topo_info[df_all_lan_topo_info['lane_id']==right_lane_id]['entry_lanes'].iloc[0][0]
exit_lane_id = df_all_lan_topo_info[df_all_lan_topo_info['lane_id']==right_lane_id]['exit_lanes'].iloc[0][0]
    # ---------------- approach (entry) lane information ----------------
    # lane IDs and counts
    # the lane to the right of a right-turn lane is usually a bike lane and is not handled for now
lane_in_id.append(entry_lane_id)
lane_in_left = df_all_lan_topo_info[df_all_lan_topo_info['lane_id'] == entry_lane_id]['left_neighbors_id']
    while (lane_in_left.tolist()[0] != -1):  # if there is no lane on the left, this is a single-lane approach
flag = 0
lane_in_left = df_all_lan_topo_info[df_all_lan_topo_info['lane_id'] == entry_lane_id]['left_neighbors_id']
# print('ssddffs')
#print(lane_in_left.tolist())
# print(lane_in_left.tolist())
# print(lane_in_left.values)
# print(lane_in_left.tolist()[0] ==-1)
if (lane_in_left.tolist()[0] == -1):
break
lane_in_id.append(lane_in_left.tolist()[0])
entry_lane_id = lane_in_left.tolist()[0]
#print(entry_lane_id)
lane_in_num = len(lane_in_id)
    # ------------------- exit lane information -------------------
    # lane IDs and counts
lane_out_id.append(exit_lane_id)
lane_out_left = df_all_lan_topo_info[df_all_lan_topo_info['lane_id'] == exit_lane_id]['left_neighbors_id']
    while (lane_out_left.tolist()[0] != -1):  # if there is no lane on the left, this is a single exit lane
flag = 0
lane_out_left = df_all_lan_topo_info[df_all_lan_topo_info['lane_id'] == exit_lane_id]['left_neighbors_id']
if (lane_out_left.tolist()[0] == -1):
break
lane_out_id.append(lane_out_left.tolist()[0])
exit_lane_id = lane_out_left.tolist()[0]
lane_out_num = len(lane_out_id)
return lane_in_num, lane_in_id, lane_out_num, lane_out_id
def get_lane_width(df_all_lan_topo_info,single_map_dict,lane_in_num, lane_in_id, lane_out_num, lane_out_id):
lane_in_width, lane_out_width = 0,0
    # approach-lane width
    # lane-width calculation
if lane_in_num>1:
width_in_sum = 0
for i in range(lane_in_num-1):
lane_in_cal_1_id = lane_in_id[i]
lane_in_cal_2_id = lane_in_id[i + 1]
            x1,y1 = single_map_dict[lane_in_cal_1_id][-1].x, single_map_dict[lane_in_cal_1_id][-1].y  # for approach lanes use the last point; for exit lanes it should be the first point
x2, y2 = single_map_dict[lane_in_cal_2_id][-1].x, single_map_dict[lane_in_cal_2_id][-1].y
#print("one lane width is %f"%(math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)))
width_in_sum += math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
lane_in_width = width_in_sum / (lane_in_num-1)
#print("lane width ave = %f"%lane_in_width)
elif lane_in_num == 1:
        lane_in_width = 3.5  # should really be reconciled with the opposing exit lanes
    # exit-lane width
if lane_out_num > 1:
width_out_sum = 0
for i in range(lane_out_num-1):
lane_out_cal_1_id = lane_out_id[i]
lane_out_cal_2_id = lane_out_id[i+1]
x1,y1 = single_map_dict[lane_out_cal_1_id][0].x,single_map_dict[lane_out_cal_1_id][0].y
x2, y2 = single_map_dict[lane_out_cal_2_id][0].x, single_map_dict[lane_out_cal_2_id][0].y
width_out_sum += math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
lane_out_width = width_out_sum / (lane_out_num-1)
elif lane_out_num ==1:
lane_out_width = 3.5
return lane_in_width,lane_out_width
def intersection_info_extract(df_all_lane_topo_info,single_map_dict,lane_turn_left_id_ori,lane_turn_right_id_ori,intersection_center_loc,length_1):
intersection_info = {}
intersection_info['file_index'] = df_all_lane_topo_info['file_index'].iloc[0]
intersection_info['scenario_id'] = df_all_lane_topo_info['scenario_id'].iloc[0]
    intersection_info['intersection_center_point'] = intersection_center_loc  # centre location of the interacting vehicles
    # --------------------- select all left- and right-turn lanes within the target intersection ---------------------
    length_1 = 100  # preliminary intersection extent (initially 50 m); may need adjustment
A = (intersection_center_loc[0]-length_1/2,intersection_center_loc[1]+length_1/2)
B = (intersection_center_loc[0]+length_1/2,intersection_center_loc[1]+length_1/2)
C = (intersection_center_loc[0]+length_1/2,intersection_center_loc[1]-length_1/2)
D = (intersection_center_loc[0]-length_1/2,intersection_center_loc[1]-length_1/2)
intersection_range_approximate = [A,B,C,D]
lane_turn_left_id = []
lane_turn_right_id = []
    # keep only the turn lanes that lie inside the target intersection
for left_id in lane_turn_left_id_ori:
if (judge_lane_in_intersection(single_map_dict[left_id],intersection_range_approximate)==1):
lane_turn_left_id.append(left_id)
for right_id in lane_turn_right_id_ori:
if (judge_lane_in_intersection(single_map_dict[right_id],intersection_range_approximate)==1): #!!!
lane_turn_right_id.append(right_id)
# print('mmmm')
# print(lane_turn_left_id_ori, lane_turn_right_id_ori)
# print(lane_turn_left_id,lane_turn_right_id)
intersection_info['lane_id_turn_left_inside'] = lane_turn_left_id
intersection_info['lane_id_turn_right_inside'] = lane_turn_right_id
    # everything below is based on the diverging/merging points of the right- and left-turn lanes
    merging_points_left = []
    merging_points_right = []  # merging points of the right-turn lanes
    diverging_points_left = []
    diverging_points_right = []  # diverging points of the right-turn lanes
all_lane_id = pd.unique(df_all_lane_topo_info['lane_id'].tolist())
    points_key = []  # set of right-turn merging and diverging points
for right_lane_id in lane_turn_right_id:
        # collect the diverging and merging points connected to each right-turn lane
        point_start_x = single_map_dict[right_lane_id][0].x  # start point, i.e. the diverging point
        point_start_y = single_map_dict[right_lane_id][0].y
        point_end_x = single_map_dict[right_lane_id][-1].x  # end point, i.e. the merging point
        point_end_y = single_map_dict[right_lane_id][-1].y
merging_points_right.append((point_end_x,point_end_y))
diverging_points_right.append((point_start_x,point_start_y))
point_start = Point(point_start_x,point_start_y)
point_end = Point(point_end_x,point_end_y)
points_key.append(point_start)
points_key.append(point_end)
        # entry and exit lanes of this right-turn lane
#print('test_1:')
#print(df_single_scenario_lane_topo_info[df_single_scenario_lane_topo_info['lane_id']==right_lane_id]['entry_lanes'])
entry_lane_id = df_all_lane_topo_info[df_all_lane_topo_info['lane_id'] == right_lane_id]['entry_lanes'].iloc[0]
exit_lane_id = df_all_lane_topo_info[df_all_lane_topo_info['lane_id'] == right_lane_id]['exit_lanes'].iloc[0]
        # mark whether the lanes connected to the right-turn lane are approaches or exits
df_all_lane_topo_info.loc[df_all_lane_topo_info['lane_id'] == entry_lane_id, 'entry_or_exit'] = 'entry'
df_all_lane_topo_info.loc[df_all_lane_topo_info['lane_id'] == exit_lane_id, 'entry_or_exit'] = 'exit'
        df_all_lane_topo_info.loc[df_all_lane_topo_info['lane_id'] == right_lane_id, 'entry_or_exit'] = 'inside'  # the right-turn lane itself lies inside the intersection
        # tag the function of the approach and exit lanes
        df_all_lane_topo_info.loc[df_all_lane_topo_info['lane_id'] == entry_lane_id, 'lane_function'] = 'right'  # a through lane whose function here is to feed the right turn
        df_all_lane_topo_info.loc[df_all_lane_topo_info['lane_id'] == exit_lane_id, 'lane_function'] = 'right'
df_all_lane_topo_info.loc[:, 'entry_or_exit'] = ''
#print(points_key,lane_turn_right_id)
    points_key_new, points_key_new_plus = get_point_order(points_key)  # ordering that connects all right-turn merge/diverge points into a polygon; points_key_new_plus expands the polygon outwards by 5 m as a margin
for lane_in_id in all_lane_id:
if (lane_in_id not in lane_turn_left_id) and (lane_in_id not in lane_turn_right_id):
if judge_lane_in_intersection(single_map_dict[lane_in_id], points_key_new_plus) ==1:
                df_all_lane_topo_info.loc[df_all_lane_topo_info['lane_id'] == lane_in_id, 'entry_or_exit'] = 'inside'  # the lane lies inside the intersection
    for left_lane_id in lane_turn_left_id:  # for every left-turn lane
        #df_single_scenario_lane_topo_info[df_single_scenario_lane_topo_info['lane_id'] == left_lane_id]['entry_or_exit'] = 'inside'  # the left-turn lane itself lies inside the intersection
        df_all_lane_topo_info.loc[df_all_lane_topo_info['lane_id'] == left_lane_id, 'entry_or_exit'] = 'inside'  # the left-turn lane itself lies inside the intersection
entry_lane_id = df_all_lane_topo_info[df_all_lane_topo_info['lane_id'] == left_lane_id]['entry_lanes'].iloc[0]
exit_lane_id = df_all_lane_topo_info[df_all_lane_topo_info['lane_id'] == left_lane_id]['exit_lanes'].iloc[0]
        # record approach/exit roles
df_all_lane_topo_info.loc[df_all_lane_topo_info['lane_id'] == entry_lane_id, 'entry_or_exit'] = 'entry'
df_all_lane_topo_info.loc[df_all_lane_topo_info['lane_id'] == exit_lane_id, 'entry_or_exit'] = 'exit'
        # tag lane function
        df_all_lane_topo_info.loc[df_all_lane_topo_info['lane_id'] == entry_lane_id, 'lane_function'] = 'left'  # a through lane whose function here is to feed the left turn
df_all_lane_topo_info.loc[df_all_lane_topo_info['lane_id'] == exit_lane_id, 'lane_function'] = 'left'
#print('sss')
#print(df_single_scenario_lane_topo_info['entry_or_exit'])
intersection_info['intersection_anlge'] = round(intersection_angle_cal(df_all_lane_topo_info), 2)
    # intersection-type classification; needs refinement to cover more types
    if len(merging_points_right) >= 4 and len(diverging_points_right)>=4 :
        if 105>=intersection_info['intersection_anlge']>=75:
            intersection_info['Type'] = 'Cross'  # cross-shaped (+) intersection
        elif 0<intersection_info['intersection_anlge']<75 or 105<intersection_info['intersection_anlge']<180:
            intersection_info['Type'] = 'X'  # X-shaped (skewed) intersection
    elif len(merging_points_right) >= 2 and len(diverging_points_right)>= 2 :
        intersection_info['Type'] = 'T'  # T-shaped intersection
#----------Extract the lane number and width of the intersection
extract_direction_index = 1
#print(lane_turn_right_id)
for right_lane_id in lane_turn_right_id:
lane_in_num, lane_in_id, lane_out_num, lane_out_id = get_one_direction_lane_info(right_lane_id, df_all_lane_topo_info, single_map_dict, lane_turn_left_id)
        lane_in_width,lane_out_width = get_lane_width(df_all_lane_topo_info, single_map_dict, lane_in_num, lane_in_id, lane_out_num, lane_out_id)  # compute lane widths
#print(lane_in_num, lane_in_id, lane_in_width,lane_out_num, lane_out_id, lane_out_width)
        # record approach-lane count, IDs and width
intersection_info['direction_' + str(extract_direction_index) + '_in_lane_num'] = lane_in_num
intersection_info['direction_' + str(extract_direction_index) + '_in_lane_id_list'] = lane_in_id
intersection_info['direction_' + str(extract_direction_index) + '_in_lane_width'] = round(lane_in_width,2)
        # record exit-lane count, IDs and width
intersection_info['direction_' + str(extract_direction_index) + '_out_lane_num'] = lane_out_num
intersection_info['direction_' + str(extract_direction_index) + '_out_lane_id_list'] = lane_out_id
intersection_info['direction_' + str(extract_direction_index) + '_out_lane_width'] = round(lane_out_width)
extract_direction_index += 1
return intersection_info, df_all_lane_topo_info,lane_turn_right_id
def map_lane_point_extract(map_features,file_index,scenario_label):
map_point_list = []
for single_feature in map_features:
        if list(single_feature.road_edge.polyline) != []:  # type is road_edge
for polyline in single_feature.road_edge.polyline:
dic_map = {}
dic_map['file_index'] = file_index
dic_map['scenario_label'] = scenario_label
dic_map['line_id'] = single_feature.id
dic_map['type'] = 'road_edge'
if single_feature.road_edge.type == 0:
dic_map['line_type'] = 'UNKNOWN'
elif single_feature.road_edge.type ==1:
dic_map['line_type'] = 'ROAD_EDGE_BOUNDARY'
elif single_feature.road_edge.type == 2:
dic_map['line_type'] = 'ROAD_EDGE_MEDIAN'
dic_map['point_x'] = polyline.x
dic_map['point_y'] = polyline.y
dic_map['point_z'] = polyline.z
map_point_list.append(dic_map)
if list(single_feature.lane.polyline) != []:
for polyline in single_feature.lane.polyline:
dic_map = {}
dic_map['file_index'] = file_index
dic_map['scenario_label'] = scenario_label
dic_map['line_id'] = single_feature.id
dic_map['type'] = 'lane'
if single_feature.lane.type == 0:
dic_map['line_type'] = 'UNDEFINED'
elif single_feature.lane.type ==1:
dic_map['line_type'] = 'FREEWAY'
elif single_feature.lane.type == 2:
dic_map['line_type'] = 'SURFACE_STREET'
elif single_feature.lane.type == 3:
dic_map['line_type'] = 'BIKE_LANE'
dic_map['point_x'] = polyline.x
dic_map['point_y'] = polyline.y
dic_map['point_z'] = polyline.z
dic_map['entry_lanes'] = single_feature.lane.entry_lanes
dic_map['exit_lanes'] = single_feature.lane.exit_lanes
# dic_map['left_neighbors_id'] = single_feature.lane.left_neighbors.feature_id
# dic_map['right_neighbors_id'] = single_feature.lane.right_neighbors.feature_id
map_point_list.append(dic_map)
if list(single_feature.road_line.polyline) != []:
for polyline in single_feature.road_line.polyline:
dic_map = {}
dic_map['file_index'] = file_index
dic_map['scenario_label'] = scenario_label
dic_map['line_id'] = single_feature.id
dic_map['type'] = 'road_line'
if single_feature.road_line.type == 0:
dic_map['line_type'] = 'UNKNOWN'
elif single_feature.road_line.type ==1:
dic_map['line_type'] = 'BROKEN_SINGLE_WHITE'
elif single_feature.road_line.type == 2:
dic_map['line_type'] = 'SOLID_SINGLE_WHITE'
elif single_feature.road_line.type == 3:
dic_map['line_type'] = 'SOLID_DOUBLE_WHITE'
elif single_feature.road_line.type == 4:
dic_map['line_type'] = 'BROKEN_SINGLE_YELLOW'
elif single_feature.road_line.type == 5:
dic_map['line_type'] = 'BROKEN_DOUBLE_YELLOW'
elif single_feature.road_line.type == 6:
dic_map['line_type'] = 'SOLID_SINGLE_YELLOW'
elif single_feature.road_line.type == 7:
dic_map['line_type'] = 'SOLID_DOUBLE_YELLOW'
elif single_feature.road_line.type == 8:
dic_map['line_type'] = 'PASSING_DOUBLE_YELLOW'
dic_map['point_x'] = polyline.x
dic_map['point_y'] = polyline.y
dic_map['point_z'] = polyline.z
map_point_list.append(dic_map)
df_single_scenario_map_point = pd.DataFrame(map_point_list)
return df_single_scenario_map_point
if __name__ == '__main__':
#-----------------------load_data ---------------------
test_state = 1
filepath_oridata = 'E:/waymo_motion_dataset/training_20s.tfrecord-*-of-01000'
all_file_list, file_index_list = get_file_list(filepath_oridata)
scenario_all_count = 0
all_intersection_info = []
    length = 80  # extent of the target intersection; needs tuning
target_scenario = 16
target_segment = 9
for i in tqdm(range(len(file_index_list))):
single_segment_all_scenario_all_lane_topo_info = []
file_index = file_index_list[i]
segment_file = all_file_list[i]
print('Now is the file:%s' % file_index)
if file_index == '00000':
segment_id = 0
else:
segment_id = eval(file_index.strip('0'))
if test_state == 1 :
if segment_id < target_segment:
continue
elif segment_id > target_segment:
break
filepath_trj = 'E:/Result_save/data_save/all_scenario_all_objects_info/'+ file_index +'_all_scenario_all_object_info_1.csv'
seg_trj = pd.read_csv(filepath_trj)
single_seg_all_scenario_id = | pd.unique(seg_trj['scenario_label']) | pandas.unique |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
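# Fixture layout (for reference): columns 'a', 'b' and 'c' carry three records each and 'd' carries none;
# the grouped wrappers merge them into g1 = {'a', 'b'} and g2 = {'c', 'd'}.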
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index= | pd.Float64Index([10.0, 11.0, 12.0], dtype='float64') | pandas.Float64Index |
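# --- Illustrative sketch added by the editor (not part of the original test file) ---
# pd.Float64Index builds an explicitly float64-typed index, as expected for the
# value_counts() result above. Newer pandas deprecates/removes it in favour of
# pd.Index([...], dtype='float64'); this sketch assumes a pandas version that still has it.
import pandas as pd
counts = pd.Series([1, 1, 1], index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'))
print(counts.index.dtype)  # float64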
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
import seaborn as sns
import scanpy as sc
import json
from scipy.stats import zscore
import numpy as np
#----------------------------------------------------------------
f_gl='./out/a01_heatmap_01_pp/gene.json'
f_ada='./out/a00_p7_04_anno/p7.h5ad'
f_meta='./raw/meta_info.csv'
fd_out='./out/a01_heatmap_02_plot'
l_cell=['IHC', 'OHC', 'Pillar', 'Deiter']
#--------------------------------------------------------
Path(fd_out).mkdir(exist_ok=True, parents=True)
#color map
df_meta=pd.read_csv(f_meta, index_col=0)
dic_cmap=df_meta.to_dict()['color']
#---------------------------------------------------------
def plot_top(df, f_out, l_cell=l_cell, sz=(6,6), dic_cmap=dic_cmap):
cmap=[dic_cmap[i] for i in l_cell]
#clean
df=df.loc[:, ['anno']]
df['anno']=df.anno.cat.codes
df=df.loc[:, ['anno']].T
#plot
fig, ax=plt.subplots(figsize=sz)
ax=sns.heatmap(df, cmap=cmap, cbar=False)
#adjust
ax.xaxis.label.set_visible(False)
ax.yaxis.label.set_visible(False)
plt.xticks([])
plt.yticks([])
#save
plt.tight_layout()
plt.savefig(f_out, dpi=300)
plt.close()
return
def plot_hm(df, f_out, size=(10,15), vmax=None, vmin=-0.3, y=14):
df=df.drop_duplicates()
#2. heatmap
fig, ax=plt.subplots(figsize=size)
ax=sns.heatmap(df, cmap='Purples', vmax=vmax, vmin=vmin, cbar=True)
#3. adjust
ax.xaxis.label.set_visible(False)
ax.yaxis.label.set_visible(False)
plt.xticks([])
plt.yticks(np.arange(0.5, df.shape[0]+0.5, 1), df.index.tolist(), fontsize=y, rotation=0, weight='medium')
#4. save
plt.tight_layout()
plt.savefig(f_out, dpi=300)
plt.close()
return
############################################################################
#load
ada=sc.read(f_ada)
with open(f_gl, 'r') as f:
dic_gene=json.load(f)
#get gene list
l_gene=[]
l=l_cell+['Others']
for cell in l:
l_gene.extend(dic_gene[cell])
#make df
ada=ada[:, l_gene]
df=pd.DataFrame(ada.X, index=ada.obs.index, columns=ada.var.index)
df['anno']=ada.obs['anno']
df=df.loc[df['anno'].isin(l_cell), :]
df['anno']= | pd.Categorical(df['anno'], categories=l_cell, ordered=True) | pandas.Categorical |
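# --- Illustrative sketch added by the editor (not part of the original script) ---
# pd.Categorical with ordered=True fixes both the allowed categories and their order,
# so .cat.codes (used in plot_top above) follows l_cell rather than alphabetical order.
# Hypothetical example with the same cell-type labels:
import pandas as pd
anno = pd.Categorical(['OHC', 'IHC', 'Deiter', 'Pillar', 'IHC'],
                      categories=['IHC', 'OHC', 'Pillar', 'Deiter'], ordered=True)
print(pd.Series(anno).cat.codes.tolist())  # [1, 0, 3, 2, 0]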
from pathlib import Path
from tqdm.notebook import tqdm
from tqdm import trange
import pickle
import nltk
import math
import os
import random
import re
import torch
import torch.nn as nn
from transformers import AdamW, get_linear_schedule_with_warmup
from torch.utils.data import (DataLoader, RandomSampler, WeightedRandomSampler, SequentialSampler, TensorDataset)
from transformers import (WEIGHTS_NAME, BertConfig, BertForSequenceClassification, BertTokenizer,
XLMConfig, XLMForSequenceClassification, XLMTokenizer,
XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer,
RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer,
BartConfig, BartTokenizer, BartForSequenceClassification,
LongformerConfig, LongformerForSequenceClassification, LongformerTokenizer,
AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer,
ElectraConfig, ElectraForSequenceClassification, ElectraTokenizer,
ReformerConfig, ReformerForSequenceClassification, ReformerTokenizer,
MobileBertConfig, MobileBertForSequenceClassification, MobileBertTokenizer,
DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer,
AutoTokenizer, AutoModel, AutoModelForSequenceClassification,
)
import sys
import warnings
from collections import namedtuple, OrderedDict
from functools import partial
import scipy.sparse as sp
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import _document_frequency
from sklearn.utils.validation import check_is_fitted
from pathlib import Path
from tqdm import tqdm
import pandas as po
import numpy as np
from sklearn.metrics.pairwise import cosine_distances
from paths import get_path_predict, get_path_df_scores, get_path_q
from rank import maybe_concat_texts, BM25Vectorizer, deduplicate_combos, remove_combo_suffix, add_missing_idxs, get_ranks, get_preds
SEP = "#" * 100 + "\n"
MODE_TRAIN = "train"
MODE_DEV = "dev"
MODE_TEST = "test"
def obtain_useful(path_data, path_tables, mode='train',top_k = 500):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
df_exp = get_df_explanations(path_tables, path_data)
uid2idx = {uid: idx for idx, uid in enumerate(df_exp.uid.tolist())}
uids = df_exp.uid.apply(remove_combo_suffix).values
path_q = get_path_q(path_data, mode)
df = get_questions(str(path_q), uid2idx, path_data)
ranks = get_ranks(df, df_exp, use_embed=False, use_recursive_tfidf=True)
preds = get_preds(ranks, df, df_exp)
df_exp_copy = df_exp.set_index('uid')
uid2text = df_exp_copy['text'].to_dict()
return df, df_exp, uids, uid2idx, uid2text, ranks, preds
def obtain_model_names_and_classes(model_name='roberta', model_type='roberta-base'):
MODEL_CLASSES = {
'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
'bart': (BartConfig, BartForSequenceClassification, BartTokenizer),
'longformer':(LongformerConfig, LongformerForSequenceClassification, LongformerTokenizer),
'albert':(AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
'electra':(ElectraConfig, ElectraForSequenceClassification, ElectraTokenizer),
'reformer':(ReformerConfig, ReformerForSequenceClassification, ReformerTokenizer),
'distilbert':(DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
'scibert':( AutoModel, AutoModelForSequenceClassification,AutoTokenizer),
}
types_of_models = [
    {'bert': ['bert-base-uncased']},
    {'xlm': ['xlm-mlm-en-2048']},
    {'roberta': ['roberta-base']},
    {'bart': ["facebook/bart-base"]},
    {'longformer': ['allenai/longformer-base-4096']},
    {'albert': ['albert-xlarge-v2', 'albert-large-v2', 'albert-base-v2']},
    {'electra': ['google/electra-large-generator']},
    {'reformer': ['google/reformer-crime-and-punishment', 'google/reformer-enwik8']},
    {'distilbert': ['distilbert-base-uncased']},
    {'scibert': ['allenai/scibert_scivocab_uncased']},
]
print("Choose from the list of models and their respective pretrained versions:")
print(types_of_models)
model_with_no_token_types = ['roberta', 'bart', 'longformer', 'albert', 'electra', 'reformer', 'distilbert', 'scibert']
config_class, model_classifier, model_tokenizer = MODEL_CLASSES[model_name]
tokenizer = model_tokenizer.from_pretrained(model_type)
return MODEL_CLASSES, model_with_no_token_types, tokenizer
def compute_ranks(true, pred):
ranks = []
if not true or not pred:
return ranks
targets = list(true)
for i, pred_id in enumerate(pred):
for true_id in targets:
if pred_id == true_id:
ranks.append(i + 1)
targets.remove(pred_id)
break
if targets:
warnings.warn(
'targets list should be empty, but it contains: ' + ', '.join(targets),
ListShouldBeEmptyWarning)
for _ in targets:
ranks.append(0)
return ranks
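# --- Worked example added by the editor (not part of the original file) ---
# compute_ranks records, for each gold id, the 1-based position at which it is first
# matched in the prediction list, e.g. compute_ranks(['a', 'b'], ['x', 'a', 'b']) -> [2, 3];
# gold ids that never appear in pred contribute a 0 after the warning below.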
def average_precision_score(gold, pred):
if not gold or not pred:
return 0.
correct = 0
ap = 0.
true = set(gold)
for rank, element in enumerate(pred):
if element in true:
correct += 1
ap += correct / (rank + 1.)
true.remove(element)
return ap / len(gold)
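# --- Worked example added by the editor (not part of the original file) ---
# average_precision_score credits each gold fact at the rank where it is retrieved:
# gold = ['a', 'b'], pred = ['a', 'x', 'b'] -> hits at ranks 1 and 3,
# AP = (1/1 + 2/3) / 2 = 5/6 ~= 0.833
assert abs(average_precision_score(['a', 'b'], ['a', 'x', 'b']) - 5 / 6) < 1e-9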
def prepare_features(seq_1,seq_2, max_seq_length = 300,
zero_pad = True, include_CLS_token = True, include_SEP_token = True):
## Tokenzine Input
tokens_a = tokenizer.tokenize(seq_1)
tokens_b = tokenizer.tokenize(seq_2)
## Truncate
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
if len(tokens_b) > max_seq_length - 2:
tokens_b = tokens_b[0:(max_seq_length - 2)]
## Initialize Tokens
tokens = []
if include_CLS_token:
tokens.append(tokenizer.cls_token)
## Add Tokens and separators
for token in tokens_a:
tokens.append(token)
if include_SEP_token:
tokens.append(tokenizer.sep_token)
for token in tokens_b:
tokens.append(token)
if include_SEP_token:
tokens.append(tokenizer.sep_token)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
## Input Mask
input_mask = [1] * len(input_ids)
##Segment_ids
segment_ids = [0]*(len(tokens_a)+1)
segment_ids+= [1]*(len(tokens_b)+1)
segment_ids = [0] + segment_ids
## Zero-pad sequence length
if zero_pad:
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
#return torch.tensor(input_ids).unsqueeze(0), input_mask
return input_ids, input_mask ,segment_ids
class DefaultLemmatizer:
"""
Best used to transform texts beforehand and to obtain lemmas during tokenization.
"""
def __init__(self, path_data: Path = None) -> None:
if path_data is None:
self.word2lemma = {}
else:
path_anno = path_data.joinpath("annotation")
path = path_anno.joinpath("lemmatization-en.txt")
def read_csv(_path: str, names: list = None) -> po.DataFrame:
return po.read_csv(_path, header=None, sep="\t", names=names)
df = read_csv(str(path), ["lemma", "word"])
#path_extra = path_anno.joinpath(
# "expl-tablestore-export-2017-08-25-230344/tables/LemmatizerAdditions.tsv"
#)
path_extra = path_anno.joinpath("LemmatizerAdditions.tsv")
df_extra = read_csv(str(path_extra), ["lemma", "word", "useless"])
df_extra.drop(columns=["useless"], inplace=True)
df_extra.dropna(inplace=True)
length_old = len(df)
# df = po.concat([df, df_extra]) # Actually concat extra hurts MAP (0.462->0.456)
print(
f"Default lemmatizer ({length_old}) concatenated (or not) with extras ({len(df_extra)}) -> {len(df)}"
)
lemmas = df.lemma.tolist()
words = df.word.tolist()
def only_alpha(text: str) -> str:
# Remove punct eg dry-clean -> dryclean so
# they won't get split by downstream tokenizers
return "".join([c for c in text if c.isalpha()])
self.word2lemma = {
words[i].lower(): only_alpha(lemmas[i]).lower()
for i in range(len(words))
}
def transform(self, raw_texts: list) -> list:
def _transform(text: str):
return " ".join(
[self.word2lemma.get(word) or word for word in text.split()])
return [_transform(text) for text in raw_texts]
# Basic function from tfidf_baseline
def read_explanations(path):
header = []
uid = None
df = po.read_csv(path, sep='\t')
for name in df.columns:
if name.startswith('[SKIP]'):
if 'UID' in name and not uid:
uid = name
else:
header.append(name)
if not uid or len(df) == 0:
warnings.warn('Possibly misformatted file: ' + path)
return []
return df.apply(
lambda r:
(r[uid], ' '.join(str(s) for s in list(r[header]) if not | po.isna(s) | pandas.isna |
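# --- Illustrative sketch added by the editor (not part of the original ranking code) ---
# pandas.isna (imported above as `po`) filters out empty table cells before an
# explanation row is joined into a single text string. Hypothetical example:
import pandas as po
row = po.Series(['animal', None, 'is a kind of', float('nan'), 'organism'])
print(' '.join(str(s) for s in row if not po.isna(s)))  # "animal is a kind of organism"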
"""Implement Snowfall class.
This module contains the Snowfall class used to run repeated (!)
simulations of water nucleation in vials. It makes use of class
Snowflake for the individual simulations.
"""
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
import multiprocessing as mp
from typing import List, Tuple, Union, Sequence, Optional
from ethz_snow.snowflake import Snowflake
class Snowfall:
"""A class to handle multiple Stochastic Nucleation of Water simulation.
More information regarding the equations and their derivation can be found in
XXX, Deck et al. (2021) as well as the Snowflake class documentation.
Parameters:
Nrep (int): Number of repetitions.
pool_size (int): Size of worker pool for parallelization.
stats (dict): Statistics for each simulation.
stats_df (pd.DataFrame): Long-form table of all statistics.
simulationStatus (int): Status of simulation (0 = not run, 1 = run).
"""
def __init__(self, Nrep: int = 5, pool_size: int = None, **kwargs):
"""Construct a Snowfall object.
Args:
Nrep (int, optional): Number of repetitions. Defaults to 5.
pool_size (int, optional): Size of worker pool for parallelization.
Defaults to None (= # of cpu cores).
"""
self.pool_size = pool_size
self.Nrep = int(Nrep)
if "seed" in kwargs.keys():
# seed will be chosen by Snowfall
del kwargs["seed"]
if ("storeStates" in kwargs.keys()) and (kwargs["storeStates"] is not None):
print("WARNING: States cannot be stored for Snowfall simulations.")
del kwargs["storeStates"]
Sf_template = Snowflake(**kwargs)
Sf_template._buildHeatflowMatrices() # pre-build H_int, H_ext, H_shelf
self.Sf_template = Sf_template
self.stats = dict()
self.stats_df = | pd.DataFrame() | pandas.DataFrame |
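# --- Illustrative sketch added by the editor (not part of the original class) ---
# stats_df starts as an empty pd.DataFrame and is later filled with long-form,
# per-repetition statistics. A hypothetical miniature of what it could hold:
import pandas as pd
stats_df = pd.DataFrame()
new_rows = pd.DataFrame({'repetition': [0, 0], 'vial': [1, 2], 'nucleationTime': [1250.0, 1310.5]})
stats_df = pd.concat([stats_df, new_rows], ignore_index=True)
print(stats_df.shape)  # (2, 3)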
"""
Extracts basic patient information.
"""
import click
import feather
import pandas as pd
from logging import *
@click.command()
@click.option(
'--input', required=True, help='read input data from XLS file INPUT')
@click.option(
'--output',
required=True,
help='output extracted data to Feather file OUTPUT')
def main(input, output):
basicConfig(
level=INFO,
handlers=[
StreamHandler(), FileHandler(
'{}.log'.format(output), mode='w')
])
info('Reading data from {}'.format(input))
data = | pd.read_excel(input) | pandas.read_excel |
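# --- Illustrative sketch added by the editor (not part of the original script) ---
# pd.read_excel loads the first sheet of a workbook into a DataFrame. The file name
# below is hypothetical; writing/reading .xlsx requires openpyxl (legacy .xls needs xlrd).
import pandas as pd
pd.DataFrame({'patient_id': [1, 2], 'age': [34, 57]}).to_excel('patients_demo.xlsx', index=False)
data = pd.read_excel('patients_demo.xlsx')
print(data.shape)  # (2, 2)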
import nose
import os
import string
from distutils.version import LooseVersion
from datetime import datetime, date, timedelta
from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range
from pandas.compat import range, lrange, StringIO, lmap, lzip, u, zip
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import randn
from numpy.testing import assert_array_equal
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
def _skip_if_no_scipy():
try:
import scipy
except ImportError:
raise nose.SkipTest("no scipy")
@tm.mplskip
class TestSeriesPlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
def tearDown(self):
tm.close()
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
_check_plot_works(self.ts.plot, rot=0)
_check_plot_works(self.ts.plot, style='.', logy=True)
_check_plot_works(self.ts.plot, style='.', logx=True)
_check_plot_works(self.ts.plot, style='.', loglog=True)
_check_plot_works(self.ts[:10].plot, kind='bar')
_check_plot_works(self.iseries.plot)
_check_plot_works(self.series[:5].plot, kind='bar')
_check_plot_works(self.series[:5].plot, kind='line')
_check_plot_works(self.series[:5].plot, kind='barh')
_check_plot_works(self.series[:10].plot, kind='barh')
_check_plot_works(Series(randn(10)).plot, kind='bar', color='black')
@slow
def test_plot_figsize_and_title(self):
# figsize and title
import matplotlib.pyplot as plt
ax = self.series.plot(title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
import matplotlib.colors as colors
default_colors = plt.rcParams.get('axes.color_cycle')
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(kind='bar')
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(default_colors[i % len(default_colors)])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
ax = df.plot(kind='bar', color=custom_colors)
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(custom_colors[i])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot(kind='bar', colormap='jet')
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
# Test colormap functionality
ax = df.plot(kind='bar', colormap=cm.jet)
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
df.ix[:, [0]].plot(kind='bar', color='DodgerBlue')
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot(kind='bar', linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# stacked
ax = df.plot(kind='bar', stacked=True, linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# subplots
axes = df.plot(kind='bar', linewidth=2, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot(log=True, kind='bar')
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
def test_rotation(self):
df = DataFrame(randn(5, 5))
ax = df.plot(rot=30)
for l in ax.get_xticklabels():
self.assertEqual(l.get_rotation(), 30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_hist(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, by=self.ts.index.month)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_layout(self):
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n)})
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
import matplotlib.pyplot as plt
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
_check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
tm.close()
_check_plot_works(df.height.hist, by=df.gender, layout=(1, 2))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(1, 4))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(4, 1))
tm.close()
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf, close
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with tm.assertRaises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_by_no_extra_plots(self):
import matplotlib.pyplot as plt
n = 10
df = DataFrame({'gender': tm.choice(['Male', 'Female'], size=n),
'height': random.normal(66, 4, size=n)})
axes = df.height.hist(by=df.gender)
self.assertEqual(len(plt.get_fignums()), 1)
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure, close
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with tm.assertRaises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
@slow
def test_kde(self):
_skip_if_no_scipy()
_check_plot_works(self.ts.plot, kind='kde')
_check_plot_works(self.ts.plot, kind='density')
ax = self.ts.plot(kind='kde', logy=True)
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_kwargs(self):
_skip_if_no_scipy()
from numpy import linspace
_check_plot_works(self.ts.plot, kind='kde', bw_method=.5, ind=linspace(-100,100,20))
_check_plot_works(self.ts.plot, kind='density', bw_method=.5, ind=linspace(-100,100,20))
ax = self.ts.plot(kind='kde', logy=True, bw_method=.5, ind=linspace(-100,100,20))
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_color(self):
_skip_if_no_scipy()
ax = self.ts.plot(kind='kde', logy=True, color='r')
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0].get_color(), 'r')
@slow
def test_autocorrelation_plot(self):
from pandas.tools.plotting import autocorrelation_plot
_check_plot_works(autocorrelation_plot, self.ts)
_check_plot_works(autocorrelation_plot, self.ts.values)
@slow
def test_lag_plot(self):
from pandas.tools.plotting import lag_plot
_check_plot_works(lag_plot, self.ts)
_check_plot_works(lag_plot, self.ts, lag=5)
@slow
def test_bootstrap_plot(self):
from pandas.tools.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, self.ts, size=10)
def test_invalid_plot_data(self):
s = Series(list('abcd'))
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with tm.assertRaises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@tm.mplskip
class TestDataFramePlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
def tearDown(self):
tm.close()
@slow
def test_plot(self):
df = tm.makeTimeDataFrame()
_check_plot_works(df.plot, grid=False)
_check_plot_works(df.plot, subplots=True)
_check_plot_works(df.plot, subplots=True, use_index=False)
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
self._check_plot_fails(df.plot, kind='line', blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
_check_plot_works(df.plot, subplots=True, title='blah')
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
@slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self.assertEqual(ax.xaxis.get_label().get_text(), 'a')
@slow
def test_explicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b', label='LABEL')
self.assertEqual(ax.xaxis.get_label().get_text(), 'LABEL')
@slow
def test_plot_xy(self):
import matplotlib.pyplot as plt
# columns.inferred_type == 'string'
df = tm.makeTimeDataFrame()
self._check_data(df.plot(x=0, y=1),
df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'),
df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2),
df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@slow
def test_xcompat(self):
import pandas as pd
import matplotlib.pyplot as plt
df = tm.makeTimeDataFrame()
ax = df.plot(x_compat=True)
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
tm.assert_isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with pd.plot_params.use('x_compat', True):
ax = df.plot()
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
ax = df.plot()
lines = ax.get_lines()
tm.assert_isinstance(lines[0].get_xdata(), PeriodIndex)
def test_unsorted_index(self):
df = DataFrame({'y': np.arange(100)},
index=np.arange(99, -1, -1), dtype=np.int64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64)
tm.assert_series_equal(rs, df.y)
def _check_data(self, xp, rs):
xp_lines = xp.get_lines()
rs_lines = rs.get_lines()
def check_line(xpl, rsl):
xpdata = xpl.get_xydata()
rsdata = rsl.get_xydata()
assert_array_equal(xpdata, rsdata)
[check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]
tm.close()
@slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, sharex=True, legend=True)
for ax in axes:
self.assert_(ax.get_legend() is not None)
axes = df.plot(subplots=True, sharex=True)
for ax in axes[:-2]:
[self.assert_(not label.get_visible())
for label in ax.get_xticklabels()]
[self.assert_(label.get_visible())
for label in ax.get_yticklabels()]
[self.assert_(label.get_visible())
for label in axes[-1].get_xticklabels()]
[self.assert_(label.get_visible())
for label in axes[-1].get_yticklabels()]
axes = df.plot(subplots=True, sharex=False)
for ax in axes:
[self.assert_(label.get_visible())
for label in ax.get_xticklabels()]
[self.assert_(label.get_visible())
for label in ax.get_yticklabels()]
@slow
def test_plot_scatter(self):
from matplotlib.pylab import close
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
_check_plot_works(df.plot, x='x', y='y', kind='scatter')
_check_plot_works(df.plot, x=1, y=2, kind='scatter')
with tm.assertRaises(ValueError):
df.plot(x='x', kind='scatter')
with | tm.assertRaises(ValueError) | pandas.util.testing.assertRaises |
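# --- Illustrative sketch added by the editor (not part of the original test suite) ---
# tm.assertRaises (pandas.util.testing.assertRaises in this pandas era) is a context
# manager asserting that the block raises the given exception; pytest.raises is the
# modern equivalent. Hypothetical standalone test (requires matplotlib):
import pytest
def test_scatter_requires_x_and_y():
    df = DataFrame({'x': [1, 2], 'y': [3, 4]})
    with pytest.raises(ValueError):
        df.plot(x='x', kind='scatter')  # scatter needs both x and y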
import sys
import nltk
nltk.download(['punkt', 'wordnet'])
import re
import numpy as np
import pandas as pd
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report, accuracy_score, recall_score, precision_score, f1_score
from sqlalchemy import create_engine
def load_data(database_filepath):
"""
Load the SQLite database from database_filepath and separate it into two parts:
(1) message inputs and (2) message category labels.
INPUTS:
database_filepath: path to the SQLite database containing the cleaned message table
RETURN:
X: message inputs
Y: labels of message categories
column_name: the category names
"""
engine = create_engine('sqlite:///'+database_filepath)
df = | pd.read_sql('DisasterResponse',con=engine) | pandas.read_sql |
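# --- Illustrative sketch added by the editor (not part of the original pipeline) ---
# pd.read_sql accepts a table name (or a SQL query) plus a SQLAlchemy connectable.
# Hypothetical in-memory round trip with a miniature table:
import pandas as pd
from sqlalchemy import create_engine
demo_engine = create_engine('sqlite://')  # in-memory SQLite
pd.DataFrame({'message': ['need water'], 'related': [1]}).to_sql('DisasterResponse', demo_engine, index=False)
print(pd.read_sql('DisasterResponse', con=demo_engine).columns.tolist())  # ['message', 'related']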
import multiprocessing
import os
import sys
import pandas
import pathlib
import shlex
import subprocess
import gzip
import bz2
from functools import partial
import shlex
# Compatible with both pre- and post Biopython 1.78:
try:
from Bio.Alphabet import generic_dna
except ImportError:
generic_dna = None
from Bio.Seq import Seq
from vtam.utils.Logger import Logger
from vtam.utils.FileParams import FileParams
from vtam.utils.PathManager import PathManager
from vtam.utils.FileSampleInformation import FileSampleInformation
from vtam.utils.FilesInputCutadapt import FilesInputCutadapt
class CommandSortReads(object):
"""Class for the Merge command"""
@staticmethod
def main(fastainfo, fastadir, sorteddir, params=None, num_threads=multiprocessing.cpu_count(),
no_reverse=False, tag_to_end=False, primer_to_end=False):
Logger.instance().info(f"OPTIONS:\n no_reverse: {not no_reverse} \n tag_to_end {not tag_to_end} \n primer_to_end {not primer_to_end}")
if sys.platform.startswith('win'):
num_threads = 1
############################################################################################
#
# params.yml parameters
#
############################################################################################
params_dic = FileParams(params).get_params_dic()
cutadapt_error_rate = params_dic['cutadapt_error_rate']
cutadapt_minimum_length = params_dic['cutadapt_minimum_length']
cutadapt_maximum_length = params_dic['cutadapt_maximum_length']
############################################################################################
#
# Loop over tag and primer pairs to demultiplex and trim reads
#
############################################################################################
merged_fastainfo_df = FileSampleInformation(fastainfo).read_tsv_into_df()
pathlib.Path(sorteddir).mkdir(parents=True, exist_ok=True)
tempdir = PathManager.instance().get_tempdir()
merged_fasta_list = []
results_list = []
sample_info = {}
# make sure every file is analysed once.
for i in range(merged_fastainfo_df.shape[0]):
if merged_fastainfo_df.iloc[i].mergedfasta not in merged_fasta_list:
merged_fasta_list.append(merged_fastainfo_df.iloc[i].mergedfasta)
for mergedfasta in merged_fasta_list:
inputFiles = FilesInputCutadapt(fastainfo, mergedfasta, no_reverse, tag_to_end)
tagFile_path = inputFiles.tags_file()
info = inputFiles.get_df_info()
for key in info.keys():
if key in sample_info.keys():
sample_info[key] = sample_info[key] + info[key]
else:
sample_info[key] = info[key]
Logger.instance().debug("Analysing FASTA file: {}".format(mergedfasta))
in_raw_fasta_path = os.path.join(fastadir, mergedfasta)
########################################################################################
#
# cutadapt --cores=0 -e 0 --no-indels --trimmed-only -g tagFile:$tagfile
# --overlap length -o "tagtrimmed.{name}.fasta" in_raw_fasta_path
#
########################################################################################
base = os.path.basename(in_raw_fasta_path)
base, base_suffix = base.split('.', 1)
out_fasta_path = os.path.join(tempdir, "sorted")
cmd_cutadapt_tag_dic = {
'in_fasta_path': in_raw_fasta_path,
'out_fasta': out_fasta_path,
'num_threads': num_threads,
'tagFile': tagFile_path,
'base_suffix': base_suffix,
}
cmd_cutadapt_tag_str = 'cutadapt --cores={num_threads} --no-indels --error-rate 0 --trimmed-only ' \
'-g file:{tagFile} --output {out_fasta}_{{name}}.{base_suffix} {in_fasta_path}' \
.format(**cmd_cutadapt_tag_dic)
Logger.instance().debug("Running: {}".format(cmd_cutadapt_tag_str))
if sys.platform.startswith("win"):
args = cmd_cutadapt_tag_str
else:
args = shlex.split(cmd_cutadapt_tag_str)
run_result = subprocess.run(args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
Logger.instance().info(run_result.stdout.decode())
inputFiles.remove_tags_file()
########################################################################################
#
# Trim primers from output
# cutadapt --quiet --cores=0 -e trim_error --no-indels --trimmed-only
# --minimum-length minimum_length --maximum-length maximum_length
# --output input_path + {name} + suffix outputfile
#
########################################################################################
primers = inputFiles.primers()
try:
tags_samples = inputFiles.get_sample_names()
except Exception as e:
Logger.instance().error(e)
return
for primer in primers:
marker, primerfwd, primerrev, lenprimerfwd, lenprimerrev = primer
for tag_sample in tags_samples:
name, run, marker2, sample, replicate, _, _ = tag_sample
if marker not in marker2:
continue
in_fasta_path = out_fasta_path + "_" + name + "." + base_suffix
baseMerge = mergedfasta.split(".")[0]
outname = run + "_" + marker + "_" + sample + "_" + replicate + "_" + baseMerge + "_trimmed"
if name.endswith("_reversed"):
outname = outname + "_reversed"
out_fasta_path_new = os.path.join(tempdir, outname + "." + base_suffix)
results_list.append(out_fasta_path_new)
if not "_reversed" in name:
if generic_dna: # Biopython <1.78
primerRev = str(Seq(primerrev, generic_dna).reverse_complement())
else: # Biopython =>1.78
primerRev = str(Seq(primerrev).reverse_complement())
primerFwd = primerfwd
lenPrimerFwd = lenprimerfwd
lenPrimerRev = lenprimerrev
else:
if generic_dna: # Biopython <1.78
primerRev = str(Seq(primerfwd, generic_dna).reverse_complement())
else: # Biopython =>1.78
primerRev = str(Seq(primerfwd).reverse_complement())
primerFwd = primerrev
lenPrimerFwd = lenprimerrev
lenPrimerRev = lenprimerfwd
cmd_cutadapt_primer_dic = {
'in_fasta_path': in_fasta_path,
'out_fasta': out_fasta_path_new,
'error_rate': cutadapt_error_rate,
'num_threads': num_threads,
'primerFwd': primerFwd,
'primerRev': primerRev,
'lenPrimerFwd': lenPrimerFwd,
'lenPrimerRev': lenPrimerRev,
'read_min_length': cutadapt_minimum_length,
'read_max_length': cutadapt_maximum_length,
}
if not primer_to_end: #works if the command is selected
cmd_cutadapt_primer_str = 'cutadapt --cores={num_threads} --no-indels --error-rate {error_rate} ' \
'--minimum-length {read_min_length} --maximum-length {read_max_length} ' \
'--trimmed-only -g "^{primerFwd}...{primerRev}$" --output {out_fasta} {in_fasta_path}'\
.format(**cmd_cutadapt_primer_dic)
else:
cmd_cutadapt_primer_str = 'cutadapt --cores={num_threads} --no-indels --error-rate {error_rate} ' \
'--minimum-length {read_min_length} --maximum-length {read_max_length} ' \
'--trimmed-only -g "{primerFwd};min_overlap={lenPrimerFwd}...{primerRev};min_overlap={lenPrimerRev}" '\
'--output {out_fasta} {in_fasta_path}'\
.format(**cmd_cutadapt_primer_dic)
Logger.instance().debug("Running: {}".format(cmd_cutadapt_primer_str))
if sys.platform.startswith("win"):
args = cmd_cutadapt_primer_str
else:
args = shlex.split(cmd_cutadapt_primer_str)
run_result = subprocess.run(args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
Logger.instance().info(run_result.stdout.decode())
###################################################################
#
# Reverse complement back rc fasta and pool
#
###################################################################
for file in results_list:
if "_trimmed" in file:
out_final_fasta_path = os.path.join(sorteddir, os.path.split(file)[-1])
in_fasta_path = os.path.join(tempdir, file)
if out_final_fasta_path.endswith(".gz"):
_open = partial(gzip.open)
elif out_final_fasta_path.endswith(".bz2"):
_open = partial(bz2.open)
else:
_open = open
if in_fasta_path.endswith(".gz"):
_open2 = partial(gzip.open)
elif in_fasta_path.endswith(".bz2"):
_open2 = partial(bz2.open)
else:
_open2 = open
if "_reversed" in file:
Logger.instance().debug("Pooling fwd and rc reads...")
out_final_fasta_path = out_final_fasta_path.replace("_reversed", "")
with _open(out_final_fasta_path, 'at') as fout:
with _open2(in_fasta_path, 'rt') as fin:
for line in fin.readlines():
if not line.startswith('>'):
if generic_dna: # Biopython <1.78
fout.write("%s\n" % str(
Seq(line.strip(), generic_dna).reverse_complement()))
else: # Biopython =>1.78
fout.write("%s\n" % str(
Seq(line.strip()).reverse_complement()))
else:
fout.write(line)
else:
with _open(out_final_fasta_path, 'at') as fout:
with _open2(in_fasta_path, 'rt') as fin:
for line in fin.readlines():
fout.write(line)
results_list = [os.path.split(result)[-1] for result in results_list if "_reversed" not in result]
del sample_info['mergedfasta']
del sample_info['primerrev']
del sample_info['primerfwd']
del sample_info['tagrev']
del sample_info['tagfwd']
sample_info['sortedfasta'] = results_list
sample_info_df = | pandas.DataFrame(sample_info) | pandas.DataFrame |
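# --- Illustrative sketch added by the editor (not part of the original command) ---
# pandas.DataFrame built from a dict of equal-length lists yields one column per key,
# which is how the per-sample bookkeeping above becomes the sorted-read info table.
# Hypothetical miniature version of sample_info:
demo_info = {'run': ['run1'], 'marker': ['MFZR'], 'sample': ['tpos1'],
             'sortedfasta': ['run1_MFZR_tpos1_1_trimmed.fasta']}
print(pandas.DataFrame(demo_info).shape)  # (1, 4)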
from urllib.parse import urlparse
import pytest
import pandas as pd
import numpy as np
from visions.core.implementations.types import *
from visions.application.summaries.summary import CompleteSummary
@pytest.fixture(scope="class")
def summary():
return CompleteSummary()
def validate_summary_output(test_series, visions_type, correct_output, summary):
trial_output = summary.summarize_series(test_series, visions_type)
for metric, result in correct_output.items():
assert metric in trial_output, "Metric `{metric}` is missing".format(
metric=metric
)
assert (
trial_output[metric] == result
), "Expected value {result} for metric `{metric}`, got {output}".format(
result=result, metric=metric, output=trial_output[metric]
)
def test_integer_summary(summary, visions_type=visions_integer):
test_series = pd.Series([0, 1, 2, 3, 4])
correct_output = {
"n_unique": 5,
"mean": 2,
"median": 2,
"std": pytest.approx(1.58113, 0.00001),
"max": 4,
"min": 0,
"n_records": 5,
"n_zeros": 1,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_integer_missing_summary(summary, visions_type=visions_integer):
test_series = pd.Series([0, 1, 2, 3, 4])
correct_output = {
"n_unique": 5,
"mean": 2,
"median": 2,
"std": pytest.approx(1.58113, 0.00001),
"max": 4,
"min": 0,
"n_records": 5,
"n_zeros": 1,
"na_count": 0,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_float_missing_summary(summary, visions_type=visions_float):
test_series = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0, np.nan])
correct_output = {
"n_unique": 5,
"median": 2,
"mean": 2,
"std": pytest.approx(1.58113, 0.00001),
"max": 4,
"min": 0,
"n_records": 6,
"n_zeros": 1,
"na_count": 1,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_bool_missing_summary(summary, visions_type=visions_bool):
test_series = pd.Series([True, False, True, True, np.nan])
correct_output = {"n_records": 5, "na_count": 1}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_categorical_missing_summary(summary, visions_type=visions_categorical):
test_series = pd.Series(
pd.Categorical(
[True, False, np.nan, "test"],
categories=[True, False, "test", "missing"],
ordered=True,
)
)
correct_output = {
"n_unique": 3,
"n_records": 4,
"na_count": 1,
"category_size": 4,
"missing_categorical_values": True,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_complex_missing_summary(summary, visions_type=visions_complex):
test_series = pd.Series([0 + 0j, 0 + 1j, 1 + 0j, 1 + 1j, np.nan])
correct_output = {"n_unique": 4, "mean": 0.5 + 0.5j, "na_count": 1, "n_records": 5}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_datetime_missing_summary(summary, visions_type=visions_datetime):
test_series = pd.Series(
[
pd.datetime(2010, 1, 1),
from pathlib import Path
import pandas as pd
from fuzzywuzzy import fuzz
import time
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
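# NOTE: concat_match, strippping_concat and strippping_adress are called inside main() but
# their definitions are not part of this excerpt. The sketches below are illustrative
# reconstructions inferred only from how they are called (reusing the already-imported
# fuzzywuzzy scorer); the original implementations may differ.
def concat_match(adress, concat_list):
    # return the 'municipality county' string from concat_list that best matches the raw address
    best_match, best_score = None, -1
    for candidate in concat_list:
        score = fuzz.partial_ratio(candidate, str(adress).lower())
        if score > best_score:
            best_match, best_score = candidate, score
    return best_match


def strippping_concat(adress, mun_cou):
    # strip the matched 'municipality county' part from the address string
    return str(adress).replace(str(mun_cou), '').strip()


def strippping_adress(adress, county_tokens):
    # strip any of the long county-name tokens from the address string
    stripped = str(adress)
    for token in county_tokens:
        stripped = stripped.replace(token, '')
    return stripped.strip()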
def main():
# start time of function
start_time = time.time()
# project directory
project_dir = str(Path(__file__).resolve().parents[1])
# dataframe with 'dirty' data
# zip_path = r'\data\interim\04_zipcodes_clean_test.csv'
zip_path = r'\data\interim\04_zipcodes_clean.csv'
# dataframe with 'municipalities data
mun_path = r'\data\interim\02_municipalities.csv'
# read dataframes
zip = pd.read_csv(project_dir + zip_path, sep=',')
mun = pd.read_csv(project_dir + mun_path, sep=',')
# COUNTIES THAT HAVE CHANGED THEIR NAME - FIXING THE MISTAKES
# counties that have changed their name
counties_changed = mun[mun["county"] == 'karkonoski[a]'].copy()
counties_changed["county"] = "jeleniogórski"
counties_changed["concat"] = counties_changed.apply(lambda cc: (cc["municipality"] + " " + cc["county"]).lower(), axis=1)
concat = counties_changed["concat"].tolist()
concat = list(dict.fromkeys(concat))
concat.sort()
# comparing counties that changed their name with the counties in ADRESS
zip["CONCAT"] = zip.apply(lambda zip: concat_match(zip["ADRESS"], concat)
if zip["ADRESS"].find('jeleniogórski') != -1
else zip['CONCAT'], axis=1)
# MUN_COU - creating one column with municipality and county
zip['MUN_COU'] = zip.apply(
lambda zip: zip['CONCAT']
if str(zip['CONCAT']).find('jeleniogórski') != -1
else zip['MUN_COU'], axis=1)
# stripping counties that changed their name from the ADRESS column
zip['ADRESS_2'] = zip.apply(lambda zip: strippping_concat(zip['ADRESS'], zip['MUN_COU'])
if str(zip['MUN_COU']).find('jeleniogórski') != -1
else zip['ADRESS_2'], axis=1)
# COUNTIES WITH TOO LONG NAMES - FIXING THE ADRESS 2 COLUMN
# extracting counties with names longer than 20 characters
mun['county_length'] = mun.apply(lambda mun: len(mun['county']), axis=1)
county_long = mun[mun['county_length'] >= 20].copy()
county_long["concat"] = county_long.apply(lambda cl: (cl["municipality"] + " " + cl["county"]).lower(), axis=1)
county_long = county_long['concat'].tolist()
county_long = list(dict.fromkeys(county_long ))
county_long.sort()
county_long_list = []
for county in county_long:
if county.find('-') != -1:
split_list = county.split('-')
county_long_list.append(split_list[0])
county_long_list.append(split_list[1])
else:
split_list = county.split(' ')
county_long_list.append(split_list[0])
county_long_list.append(split_list[1])
county_long_list = list(dict.fromkeys(county_long_list))
county_long_list.sort()
zip["CONCAT"].replace('', np.nan)
concat = zip[zip["CONCAT"].isnull()]
zip['ADRESS_2'] = zip.apply(lambda zip: strippping_adress(zip["ADRESS"], county_long_list)
if (pd.isna(zip["CONCAT"]) == True)
else zip["ADRESS_2"], axis=1)
# dropping unnecessary columns
zip.drop(columns=["CONCAT", 'CONCAT_LONG'], inplace=True)
# MUNICIPALITIES AND COUNTIES - GETTING THE RIGHT NAMES
# filling empty values with nan and then copying the previous value
zip["MUN_COU_2"] = zip["MUN_COU"]
zip["MUN_COU_2"] = zip["MUN_COU_2"].replace('', np.nan)
zip['MUN_COU_2'].fillna(axis=0, method='ffill', inplace=True)
# print(zip["MUN_COU_2"])
# ADRESS_3 - GETTING THE FULL ADRESS FOR ROWS THAT ARE SPLIT
zip["MUN_COU"] = zip["MUN_COU"].replace('', np.nan)
# creating new column
zip["ADRESS_3"] = zip["ADRESS_2"]
# getting rid of address data in empty / duplicated rows
zip["ADRESS_2"] = zip.apply(lambda zip: np.nan
if ( | pd.isna(zip["MUN_COU"]) | pandas.isna |
from __future__ import with_statement, print_function, absolute_import
from itertools import *
from functools import *
import json
from pandas import Series
from stitch.core.stitch_frame import StitchFrame
from stitch.core.utils import *
from stitch.core.errors import *
# ------------------------------------------------------------------------------
'''
.. module:: backingstore
:platform: Unix
:synopsis: Backingstore base for interfacing with Probe API
.. moduleauthor:: <NAME> <<EMAIL>>
'''
class BackingStore(Base):
def __init__(self):
super(BackingStore, self).__init__()
self._data = None
self._results = None
@property
def source_data(self):
# method for retrieving source data
raise EmptyFunction('Please define this function in your subclass')
@property
def data(self):
return self._data
@property
def results(self):
return self._results
# --------------------------------------------------------------------------
def get_database(self):
self.update()
database = {}
database['metadata'] = {}
database['metadata']['data_type'] = 'json orient=records, DataFrame'
database['data'] = self._data.to_json(orient='records')
return database
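# Note (illustrative, not from the original module): the 'data' payload built above can be
# turned back into a DataFrame with pandas.read_json(database['data'], orient='records').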
def update(self):
data = []
for datum in self.source_data:
data.append(Series(datum))
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from ast import literal_eval
from datetime import datetime
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
sub=test.copy()
print (train.shape)
print (test.shape)
# In[30]:
train.isnull().sum()
# In[31]:
test.isnull().sum()
# In[2]:
data=train.append(test)
data.reset_index(inplace=True)
data.drop('index',axis=1,inplace=True)
data.head(3)
# ### Data Descriptive Analysis
# In[3]:
data.shape
# In[4]:
data.describe(include='all')
# In[ ]:
data[data['revenue']==data['revenue'].max()]
# In[5]:
data[data['budget']==data['budget'].max()]
# In[6]:
data.head()
# # Feature Engineering
# In[7]:
# Let's see how much missing data we have here first
data.isnull().sum()
# In[8]:
# There are several columns with a high percentage of missing values. I may consider dropping them afterward.
print ('The percentage of missing value of each column:')
print ('*'*50)
print (round(data.isnull().sum()/data.shape[0]*100,2))
# In[ ]:
# For some categorical data, I'd like to fill in NA with the mode. I check their modes first to see if that is reasonable.
print (data['genres'].mode()[0])
print (data['spoken_languages'].mode()[0])
print (data['production_companies'].mode()[0])
print (data['production_countries'].mode()[0])
# In[ ]:
# There is only one missing value in 'release_date'. I'll google it and fill na manually.
data[data['release_date'].isnull()]
# In[ ]:
# The ideal way to fill in missing values of 'spoken_languages' would be to use 'original_language'. However, the data
# formats are quite different, which would require extra processing. Only 7 of them are not English. So I'll stick
# to using the mode to fill NA for 'spoken_languages'.
data[data['spoken_languages'].isnull()]['original_language'].value_counts()
# In[ ]:
# Since I will convert 'cast' and 'crew' into no. of cast/crew after feature engineering, I just fill in 0 here.
data['cast'].fillna('0',inplace=True)
data['crew'].fillna('0',inplace=True)
# As mentioned before, these four columns fill in with mode.
data['genres'].fillna(data['genres'].mode()[0],inplace=True)
data['production_countries'].fillna(data['production_countries'].mode()[0],inplace=True)
data['production_companies'].fillna(data['production_companies'].mode()[0],inplace=True)
data['spoken_languages'].fillna(data['spoken_languages'].mode()[0],inplace=True)
# Google says this movie's release date is 3/20/01. I choose to believe Google.
data['release_date'].fillna('3/20/01',inplace=True)
# For the continuous variable, fill in with mean value.
data['runtime'].fillna(data['runtime'].mean(),inplace=True)
# Just using 'original_title' to fill in 'title'.
data['title'].fillna(data['original_title'],inplace=True)
# In[ ]:
# Beautiful!! We have sorted most of the missing values.
data.isnull().sum()
# In[9]:
# Convert 'belongs_to_collection' to a binary value: whether or not the movie is part of a collection.
data['belongs_to_collection'].fillna(0,inplace=True)
data['belongs_to_collection']=data['belongs_to_collection'].apply(lambda x:1 if x!=0 else x)
# In[10]:
# Almost all of the movies are released. This variable may not be so useful.
data['status'].value_counts()
# In[ ]:
# I'm not gonna dig into analysing 'words' stuffs. These are the variables I'm not gonna use in my model.
notusing=['Keywords',
'homepage',
'id',
'imdb_id',
'original_language',
'original_title',
'overview',
'poster_path',
'status',
'tagline']
data.drop(notusing,axis=1,inplace=True)
# In[ ]:
data.head()
# In[ ]:
# Now let's create some functions dealing with the columns with json-like values.
def find_name(string):
s=eval(string) # list of dict
l=[]
for i in s:
l.append(i['name'])
return l
def find_language(string):
t=eval(string)
l=[]
for i in t:
l.append(i['iso_639_1'])
return l
def find_actors(string):
if eval(string)==0:
return 0
else:
t=eval(string)
l=[]
for i in t:
l.append(i['name'])
return l
# In[ ]:
# Apply the functions to those json-like columns.
data['cast']=data['cast'].apply(find_actors)
data['crew']=data['crew'].apply(find_actors)
data['genres']=data['genres'].apply(find_name)
data['production_companies']=data['production_companies'].apply(find_name)
data['production_countries']=data['production_countries'].apply(find_name)
data['spoken_languages']=data['spoken_languages'].apply(find_language)
# I converted 'cast' and 'crew' into the no. of cast/crew, which is doable after the previous process.
data['no_of_cast']=data['cast'].apply(lambda x:len(x) if x!=0 else 0)
data['no_of_crew']=data['crew'].apply(lambda x:len(x) if x!=0 else 0)
data.drop(['cast','crew'],axis=1,inplace=True)
data.head()
# In[ ]:
# Most of the movies contain 1 to 3 genres. Some have more.
print ('Movies with each no. of genres')
print ('*'*50)
print (data['genres'].apply(lambda x:len(x)).value_counts())
# In[11]:
# Convert the 'genres' into dummy variables.
# The logic behind this transformation can be found here. https://stackoverflow.com/questions/29034928/pandas-convert-a-column-of-list-to-dummies
# It is quite clear to me, doing me a huge favor.
data=pd.get_dummies(data['genres'].apply(pd.Series).stack()).sum(level=0).merge(data,left_index=True,right_index=True)
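# A tiny illustrative walk-through of the list-to-dummies trick above (not part of the original
# notebook; 'toy' is a made-up name):
#   toy = pd.DataFrame({'genres': [['Action', 'Drama'], ['Drama']]})
#   toy['genres'].apply(pd.Series).stack()           # long Series: one row per (movie, genre) pair
#   pd.get_dummies(...)                              # one 0/1 indicator column per genre
#   .sum(level=0)                                    # collapse back to one row per movie
#   .merge(toy, left_index=True, right_index=True)   # attach the dummies to the original frame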
# In[ ]:
data.head()
# ### Missing values in 'budget' (recorded as 0) are filled with the mean budget of the genres that movie contains. I calculate the mean budget of each genre and then use it to fill in the missing budgets.
# In[12]:
# First, calculate the mean budget of each genre.
list_of_genres=[]
for i in data['genres']:
for j in i:
if j not in list_of_genres:
list_of_genres.append(j)
d={}
for i in list_of_genres:
genre=i
mean_budget=data.groupby(i)['budget'].mean()
d[genre]=mean_budget[1]
pd.Series(d).sort_values()
# In[13]:
# This part is just for inspection: to see how many countries/companies/languages there are in total.
list_of_companies=[]
for i in data['production_companies']:
for j in i:
if j not in list_of_companies:
list_of_companies.append(j)
list_of_countries=[]
for i in data['production_countries']:
for j in i:
if j not in list_of_countries:
list_of_countries.append(j)
len(list_of_countries)
list_of_language=[]
for i in data['spoken_languages']:
for j in i:
if j not in list_of_language:
list_of_language.append(j)
len(list_of_language)
print ('The total number of company occurs is {}'.format(len(list_of_companies)))
print ('The total number of country occurs is {}'.format(len(list_of_countries)))
print ('The total number of language occurs is {}'.format(len(list_of_language)))
# In[14]:
# Replace the 0 budget with nan.
data['budget'].replace(0,np.nan,inplace=True)
data[data['budget'].isnull()][['budget','genres']].head(10)
# In[15]:
# This function will calculate the mean budget value of that movie.
# For example, for the index 4 movie, the function will calculate the mean of the mean budget of Action and the mean
# budget of Thriller.
def fill_budget(l):
el=[]
for i in l:
if d[i] not in el:
el.append(d[i])
return (np.mean(el))
# In[16]:
data['budget'].fillna(data['genres'].apply(fill_budget),inplace=True)
# In[17]:
# Most of the movies are produced by 1 to 3 companies. Some have more.
print ('Movies with each no. of production company')
print ('*'*50)
data['production_companies'].apply(lambda x:len(x)).value_counts()
# In[18]:
# Most of the movies were shot in under 2 countries. Some have more.
print ('Movies with each no. of production_countries')
print ('*'*50)
data['production_countries'].apply(lambda x:len(x)).value_counts()
# In[ ]:
# Surprisingly, the budget doesn't have much to do with how many countries the movie was shot in, but with how many
# companies were involved.
data['no_of_country']=data['production_countries'].apply(lambda x:len(x))
data['no_of_company']=data['production_companies'].apply(lambda x:len(x))
data[['budget','no_of_country','no_of_company']].corr()
# ### Deal with release date
# In[19]:
data['release_date'].head()
# In[20]:
# If we just apply pandas' datetime parsing, some years like 2048, 2050, 2072 will appear. I'm pretty
# sure time travel has not been invented yet. The year has to be handled first: if it is greater than 18, it
# must be 19xx.
def fix_year(x):
year=x.split('/')[2]
if int(year)>18:
return x[:-2]+'20'+year
else:
return x[:-2]+'19'+year
data['release_date']=data['release_date'].apply(fix_year)
data['release_date']=pd.to_datetime(data['release_date'],infer_datetime_format=True)
# In[21]:
# There might still be some ambiguities like 11, 15, 09. How does the computer know whether that means 2011 or 1911? It doesn't.
# So eventually I decided not to use the 'year'. But the month, day, and weekday can still be really informative.
#data['year']=data['release_date'].dt.year
data['month']=data['release_date'].dt.month
data['day']=data['release_date'].dt.day
data['weekday']=data['release_date'].dt.weekday
# Mapping weekday and month.
data['weekday']=data['weekday'].map({0:'Mon',1:'Tue',2:'Wed',3:'Thur',4:'Fri',5:'Sat',6:'Sun'})
data['month']=data['month'].map({1:'Jan',2:'Feb',3:'Mar',4:'Apr',5:'May',6:'Jun',7:'July',8:'Aug',9:'Sep',10:'Oct',11:'Nov',12:'Dec'})
# In[22]:
data[['release_date','month','day','weekday']].head()
# In[23]:
data.drop(['release_date'],axis=1,inplace=True)
data.iloc[:5,20:]
# In[24]:
# Nearly 90% of the movies include English as a spoken language.
# I'd like to know whether a movie being in a foreign language or not affects the revenue.
l=[]
for i in data['spoken_languages']:
if 'en' in i:
l.append(i)
len(l)/data.shape[0]
# In[25]:
# Convert 'spoken_languages' into binary variable 'language_en'.
def en_or_not(l):
if 'en' in l:
return 1
else:
return 0
data['language_en']=data['spoken_languages'].apply(en_or_not)
data.drop('spoken_languages',axis=1,inplace=True)
# In[26]:
# Same situation with 'production_countries'. Nearly 80% of the movies were shot in the USA.
u=[]
for i in data['production_countries']:
if 'United States of America' in i:
u.append(i)
len(u)/data.shape[0]
# In[27]:
# Convert 'production_countries' into binary variable 'produce_in_USA'
def usa_or_not(l):
if 'United States of America' in l:
return 1
else:
return 0
data['produce_in_USA']=data['production_countries'].apply(usa_or_not)
data.drop('production_countries',axis=1,inplace=True)
# In[44]:
top_company=pd.read_csv('../input/top-company-list/top_company.csv')
top_company=top_company['Production Company'].tolist()
top_company[:5]
# In[45]:
data.iloc[:,20:].head()
# In[46]:
#data['top_director']=data['director'].apply(lambda x:1 if x in top_director else 0)
def get_top_company(l):
n=0
for i in l:
if i in top_company:
n+=1
return n
data['top_production_company']=data['production_companies'].apply(get_top_company)
data.drop('production_companies',axis=1,inplace=True)
# ## Normalisation
# In[47]:
# To avoid the model being affected by the scale of each variable, normalise the continuous variable.
data['budget']=data['budget'].apply(lambda x:(x-np.min(data['budget']))/(np.max(data['budget']-np.min(data['budget']))))
data['popularity']=data['popularity'].apply(lambda x:(x-np.min(data['popularity']))/(np.max(data['popularity']-np.min(data['popularity']))))
data['runtime']=data['runtime'].apply(lambda x:(x-np.min(data['runtime']))/(np.max(data['runtime']-np.min(data['runtime']))))
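# Note (illustrative, not from the original notebook): the same min-max scaling can be written
# without the row-wise lambdas, e.g.
#   data['budget'] = (data['budget'] - data['budget'].min()) / (data['budget'].max() - data['budget'].min())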
# In[48]:
# Set the index to movie title, and we are ready to go!!
data.set_index('title',inplace=True)
data.head()
# In[49]:
plt.figure(figsize=(20,12))
plt.title('Violin plot of revenue of each month',fontsize=20)
sns.violinplot(x=data[data['revenue'].notnull()]['month'],y=data[data['revenue'].notnull()]['revenue'],scale='count')
# In[50]:
plt.figure(figsize=(20,12))
plt.title('Violin plot of revenue of each weekday',fontsize=20)
sns.violinplot(x=data[data['revenue'].notnull()]['weekday'],y=data[data['revenue'].notnull()]['revenue'],scale='count')
# In[51]:
data.drop('genres',axis=1,inplace=True)
# Month and weekday need to be converted to dummy variables.
data=pd.get_dummies(data)
# In[52]:
data.iloc[:5,20:29]
# ### All done!!! We're good to do some exploratory analysis!
# # EDA
# In[53]:
# For the EDA, I only use the training dataset.
Train=data[data['revenue'].notnull()]
Train.head(5)
# In[54]:
# Does it matter whether a movie is part of a series or not?
print (Train.groupby('belongs_to_collection')['revenue'].mean())
Train.groupby('belongs_to_collection')['revenue'].mean().plot.barh()
# In[55]:
Train['belongs_to_collection'].value_counts()
# In[56]:
sns.swarmplot(x=Train['belongs_to_collection'],y=Train['revenue'])
# In[57]:
list_of_genres
# In[58]:
# Similar to the series of mean budget per genre created earlier, here we create one for mean revenue.
g={}
for i in list_of_genres:
mean_rev=Train.groupby(i)['revenue'].mean()
g[i]=mean_rev[1]
g
# In[59]:
plt.figure(figsize=(20,8))
pd.Series(g).sort_values().plot.barh()
plt.title('Mean revenue of each genre',fontsize=20)
plt.xlabel('Revenue',fontsize=20)
# In[60]:
print (pd.DataFrame(Train.groupby('language_en')['revenue'].mean()))
plt.figure(figsize=(10,4))
Train.groupby('language_en')['revenue'].mean().sort_values().plot.barh()
plt.title('Mean revenue of is or is not foreign film.',fontsize=20)
plt.xlabel('Revenue',fontsize=20)
# In[61]:
plt.figure(figsize=(10,4))
sns.swarmplot(x=Train['language_en'],y=Train['revenue'])
plt.title('Swarm plot of is or is not foreign film',fontsize=20)
# In[62]:
print (pd.DataFrame(Train.groupby('produce_in_USA')['revenue'].mean()))
plt.figure(figsize=(10,4))
Train.groupby('produce_in_USA')['revenue'].mean().sort_values().plot.barh()
plt.title('Mean revenue of shoot in USA or not')
plt.xlabel('revenue')
# In[63]:
plt.figure(figsize=(10,4))
sns.swarmplot(x=Train['produce_in_USA'],y=Train['revenue'])
plt.title('Swarm plot of movie produced in USA or not',fontsize=20)
# In[64]:
plt.figure(figsize=(10,4))
plt.title('Mean revenue of each no. of top production company',fontsize=20)
Train.groupby('top_production_company')['revenue'].mean().plot.bar()
plt.xlabel('No. of top production company',fontsize=20)
plt.ylabel('Revenue',fontsize=20)
# In[65]:
plt.figure(figsize=(10,4))
plt.title('Swarm plot of mean revenue of each no. of top production company',fontsize=20)
plt.xlabel('No. of top production company',fontsize=20)
plt.ylabel('Revenue',fontsize=20)
sns.swarmplot(x=Train['top_production_company'],y=Train['revenue'])
# In[66]:
plt.figure(figsize=(8,8))
plt.scatter(Train['runtime'],Train['revenue'])
plt.title('Scatter plot of runtime vs revenue',fontsize=20)
plt.xlabel('runtime',fontsize=20)
plt.ylabel('Revenue',fontsize=20)
# In[67]:
plt.figure(figsize=(8,8))
plt.scatter(Train['budget'],Train['revenue'])
plt.title('Scatter plot of budget vs revenue',fontsize=20)
plt.xlabel('budget',fontsize=20)
plt.ylabel('Revenue',fontsize=20)
# In[68]:
plt.figure(figsize=(8,8))
plt.scatter(Train['popularity'],Train['revenue'])
plt.title('Scatter plot of popularity vs revenue',fontsize=20)
plt.xlabel('popularity',fontsize=20)
plt.ylabel('Revenue',fontsize=20)
# In[69]:
month=['Jan','Feb','Mar','Apr','May','Jun','July','Aug','Sep','Oct','Nov','Dec']
m={}
for i in month:
mean=Train.groupby('month_'+i)['revenue'].mean()
m[i]=mean[1]
pd.Series(m)
# In[70]:
for i in month:
print (i,Train['month_'+i].value_counts()[1])
# In[71]:
plt.figure(figsize=(20,8))
pd.Series(m)
"""
Construct the graph representation of brain imaging and population graph
"""
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics.pairwise import cosine_similarity
def brain_graph(logs, atlas, path, data_folder):
if not os.path.exists(path):
os.makedirs(path)
# the global mean is not included in ho_labels.csv
atlas.loc[-1] = [3455, 'Background']
print(atlas.shape)
# label the regions as right/left/global mean
label = []
for e in atlas['area'].values:
if e.startswith('Left'):
label.append(0)
elif e.startswith('Right'):
label.append(1)
else:
label.append(-1)
atlas['label'] = label
atlas.sort_values('index', inplace=True)
atlas = atlas.reset_index().drop('level_0', axis=1)
###################
# Adjacent matrix #
###################
print('Processing the adjacent matrix...')
# now the index in [0, 110]
adj = np.zeros([111, 111])
not_right = [i for i in range(111) if atlas['label'][i] != 1]
not_left = [i for i in range(111) if atlas['label'][i] != 0]
not_gb = [i for i in range(111) if atlas['label'][i] != -1]
# Build the bipartite brain graph
for idx in range(111):
if atlas['label'][idx] == 0:
adj[idx, not_left] = 1
elif atlas['label'][idx] == 1:
adj[idx, not_right] = 1
elif atlas['label'][idx] == -1:
adj[idx, not_gb] = 1
# now form the sparse adj matrix
# node id:[1, 111*871]
node_ids = np.array_split(np.arange(1, 111 * 871 + 1), 871)
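# np.arange(1, 111 * 871 + 1) split into 871 equal chunks gives each subject a block of 111
# consecutive ids, i.e. node_ids[i][j] == i * 111 + j + 1 for subject i and region j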
adj_matrix = []
for i in range(871):
node_id = node_ids[i]
for j in range(111):
for k in range(111):
if adj[j, k]:
adj_matrix.append([node_id[j], node_id[k]])
# save sparse adj matrix
pd.DataFrame(adj_matrix)
from lib.allgemein import liste_in_floats_umwandeln
import pandas as pd
import untangle
from decimal import *
#written by <NAME>
def get_xml_RecordTime_excitationwl(dateiname):
obj = untangle.parse(dateiname)
RecordTime = obj.XmlMain.Documents.Document['RecordTime']
excitationwl = float(obj.XmlMain.Documents.Document.xDim.Calibration['LaserWave'])
return RecordTime, excitationwl
def get_timestamps(dateiname):
obj = untangle.parse(dateiname)
predf = []
for i in range(0, len(obj.XmlMain.Documents.Document.Data.Frame)):
timestamp = obj.XmlMain.Documents.Document.Data.Frame[i]['TimeStamp']
timestamp = Decimal(timestamp)
predf.append(timestamp)
posi = list(range(0, len(predf), 1))
colunames = []
for i in posi:
colu = 'Frame ' + str(i + 1)
colunames.append(colu)
df = pd.DataFrame(predf, index=colunames, columns=['timestamp'])
df_timestamps = df.transpose()
return df_timestamps
def get_positions(dateiname):
obj = untangle.parse(dateiname)
predf = []
for i in range(0,len(obj.XmlMain.Documents.Document.Data.Frame)):
positions = obj.XmlMain.Documents.Document.Data.Frame[i]['ValuePosition']
z = positions.split(";")
ft = liste_in_floats_umwandeln(z)
predf.append(ft)
posi=list(range(0, len(predf),1))
colunames = []
for i in posi:
colu = 'Frame ' + str(i + 1)
colunames.append(colu)
df = pd.DataFrame(predf, index=colunames, columns=['x [µm]','y [µm]','z [µm]'])
df = df.transpose()
return df
def get_relwavenumber(dateiname):
obj = untangle.parse(dateiname)
relwavenumber = obj.XmlMain.Documents.Document.xDim.Calibration['ValueArray']
relwavenumber = relwavenumber.split('|')
predf = liste_in_floats_umwandeln(relwavenumber)
del predf[0]
df1 = pd.DataFrame(predf, columns=['relWavenumber [1/cm]'])
'''
This file is used to read in the json files that contain the metadata from the PartNet meta data files and ultimately creates the first round of the dfmeta file.
It uses some complicated tree structures to capture the data and attempt to generate a description.
However, this method was mostly superseded by the descriptor.py file, which uses a different methodology to make the descriptions.
But that file still uses a large amount of the data generated here.
This file is only intended to be run locally.
'''
#%% Imports
import json
import os
import inflect
from nltk.corpus import wordnet as wn
import pandas as pd
import pandas_ods_reader as por
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import utils as ut
import configs as cf
#%% Setup text processing
inflect = inflect.engine()
vocab = set()
#%% Define loadset
cats_to_load = ['Table','Chair','Lamp','Faucet','Clock','Bottle','Vase','Laptop','Bed','Mug','Bowl']
catids_to_load = [4379243,3001627,3636649,3325088,3046257,2876657,3593526,3642806,2818832,3797390,2880940]
#%% Get JSON files and preprocess
def getAndPrintJSON(index, dir_fp, reverse=False, verbose=False) :
indent = 2
json_res_fp = os.path.join(dir_fp, str(index), 'result_after_merging.json')
json_meta_fp = os.path.join(dir_fp, str(index), 'meta.json')
meta = ut.getJSON(json_meta_fp)
mid = meta['model_id']
mcat = meta['model_cat']
annoid = meta['anno_id']
if (not mcat in cats_to_load) : return None
if (verbose) : print('{}\n{}\n{}'.format(index, mid, mcat))
res = ut.getJSON(json_res_fp)
json_formatted_str = json.dumps(res, indent=indent)
resl = json_formatted_str.splitlines()
output = []
index = 0
for line in resl :
if ('name' in line) :
level = int(line.split(':')[0].count(' ') / (2*indent) - 1)
name = line.split('"')[-2]
name = name.replace('_', ' ')
output.append({'name':name, 'level':level})
vocab.add(name)
index = index + 1
if (output[0]['level'] > output[-1]['level']) :
output.reverse()
output.insert(0, {'mid':mid})
output.insert(1, {'mcat':mcat})
output.insert(2, {'annoid':annoid})
return output
#%% Get all JSON data
json_dir = cf.PARTNET_META_STATS_DIR
json_fps = os.listdir(json_dir)
data = []
for fp in tqdm(json_fps, total=len(json_fps)) :
shape = getAndPrintJSON(fp, json_dir, False)
if (shape == None) : continue
data.append(shape)
dfmids = pd.DataFrame([ [item[0]['mid'], item[1]['mcat'], item[2]['annoid']] for item in data], columns=['mid', 'cattext', 'annoid'])
#%% Explore vocab
def getSyns(word) :
syns = wn.synsets(word)
synonyms = []
for syn in syns :
for l in syn.lemmas() :
synonyms.append(l.name())
return synonyms
def getDef(word) :
syns = wn.synsets(word)
if len(syns) > 0 :
return syns[0].definition()
else :
return 'N/A'
for word in vocab :
for w in word.split(' ') :
syns = getSyns(w)
print('{:32} {}'.format(w, set(syns[1:5])))
for word in vocab :
for w in word.split(' |') :
print('{:32} {}'.format(w, getDef(w)))
for word in vocab :
defs = wn.synsets(word)
print(word)
if len(defs) > 0 :
print('{} : {}'.format(word, defs[0]))
#%% Node class for gathering tree info from the given part hierarchy
class Node:
def __init__(self, name='root', level=0):
self.name = name
self.children = []
self.level = level
self.total_children = 0
self.quantity = 1
self.mid = ''
self.mcat = ''
self.desc_level = 0
self.desc_max = 0
def __repr__(self):
return "{}".format(self.name)
def printTree(self):
for child in self.children:
print('{:2d} : {} : {} : {}{}'.format(child.level, child.total_children, child.quantity, ' '*child.level*2, child.name))
child.printTree()
def getDetails(self, names_only=False) :
desc = ''
if not self.name == 'root' :
desc = ' {},{},{},{}|'.format(self.name, self.level, len(self.children), self.quantity)
if (names_only) : desc = '{}|'.format(self.name)
for child in self.children :
desc = desc + child.getDetails(names_only)
return desc
def treesEqual(self, root1, root2) :
return (root1.getDetails() == root2.getDetails())
def collapseMulti(self) :
if (len(self.children) == 0) :
return
child_descs = [child.getDetails() for child in self.children]
uniq_descs = set(child_descs)
ids_to_remove = []
for uniq in uniq_descs :
found = False
for i, entry in enumerate(child_descs) :
if (uniq==entry and not found) :
# print(' {} : {} : {} '.format(entry, i, len(self.children)))
self.children[i].quantity = child_descs.count(uniq)
found = True
elif (uniq==entry and found) :
# print('removed', entry, i)
ids_to_remove.append(i)
for index in sorted(ids_to_remove, reverse=True):
del self.children[index]
for child in self.children :
child.collapseMulti()
def removeZeros(self) :
for child in reversed(self.children) :
if (child.quantity == 0) :
self.children.remove(child)
child.removeZeros()
if (child.quantity > 1) : child.name = inflect.plural(child.name)
def sumUp(self) :
total = len(self.children)
for child in self.children :
total = total + child.sumUp()
self.total_children = total
return total
def dSmall(self) :
names = list(filter(None, self.getDetails(names_only=True).split('|')))
desc = ''
if len(names) == 0 : desc = 'Nothing'
if len(names) == 1 : desc = '{}.'.format(names[0])
if len(names) == 2 : desc = '{} with a {}.'.format(names[0], names[1])
if len(names) == 3 : desc = '{} with a {} made of a {}.'.format(names[0],names[1], names[2])
if len(names) >= 4 : desc = 'Overload'
return desc
def dReg(self, index, outof) :
if (len(self.children) == 0) :
return 'OVERLOAD: {} '.format(self.name)
# return ' {} {}.'.format( self.quantity if self.quantity>1 else 'a', self.name)
if (len(self.children) == 1) :
# multi = 'each of ' if self.quantity > 1 else ''
multi = (self.quantity > 1)
if (len(self.children[0].children) == 0) :
if multi :
return 'each {} is made of {} {}. '.format( inflect.singular_noun(self.name), self.children[0].quantity if self.children[0].quantity>1 else 'a', self.children[0].name)
else :
return 'the {} is made of {} {}. '.format(self.name, self.children[0].quantity if self.children[0].quantity>1 else 'a', self.children[0].name)
elif (len(self.children[0].children) == 1) : # has just 1 child
return 'the {} which is {} '.format(self.name, self.children[0].dReg(index+1, outof))
else : # has multiple children
return 'the {} which is {} '.format(self.name, self.children[0].dReg(index+1, outof))
desc_subs = ''
i = len(self.children)
imax = i
for child in self.children :
singular = not inflect.singular_noun(child.name)
multi = 'a' if singular else ' {} '.format(child.quantity)
template = ', {} {} '
if i==imax : template = '{} {} '
if i==1 : template = 'and {} {} '
desc_subs = desc_subs + template.format(multi, child.name)
i = i - 1
return 'the {} has {}. '.format(self.name, desc_subs)
def dRoot(self) :
if (self.mcat == 'Vase') :
return self.dRootVase()
if (self.level<1 and self.total_children < 3) :
return ' {} '.format(self.dSmall())
if (len(self.children) > 1 or self.level >= 2) :
desc_subs = ''
desc_all_subs = ''
i = len(self.children)
imax = i
for child in self.children :
singular = not inflect.singular_noun(child.name)
multi = 'a' if singular else 'a set of {}'.format(child.quantity)
template = '{}, {} {}'
if i==imax : template = '{} {} {}'
if i==1 : template = '{} and {} {}'
desc_subs = template.format(desc_subs, multi, child.name)
desc_all_subs = desc_all_subs + (child.dReg(1,0) if (len(child.children)>0) else '')
i = i - 1
return 'a {} that is made of {}. {}'.format(self.name, desc_subs, desc_all_subs)
else :
return '{} that is {}'.format( self.name, self.children[0].dRoot() )
def dSmallVase(self) :
names = list(filter(None, self.getDetails(names_only=True).split('|')))
desc = ''
if len(names) == 0 : desc = 'Nothing'
if len(names) == 1 : desc = '{}.'.format(names[0])
if len(names) == 2 : desc = '{} with a {}.'.format(names[0], names[1])
if len(names) == 3 : desc = '{} with a {} made of a {}.'.format(names[0],names[1], names[2])
if len(names) >= 4 : desc = 'Overload'
return desc
def dRegVase(self) :
if self.name == 'containing things' :
desc_subs = ''
i = len(self.children)
imax = i
for child in self.children :
singular = not inflect.singular_noun(child.name)
multi = 'a' if singular else ' {} '.format(child.quantity)
template = ', {} {} '
if i==imax : template = '{} {} '
if i==1 : template = 'and {} {} '
desc_subs = desc_subs + template.format(multi, child.name)
i = i - 1
return ' the {} contains {}. '.format('vase', desc_subs)
if (len(self.children) == 0) :
return 'OVERLOAD: {} '.format(self.name)
if (len(self.children) == 1) :
multi = (self.quantity > 1)
if (len(self.children[0].children) == 0) :
if multi :
return 'each {} is made of {} {}. '.format( inflect.singular_noun(self.name), self.children[0].quantity if self.children[0].quantity>1 else 'a', self.children[0].name)
else :
return 'the {} is made of {} {}. '.format(self.name, self.children[0].quantity if self.children[0].quantity>1 else 'a', self.children[0].name)
elif (len(self.children[0].children) == 1) : # has just 1 child
return 'the {} which is {} '.format(self.name, self.children[0].dRegVase())
else : # has multiple children
return 'the {} which is {} '.format(self.name, self.children[0].dRegVase())
desc_subs = ''
i = len(self.children)
imax = i
for child in self.children :
singular = not inflect.singular_noun(child.name)
multi = 'a' if singular else ' {} '.format(child.quantity)
template = ', {} {} '
if i==imax : template = '{} {} '
if i==1 : template = 'and {} {} '
desc_subs = desc_subs + template.format(multi, child.name)
i = i - 1
return 'the {} has {}. '.format(self.name, desc_subs)
def dRootVase(self) :
new_children = self.children
if (self.level<1 and (self.total_children) < 3) :
return ' {} '.format(self.dSmallVase())
if (len(new_children) > 1 or self.level >= 2) :
desc_subs = ''
desc_all_subs = ''
i = len(new_children)
imax = i
for child in new_children :
if child.name == 'containing things' :
desc_all_subs = desc_all_subs + child.dRegVase()
continue
multi = 'a' if child.quantity > 0 else 'a set of {}'.format(child.quantity)
template = '{}, {} {}'
if i==imax : template = '{} {} {}'
if i==1 : template = '{} and {} {}'
desc_subs = template.format(desc_subs, multi, child.name)
desc_all_subs = desc_all_subs + (child.dRegVase() if (len(child.children)>0) else '')
i = i - 1
return 'a {} that is made of {}. {}'.format(self.name, desc_subs, desc_all_subs)
else :
return '{} that is {}'.format( self.name, new_children[0].dRootVase() )
'''
Exampple descriptions and tree structures :
a chair that has a chair back and a chair seat. the chair back is made of a back surface with a back
single surface. the chair seat is made of a seat surface with a seat single surface.
0 : 6 : 1 : chair
1 : 2 : 1 : chair back
2 : 1 : 1 : back surface
3 : 0 : 1 : back single surface
1 : 2 : 1 : chair seat
2 : 1 : 1 : seat surface
3 : 0 : 1 : seat single surface
0 : 12 : 1 : table
1 : 11 : 1 : regular table
2 : 7 : 1 : table base
3 : 6 : 1 : regular leg base
4 : 0 : 2 : circular stretchers
4 : 0 : 4 : legs
2 : 2 : 1 : tabletop
3 : 1 : 1 : tabletop surface
4 : 0 : 1 : board
0 : 8 : 1 : pot
1 : 2 : 1 : containing things
2 : 0 : 1 : liquid or soil
2 : 0 : 1 : plant
1 : 1 : 1 : body
2 : 0 : 1 : container
1 : 2 : 1 : base
2 : 1 : 1 : foot base
3 : 0 : 4 : feet
a pot that is made of a body and a base. it contains liquid or soil and a plant. the body is made of a container and the
base is made of a food base with 4 feet.
'''
#%% Generate descriptions
phrases = {
'combiner' : ['with', 'with', 'with', 'that has', 'made up of'],
'starter' : ['a'],
'multi' : ['each with', 'each one has', 'each having'],
}
def getShapeTree(data, max_depth = 10) :
fdata = [item for item in data[3:] if item['level'] <= max_depth]
root = Node()
root.mid = data[0]['mid']
root.mcat = data[1]['mcat']
root.annoid = data[2]['annoid']
# print('{} {} {}'.format(len(data), root.mid, root.mcat, len(root.children)))
for record in fdata:
last = root
for _ in range(record['level']):
last = last.children[-1]
last.children.append(Node(record['name'], record['level']))
root.collapseMulti()
# root.removeZeros()
root.sumUp()
return root
fix_replacements = [[' ', ' '],
[' .', '.'],
['..', '.'],
[' ,', ',']]
def removeDescExtras(desc) :
for rp in fix_replacements :
desc = desc.replace(rp[0], rp[1])
if desc.startswith('a ') : desc = desc[2:]
return desc
#%% Get shape descriptions based on tree hierarchies and create dataframe
# rn.shuffle(data)
cat_index = 3 # cats_to_load = [' 0 Table','1 Chair','2 Lamp','3 Faucet','4 Clock','5 Bottle','6 Vase','7 Laptop','8 Knife']
cat_data = [entry for entry in data if cats_to_load[cat_index] in entry[1]['mcat']]
shapes = [getShapeTree(shape, 4) for shape in data]
dfdesc = {}
for index, shape in enumerate(shapes) :
desc = '{}'.format(shape.children[0].dRoot()) if not shape.mcat == 'Vase' else '{}'.format(shape.children[0].dRootVase())
desc = removeDescExtras(desc)
details = shape.getDetails()
dfdesc[shape.mid] = [shape.mcat, shape.annoid, desc, details]
# print('\nIndex: {:2d} {} {}\n{}'.format(index, shape.mcat, shape.mid, desc))
# shape.prinTree()
dfdesc = pd.DataFrame.from_dict(dfdesc, orient='index')
dfdesc.columns = ['cattext', 'annoid', 'desc', 'details']
dfdesc.index.name = 'mid'
#%% Inspect trees
for i, shape in enumerate(shapes[:20]) :
print('\nIndex: {}'.format(i))
shape.printTree()
#%% Load all meta data filepaths from ShapeNetCore database
meta_dir = '/media/starstorms/DATA/Insight/ShapeNet/stats/ShapeNetCore.v2'
meta_fps = []
for dirName, subdirList, fileList in os.walk(meta_dir):
for fname in fileList:
fullpath = os.path.join(dirName, fname)
meta_fps.append(fullpath)
#%% Get all metadata and put into DF (takes a long time, use precomputed below)
dfmeta = pd.DataFrame(columns = ['mid', 'cat', 'numv',
'xmin', 'xmax', 'centx', 'dx',
'ymin', 'ymax', 'centy', 'dy',
'zmin', 'zmax', 'centz', 'dz'])
i = 0
for meta_fp in tqdm(meta_fps, total=len(meta_fps)) :
meta_js = ut.getJSON(meta_fp)
mcat = meta_fp.split('/')[8]
dfmeta.loc[i] = [meta_js['id'], mcat, meta_js['numVertices'],
meta_js['min'][0], meta_js['max'][0], meta_js['centroid'][0], meta_js['max'][0] - meta_js['min'][0],
meta_js['min'][1], meta_js['max'][1], meta_js['centroid'][1], meta_js['max'][1] - meta_js['min'][1],
meta_js['min'][2], meta_js['max'][2], meta_js['centroid'][2], meta_js['max'][2] - meta_js['min'][2] ]
i = i + 1
#%% Write / reload the data from the previous cell
# pd.DataFrame.to_csv(dfmeta, '/home/starstorms/Insight/ShapeNet/meta/df_meta_raw.csv')
dfmeta = pd.read_csv('/home/starstorms/Insight/ShapeNet/meta/df_meta_raw.csv')
#%% Read tax and meta info
tax = ut.readTax()
tax_cats = tax[tax.synsetId.isin(catids_to_load)]
# dfmeta = ut.readMeta()
# dfmeta.drop(['desc','cattext'], axis=1, inplace=True)
#%% Fix and normalize numeric columns of interest
dffixcols = ['dx','dy','dz', 'dsq']
dfnormcols = ['dx','dy','dz','dsq', 'numv']
dfall = pd.DataFrame.merge(dfmeta, dfdesc[['cattext', 'annoid', 'desc', 'details']], how='left', on=['mid', 'mid'])
dfall = dfall.drop_duplicates(subset='mid')
dfall['dsq'] = abs(abs(dfall.dx) - abs(dfall.dz))
dfall[dffixcols] = dfall[dffixcols].div(dfall[dffixcols].sum(axis=1), axis=0)
# dfall[dfnormcols] = dfall[dfnormcols].apply(stats.zscore)
# dfstats = [dfall[dfall.cattext==cattxt][dfnormcols].describe().reset_index() for cattxt in cats_to_load]
#%% Create shape overall classes based on bboxs
duniq = dfall.cat.unique()
qbins = [0, .1, .3, .7, .9, 1.0]
dfclasscols = [col.replace('d','c') for col in dffixcols]
for col in dfclasscols : dfall[str(col)] = int(len(qbins)/2)
for col1, col2 in zip(dffixcols, dfclasscols) :
for catid_uniq in duniq :
dfall[col2].loc[dfall.cat==catid_uniq] = pd.qcut(dfall[col1].loc[dfall.cat==catid_uniq], labels=False, q=qbins, precision=0, duplicates='drop')
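# Illustrative note (not from the original script): with labels=False, pd.qcut returns integer bin
# indices, e.g. pd.qcut(pd.Series([1, 2, 3, 4, 5]), q=[0, .5, 1.0], labels=False) yields 0 for values
# up to the median and 1 above it; duplicates='drop' merges bins whose quantile edges coincide, so the
# number of classes per category can end up smaller than len(qbins) - 1.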
# Import Packages
from IPython.display import clear_output
import pandas as pd
import requests
from requests.utils import requote_uri
from fake_useragent import UserAgent
from lxml import html
from bs4 import BeautifulSoup
from tqdm import tqdm
import time
from textblob import TextBlob
from langdetect import detect
import re
import random
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import pickle
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix ,accuracy_score, classification_report
from sklearn.naive_bayes import BernoulliNB, GaussianNB, ComplementNB
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
import nltk
import gensim
import scipy.stats as st
import numpy as np
import matplotlib.pyplot as plt
def fix_http(URL):
if URL != '':
if ('http' in URL) & (URL[-1:] == '/'):
return URL
elif ('http' in URL) & (URL[-1:] != '/'):
return URL + '/'
elif ('http' not in URL) & (URL[-1:] == '/'):
return 'http://' + URL
else:
return 'http://' + URL + '/'
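# Example behaviour of fix_http (illustrative):
#   fix_http('example.com')         -> 'http://example.com/'
#   fix_http('http://example.com')  -> 'http://example.com/'
#   fix_http('')                    -> None (the empty-string case falls through without a return)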
ua = UserAgent()
def get_html(URL, Timeout):
header = {'User-Agent': str(ua.random)}
try:
page = requests.get(URL, timeout=Timeout, headers=header)
except:
return None
return page.text
def get_html_ssl(URL, Timeout):
header = {'User-Agent': str(ua.random)}
try:
page = requests.get(URL, timeout=Timeout, headers=header, verify=False)
except:
return None
return page.text
def scrape_urls(CSV_file, URL_column, clean=False, output_file=None):
requests.packages.urllib3.disable_warnings()
tqdm(disable=True, total=0)
if len(tqdm._instances) > 0:
while len(tqdm._instances) > 0:
tqdm._instances.pop().close()
clear_output(wait=True)
df = pd.read_csv(CSV_file)
if (clean):
df = df[pd.isnull(df[URL_column]) != True]
html_texts = []
for url in tqdm(df[URL_column].tolist(), total=len(df[URL_column])):
text = get_html(fix_http(url), 10)
if(text is None):
text = get_html_ssl(fix_http(url), 10)
html_texts.append(text)
df['Raw_HTML'] = html_texts
if (output_file is None):
df.to_csv("scraped_html_file.csv", index=False)
else:
df.to_csv(output_file, index=False)
return "Successfully Completed!"
def visible_texts(soup):
re_spaces = re.compile(r'\s{3,}')
text = ' '.join([s for s in soup.strings if s.parent.name not in ('style', 'script', 'head', 'title')])
return re_spaces.sub(' ', text)
def language_detector(page):
try:
soup = BeautifulSoup(page, 'html.parser')
for tag in soup.find_all('div', id=re.compile(r'(cook)|(popup)')):
tag.decompose()
for tag in soup.find_all('div', class_=re.compile(r'(cook)|(popup)')):
tag.decompose()
body_text = visible_texts(BeautifulSoup(visible_texts(soup), 'html.parser'))
if len(soup.find_all('frame')) > 0:
frame_text = ''
for f in soup.find_all('frame'):
frame_request = requests.get(f['src'])
frame_soup = BeautifulSoup(frame_request.content, 'html.parser')
frame_text = frame_text + ' ' + visible_texts(BeautifulSoup(visible_texts(frame_soup), 'html.parser'))
body_text = body_text + frame_text
return detect(body_text)
except:
return 'unknown'
def detect_language(CSV_file = 'scraped_html_file.csv', HTML_column = 'Raw_HTML'):
tqdm(disable=True, total=0)
if len(tqdm._instances) > 0:
while len(tqdm._instances) > 0:
tqdm._instances.pop().close()
clear_output(wait=True)
df = pd.read_csv(CSV_file)
languages = []
counter = 0
for html in tqdm(df[HTML_column].tolist(), total=len(df[HTML_column])):
languages.append(language_detector(html))
counter += 1
if(counter % 100 == 0):
time.sleep(30)
df['Languages'] = languages
df.to_csv(CSV_file, index=False)
return "Successfully Completed!"
def language_detector2(URL):
try:
page = requests.get(URL, timeout=10)
soup = BeautifulSoup(page.content, 'html.parser')
for tag in soup.find_all('div', id=re.compile(r'(cook)|(popup)')):
tag.decompose()
for tag in soup.find_all('div', class_=re.compile(r'(cook)|(popup)')):
tag.decompose()
body_text = visible_texts(BeautifulSoup(visible_texts(soup), 'html.parser'))
if len(soup.find_all('frame')) > 0:
frame_text = ''
for f in soup.find_all('frame'):
frame_request = requests.get(f['src'])
frame_soup = BeautifulSoup(frame_request.content, 'html.parser')
frame_text = frame_text + ' ' + visible_texts(BeautifulSoup(visible_texts(frame_soup), 'html.parser'))
body_text = body_text + frame_text
return len(body_text.split()), detect(body_text)
except:
return 0, 'unknown'
def language_switcher(URL, lang_code):
success_boolean = False
try:
page = requests.get(URL)
except:
return success_boolean, ''
soup = BeautifulSoup(page.text, 'html.parser')
returned_list = soup.find_all(hreflang=re.compile(lang_code), href=True)
if (len(returned_list) == 0):
returned_list = soup.find_all(href=True)
for item in returned_list:
lower_string = str(item.text).lower()
if (any(['nl' == word for word in lower_string.split()])):
success_boolean = True
new_page = item['href']
if ('http' not in item['href']):
new_page = URL + item['href'].strip('.')
if language_detector2(new_page)[1] == 'nl':
return success_boolean, new_page
for item in returned_list:
lower_string = str(item['href']).lower()
if (lower_string.find('nl') != -1):
success_boolean = True
new_page = item['href']
if ('http' not in item['href']):
new_page = URL + item['href'].strip('.')
if language_detector2(new_page)[1] == 'nl':
return success_boolean, new_page
return success_boolean, ''
elif (len(returned_list) == 1):
success_boolean = True
new_page = returned_list[0]['href']
if ('http' not in returned_list[0]['href']):
new_page = URL + returned_list[0]['href'].strip('.')
if language_detector2(new_page)[1] == 'nl':
return success_boolean, new_page
elif (len(returned_list) > 1):
success_boolean = True
for item in returned_list:
new_page = item['href']
if (item['href'].find('be') != -1):
if ('http' not in item['href']):
new_page = URL + item['href'].strip('.')
if language_detector2(new_page)[1] == 'nl':
return success_boolean, new_page
new_page = returned_list[0]['href']
if ('http' not in returned_list[0]['href']):
new_page = URL + returned_list[0]['href'].strip('.')
if language_detector2(new_page)[1] == 'nl':
return success_boolean, new_page
else:
return success_boolean, ''
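# Illustrative usage (hypothetical URL): language_switcher('http://example.be/', 'nl') returns
# (True, <dutch page url>) when a Dutch hreflang/anchor link passes the language check, and
# (False, '') when no candidate links exist at all; some branches can also fall through and return
# None implicitly when candidates are found but none validates as Dutch, which is why the caller in
# switch_language checks the result against None first.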
def crawl_contact_page(URL, Base_URL, request_page):
new_pages = []
soup_crawl = BeautifulSoup(request_page.text, 'html.parser')
returned_list = soup_crawl.find_all(href=True)
for item in returned_list:
lower_href_text = ''.join(str(item.text).lower().strip())
if ('cont' in lower_href_text):
if ('www' in item['href']):
new_pages.append(item['href'])
else:
new_page = Base_URL + item['href'].strip('.')
new_pages.append(new_page)
return list(set(new_pages))
def crawl_location_page(URL, Base_URL, request_page):
new_pages = []
soup_crawl = BeautifulSoup(request_page.text, 'html.parser')
returned_list = soup_crawl.find_all(href=True)
for item in returned_list:
lower_href_text = ''.join(str(item.text).lower().strip())
if (('vest' in lower_href_text) | ('loc' in lower_href_text)):
if ('www' in item['href']):
new_pages.append(item['href'])
else:
new_page = Base_URL + item['href'].strip('.')
new_pages.append(new_page)
return list(set(new_pages))
def validate_zip(URL, Base_URL, zip_1, zip_2):
page = requests.get(URL)
contact_pages = crawl_contact_page(URL, Base_URL, page)
location_pages = crawl_location_page(URL, Base_URL, page)
total_pages = contact_pages + location_pages
print(total_pages)
soup = BeautifulSoup(page.text, 'lxml')
[s.decompose() for s in soup('script')]
all_text = ' '.join(re.sub(r'\n', ' ', soup.get_text()).split())
numeric_text = re.findall(r'\d+', all_text)
if (any([str(zip_1) == number for number in numeric_text]) |
any([str(zip_2) == number for number in numeric_text])):
return True
elif (len(total_pages) != 0):
for new_page in total_pages:
time.sleep(3)
page = requests.get(new_page)
soup = BeautifulSoup(page.text, 'lxml')
[s.decompose() for s in soup('script')]
all_text = ' '.join(re.sub(r'\n', ' ', soup.get_text()).split())
numeric_text = re.findall(r'\d+', all_text)
if (any([str(zip_1) == number for number in numeric_text]) |
any([str(zip_2) == number for number in numeric_text])):
return True
return False
def validate_street(URL, Base_URL, street_raw):
page = requests.get(URL)
contact_pages = crawl_contact_page(URL, Base_URL, page)
location_pages = crawl_location_page(URL, Base_URL, page)
total_pages = contact_pages + location_pages
print(total_pages)
soup = BeautifulSoup(page.text, 'lxml')
[s.decompose() for s in soup('script')]
all_text = ' '.join(re.sub(r'\n', ' ', soup.get_text()).split())
street_raw_temp = re.sub(r'\d+', '', street_raw).strip()
final_street = re.sub('[\(\[].*?[\)\]]', '', street_raw_temp)
if (final_street in all_text):
return True
elif (len(total_pages) != 0):
for new_page in total_pages:
time.sleep(3)
page = requests.get(new_page)
soup = BeautifulSoup(page.text, 'lxml')
[s.decompose() for s in soup('script')]
all_text = ' '.join(re.sub(r'\n', ' ', soup.get_text()).split())
if (final_street in all_text):
return True
return False
def extract_url_from_email(Email):
try:
return (re.findall(r'@([A-Za-z.]+)', Email)[0]).strip()
except:
return ''
# Input is 4 columns, in the order of the function signature: cur_web, cur_email, web, email
def assign_primary_URL(cur_web, cur_email, web, email):
if not (pd.isnull(cur_web)):
return fix_http(cur_web)
elif not (pd.isnull(cur_email)):
return fix_http(extract_url_from_email(cur_email))
elif not (pd.isnull(web)):
return fix_http(web)
elif not (pd.isnull(email)):
return fix_http(extract_url_from_email(email))
else:
return ''
def get_status_code(URL):
try:
return requests.get(URL, timeout=10).status_code
except:
return 0
def get_NL_URL(URL, status_code):
try:
if status_code == 200:
if language_detector(URL)[1] != 'nl':
success_code, new_url = language_switcher(URL, 'nl')
if success_code & (new_url != ''):
return new_url
return URL
except:
return URL
def switch_language(CSV_file = 'scraped_html_file.csv', language_column = 'Languages', URL_column = 'URL'):
requests.packages.urllib3.disable_warnings()
df = pd.read_csv(CSV_file)
tqdm(disable=True, total=0)
if len(tqdm._instances) > 0:
while len(tqdm._instances) > 0:
tqdm._instances.pop().close()
clear_output(wait=True)
for index, row in tqdm(df.iterrows(), total=df.shape[0]):
if((df.loc[index, language_column] not in ['nl', 'en']) and (pd.isnull(df.loc[index, 'Raw_HTML']) == False)):
if (language_switcher(df.loc[index, URL_column], 'nl') is not None):
success_code, new_url = language_switcher(df.loc[index, URL_column], 'nl')
if success_code & (new_url != ''):
df.loc[index, URL_column] = new_url
df.loc[index, 'Raw_HTML'] = get_html_ssl(fix_http(df.loc[index, URL_column]), 10)
df.loc[index, language_column] = language_detector(df.loc[index, 'Raw_HTML'])
df.to_csv(CSV_file, index=False)
return "Successfully Completed!"
def filter_get_language_distribution(CSV_file = 'scraped_html_file.csv', Language_column = 'Languages'):
df = pd.read_csv(CSV_file)
print(df[Language_column].value_counts())
df = df[df[Language_column].isin(['nl', 'en'])]
df.to_csv(CSV_file, index=False)
return "Successfully Completed!"
# Clean Text + Get if any other frames
def clean_pop_cookie_frame(raw_text):
soup = BeautifulSoup(raw_text, 'html.parser')
for tag in soup.find_all('div', id=re.compile(r'(cook)|(popup)')):
tag.decompose()
for tag in soup.find_all('div', class_=re.compile(r'(cook)|(popup)')):
tag.decompose()
body_text = visible_texts(BeautifulSoup(visible_texts(soup), 'html.parser'))
if len(soup.find_all('frame')) > 0:
frame_text = ''
for f in soup.find_all('frame'):
try:
frame_request = requests.get(f['src'], timeout=10)
frame_soup = BeautifulSoup(frame_request.content, 'html.parser')
frame_text = frame_text + ' ' + visible_texts(BeautifulSoup(visible_texts(frame_soup), 'html.parser'))
except:
frame_text = ''
body_text = body_text + frame_text
return body_text.strip()
def lower_punct_number_clean(text, lower_bound_letter_length):
temp_text = re.sub('[^A-Za-z ]+', '', text)
temp_text = ' '.join([i for i in temp_text.split() if len(i) >= lower_bound_letter_length])
return temp_text.lower().strip()
english_stopwords = stopwords.words('english')
dutch_stopwords = stopwords.words('dutch')
def remove_stopwords(text, lang):
if (lang == 'nl'):
temp_text = ' '.join([word for word in text.split() if word not in dutch_stopwords])
return ' '.join([word for word in temp_text.split() if word not in english_stopwords])
elif (lang == 'en'):
return ' '.join([word for word in text.split() if word not in english_stopwords])
else:
return None
english_stemmer = SnowballStemmer(language='english')
dutch_stemmer = SnowballStemmer(language='dutch')
def stem_text(text, lang):
if (text == None):
return None
elif (lang == 'nl'):
return ' '.join([dutch_stemmer.stem(word) for word in text.split()])
elif (lang == 'en'):
return ' '.join([english_stemmer.stem(word) for word in text.split()])
else:
return None
def count_words(text):
if (text == None):
return None
else:
return len(text.split())
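# Illustrative chaining of the cleaning helpers above (not part of the original script); exact
# outputs depend on the installed NLTK stopword lists and Snowball stemmers:
#   cleaned = lower_punct_number_clean(raw_text, 3)
#   no_stop = remove_stopwords(cleaned, 'nl')
#   stemmed = stem_text(no_stop, 'nl')
#   n_words = count_words(stemmed)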
def HTML_to_text(CSV_file = 'scraped_html_file.csv', HTML_column = 'Raw_HTML'):
df = pd.read_csv(CSV_file)
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
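            # temporarily lower the indexing engine's size cutoff so this test
            # exercises the large-index lookup path without needing millions of rows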
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertTrue(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
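        # DatetimeIndex takes a view of int64 input by default, so mutating the
        # source array shows up in the index; copy=True must break that link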
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
        rng = date_range('1/1/2000', periods=10)
import pandas as pd
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import os
import wandb
from utils import set_seed, parse_training_args
from dataset import ToxicDataset
from trainer import Trainer
from model import convert_regressor_to_binary, convert_binary_to_regressor
if __name__ == "__main__":
args = parse_training_args()
config = vars(args)
if config["use_extra_data"]:
extra_files = [
os.path.join(config["extra_data_dir"], f)
for f in os.listdir(config["extra_data_dir"])
]
config["extra_files"] = extra_files
wandb.login()
if config["num_labels"] is None or config["num_labels"] == 1:
project = "jigsaw-train"
else:
project = "jigsaw-binary-train"
with wandb.init(
project=project,
group=str(args.group_id),
name=f"{args.group_id}-{args.checkpoint}",
config=config,
):
config = wandb.config
set_seed(config.seed)
train_data = pd.read_csv(config.train_path)
if config.use_extra_data:
extra_data = [pd.read_csv(f) for f in extra_files]
            train_data = pd.concat([train_data] + extra_data)
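        # The excerpt ends here. A typical continuation (assumed from the imports
        # above, not part of the original) would build the tokenizer, model and
        # trainer, roughly:
        #   tokenizer = AutoTokenizer.from_pretrained(config.checkpoint)
        #   model = AutoModelForSequenceClassification.from_pretrained(config.checkpoint)
        #   train_dataset = ToxicDataset(train_data, tokenizer)   # signature assumed
        #   Trainer(model, train_dataset, config).train()         # signature assumed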
def PCoA_analysis(TaXon_table_xlsx, meta_data_to_test, taxonomic_level, width, height, pcoa_s, path_to_outdirs, template, font_size, color_discrete_sequence, pcoa_dissimilarity):
import pandas as pd
import numpy as np
from skbio.diversity import beta_diversity
from skbio.stats.ordination import pcoa
from skbio.stats.distance import anosim
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px
from pathlib import Path
import PySimpleGUI as sg
import os, webbrowser
from itertools import combinations
TaXon_table_xlsx = Path(TaXon_table_xlsx)
Meta_data_table_xlsx = Path(str(path_to_outdirs) + "/" + "Meta_data_table" + "/" + TaXon_table_xlsx.stem + "_metadata.xlsx")
TaXon_table_df = pd.read_excel(TaXon_table_xlsx, header=0).fillna("unidentified")
TaXon_table_samples = TaXon_table_df.columns.tolist()[10:]
Meta_data_table_df = pd.read_excel(Meta_data_table_xlsx, header=0).fillna("nan")
Meta_data_table_samples = Meta_data_table_df['Samples'].tolist()
metadata_list = Meta_data_table_df[meta_data_to_test].values.tolist()
metadata_loc = Meta_data_table_df.columns.tolist().index(meta_data_to_test)
## drop samples with metadata called nan (= empty)
drop_samples = [i[0] for i in Meta_data_table_df.values.tolist() if i[metadata_loc] == "nan"]
if drop_samples != []:
## filter the TaXon table
TaXon_table_df = TaXon_table_df.drop(drop_samples, axis=1)
TaXon_table_samples = TaXon_table_df.columns.tolist()[10:]
## also remove empty OTUs
row_filter_list = []
for row in TaXon_table_df.values.tolist():
reads = set(row[10:])
if reads != {0}:
row_filter_list.append(row)
columns = TaXon_table_df.columns.tolist()
TaXon_table_df = pd.DataFrame(row_filter_list, columns=columns)
Meta_data_table_df = pd.DataFrame([i for i in Meta_data_table_df.values.tolist() if i[0] not in drop_samples], columns=Meta_data_table_df.columns.tolist())
Meta_data_table_samples = Meta_data_table_df['Samples'].tolist()
## create a y axis title text
taxon_title = taxonomic_level.lower()
    ## adjust taxonomic level if necessary
if taxonomic_level in ["ASVs", "ESVs", "OTUs", "zOTUs"]:
taxon_title = taxonomic_level
taxonomic_level = "ID"
# check if the meta data differs
if len(set(Meta_data_table_df[meta_data_to_test])) == len(Meta_data_table_df['Samples'].tolist()):
sg.Popup("The meta data is unique for all samples. Please adjust the meta data table!", title=("Error"))
raise RuntimeError
# check if the meta data differs
if len(set(Meta_data_table_df[meta_data_to_test])) == 1:
sg.Popup("The meta data is similar for all samples. Please adjust the meta data table!", title=("Error"))
raise RuntimeError
if sorted(TaXon_table_samples) == sorted(Meta_data_table_samples):
samples = Meta_data_table_samples
## extract the relevant data
TaXon_table_df = TaXon_table_df[[taxonomic_level] + samples]
        ## define an aggregation function to combine multiple hits of one taxonomic level
aggregation_functions = {}
## define samples functions
for sample in samples:
## 'sum' will calculate the sum of p/a data
aggregation_functions[sample] = 'sum'
## define taxon level function
aggregation_functions[taxonomic_level] = 'first'
## create condensed dataframe
TaXon_table_df = TaXon_table_df.groupby(TaXon_table_df[taxonomic_level]).aggregate(aggregation_functions)
if 'unidentified' in TaXon_table_df.index:
TaXon_table_df = TaXon_table_df.drop('unidentified')
data = TaXon_table_df[samples].transpose().values.tolist()
jc_dm = beta_diversity(pcoa_dissimilarity, data, samples)
ordination_result = pcoa(jc_dm)
metadata_list = Meta_data_table_df[meta_data_to_test].values.tolist()
anosim_results = anosim(jc_dm, metadata_list, permutations=999)
anosim_r = round(anosim_results['test statistic'], 5)
anosim_p = anosim_results['p-value']
textbox = meta_data_to_test + ", " + taxon_title + "<br>Anosim " + "R = " + str(anosim_r) + " " + "p = " + str(anosim_p)
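        # ANOSIM R ranges from -1 to 1; values near 1 indicate strong separation
        # between the metadata groups, and p comes from the 999-permutation test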
#######################################################################################
# create window to ask for PCoA axis to test
def slices(list, slice):
for i in range(0, len(list), slice):
yield list[i : i + slice]
# collect the PCoA proportion explained values
proportion_explained_list = []
for i, pcoa_axis in enumerate(ordination_result.proportion_explained):
if round(pcoa_axis* 100, 2) >= 1:
proportion_explained_list.append("PC" + str(i+1) + " (" + str(round(pcoa_axis* 100, 2)) + " %)")
pcoa_axis_checkboxes = list(slices([sg.Checkbox(name, key=name, size=(15,1)) for name in proportion_explained_list], 10))
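        # the checkboxes above are chunked into rows of ten so the GUI frame stays compact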
pcoa_window_layout = [
[sg.Text('Check up to four axes to be displayed')],
[sg.Frame(layout = pcoa_axis_checkboxes, title = '')],
[sg.Text('Only axes >= 1 % explained variance are shown')],
[sg.CB("Connect categories", default=True, key="draw_mesh")],
[sg.Text('')],
[sg.Button('Plot', key='Plot')],
[sg.Button('Back')],
]
pcoa_window = sg.Window('PCoA axis', pcoa_window_layout, keep_on_top=True)
while True:
event, values = pcoa_window.read()
draw_mesh = values["draw_mesh"]
if event is None or event == 'Back':
break
if event == 'Plot':
## create a subfolder for better sorting and overview
dirName = Path(str(path_to_outdirs) + "/" + "PCoA_plots" + "/" + TaXon_table_xlsx.stem + "/")
if not os.path.exists(dirName):
os.mkdir(dirName)
# collect the pcoa axis values
axis_to_plot = [key for key,value in values.items() if value == True and "PC" in key]
# pass on only if two pcoa axes were checked
if len(axis_to_plot) == 2:
cat1 = axis_to_plot[1].split()[0]
cat2 = axis_to_plot[0].split()[0]
df_pcoa = ordination_result.samples[[cat1, cat2]]
df_pcoa.insert(2, "Metadata", Meta_data_table_df[meta_data_to_test].values.tolist(), True)
df_pcoa.insert(3, "Samples", Meta_data_table_df["Samples"].values.tolist(), True)
if draw_mesh == True:
combinations_list =[]
for metadata in df_pcoa["Metadata"]:
## collect all entries for the respective metadata
arr = df_pcoa.loc[df_pcoa['Metadata'] == metadata][[cat1, cat2, "Metadata", "Samples"]].to_numpy()
## create a df for all possible combinations using itertools combinations
for entry in list(combinations(arr, 2)):
combinations_list.append(list(entry[0]))
combinations_list.append(list(entry[1]))
## create a dataframe to draw the plot from
df = | pd.DataFrame(combinations_list) | pandas.DataFrame |
import calendar
from enum import Enum
from typing import Any, List, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
class DistrType(Enum):
"""Indicates the type distribution of data in a series."""
Continuous = "continuous"
Binary = "binary"
Categorical = "categorical"
Datetime = "datetime"
def is_continuous(self):
return self == DistrType.Continuous
def is_binary(self):
return self == DistrType.Binary
def is_categorical(self):
return self == DistrType.Categorical
def is_datetime(self):
return self == DistrType.Datetime
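# Editor sketch (not part of the original module): the predicate helpers let callers
# branch on an inferred distribution kind without comparing enum members directly.
def _example_distr_branch(kind: DistrType) -> str:
    return "shared-bin histogram" if kind.is_continuous() else "value-count histogram"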
def zipped_hist(
data: Tuple[pd.Series, ...],
bin_edges: Optional[np.ndarray] = None,
normalize: bool = True,
ret_bins: bool = False,
distr_type: Optional[str] = None,
) -> Union[Tuple[pd.Series, ...], Tuple[Tuple[pd.Series, ...], Optional[np.ndarray]]]:
"""Bins a tuple of series' and returns the aligned histograms.
Args:
data (Tuple[pd.Series, ...]):
A tuple consisting of the series to be binned. All series must have the same dtype.
bin_edges (Optional[np.ndarray], optional):
Bin edges to bin continuous data by. Defaults to None.
normalize (bool, optional):
Normalize the histograms, turning them into pdfs. Defaults to True.
ret_bins (bool, optional):
Returns the bin edges used in the histogram. Defaults to False.
distr_type (Optional[str]):
The type of distribution of the target attribute. Can be "categorical" or "continuous".
If None the type of distribution is inferred based on the data in the column.
Defaults to None.
Returns:
Union[Tuple[pd.Series, ...], Tuple[Tuple[pd.Series, ...], Optional[np.ndarray]]]:
A tuple of pd.Series consisting of the histogram of each input series.
Additionally returns bins if ret_bins is True.
"""
joint = pd.concat(data)
is_continuous = distr_type == "continuous" if distr_type is not None else infer_distr_type(joint).is_continuous()
# Compute histograms of the data, bin if continuous
if is_continuous:
# Compute shared bin_edges if not given, and use np.histogram to form histograms
if bin_edges is None:
bin_edges = np.histogram_bin_edges(joint, bins="auto")
hists = [np.histogram(series, bins=bin_edges)[0] for series in data]
if normalize:
with np.errstate(divide="ignore", invalid="ignore"):
hists = [np.nan_to_num(hist / hist.sum()) for hist in hists]
else:
# For categorical data, form histogram using value counts and align
space = joint.unique()
dicts = [sr.value_counts(normalize=normalize) for sr in data]
hists = [np.array([d.get(val, 0) for val in space]) for d in dicts]
ps = [pd.Series(hist) for hist in hists]
if ret_bins:
return tuple(ps), bin_edges
return tuple(ps)
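# Editor sketch (not part of the original module): aligned, normalised histograms for two
# small numeric series; distr_type is passed explicitly so the (elsewhere-defined)
# infer_distr_type helper is not needed.
def _example_zipped_hist() -> Tuple[pd.Series, ...]:
    a = pd.Series([0.1, 0.35, 0.4, 0.8, 0.9])
    b = pd.Series([0.2, 0.5, 0.7])
    return zipped_hist((a, b), distr_type="continuous")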
def bin(
column: pd.Series,
n_bins: Optional[int] = None,
remove_outliers: Optional[float] = 0.1,
quantile_based: bool = False,
bin_centers=False,
**kwargs,
) -> pd.Series:
"""Bin continous values into discrete bins.
Args:
column (pd.Series):
The column or series containing the data to be binned.
n_bins (Optional[int], optional):
The number of bins. Defaults to Freedman-Diaconis rule.
remove_outliers (Optional[float], optional):
Any data point outside this quantile (two-sided) will be dropped before computing bins.
If `None`, outliers are not removed. Defaults to 0.1.
quantile_based (bool, optional):
Whether the bin computation is quantile based. Defaults to False.
bin_centers (bool, optional):
Return the mean of the intervals instead of the intervals themselves. Defaults to False.
**kwargs:
Key word arguments for pd.cut or pd.qcut.
Returns:
pd.Series:
The binned column.
"""
column = infer_dtype(column)
column_clean = column.dropna()
n_bins = n_bins or fd_opt_bins(column)
if remove_outliers:
percentiles = [remove_outliers * 100.0 / 2, 100 - remove_outliers * 100.0 / 2]
start, end = np.percentile(column_clean, percentiles)
if start == end:
start, end = min(column_clean), max(column_clean)
column_clean = column_clean[(start <= column_clean) & (column_clean <= end)]
if not quantile_based:
_, bins = pd.cut(column_clean, n_bins, retbins=True, **kwargs)
else:
_, bins = pd.qcut(column_clean, n_bins, retbins=True, **kwargs)
bins = list(bins) # Otherwise it is np.ndarray
bins[0], bins[-1] = column.min(), column.max()
# Manually construct interval index for dates as pandas can't do a quantile date interval by itself.
if isinstance(bins[0], pd.Timestamp):
bins = pd.IntervalIndex([pd.Interval(bins[n], bins[n + 1]) for n in range(len(bins) - 1)], closed="left")
binned = pd.Series(pd.cut(column, bins=bins, include_lowest=True, **kwargs))
if bin_centers:
binned = binned.apply(lambda i: i.mid)
return binned
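# Editor sketch (not part of the original module, and it assumes the complete module is
# importable -- infer_dtype is cut off in this excerpt): five equal-width bins reported
# as bin centres; n_bins is given explicitly so the Freedman-Diaconis helper is not used.
def _example_bin() -> pd.Series:
    values = pd.Series(np.linspace(0.0, 10.0, num=50))
    return bin(values, n_bins=5, remove_outliers=None, bin_centers=True)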
def quantize_date(column: pd.Series) -> pd.Series:
"""Quantize a column of dates into bins of uniform width in years, days or months.
Args:
column (pd.Series):
The column of dates to quantize. Must have a dtype of "datetime64[ns]".
Returns:
pd.Series:
Quantized series.
"""
TEN_YEAR_THRESHOLD = 15
TEN_MIN_THRESHOLD = 15
TEN_SEC_THRESHOLD = 15
if column.dtype != "datetime64[ns]":
raise ValueError("'quantize_date' requires the column to be a pandas datetime object")
years, months, days = column.dt.year, column.dt.month, column.dt.day
hours, minutes, seconds = column.dt.hour, column.dt.minute, column.dt.second
# Assuming dates don't go back beyond a 100 years.
if years.max() - years.min() >= TEN_YEAR_THRESHOLD:
return ((years // 10) * 10).apply(lambda x: str(x) + "-" + str(x + 10))
elif years.nunique() > 1:
return years
elif months.nunique() > 1:
return months.apply(lambda x: calendar.month_abbr[x])
elif days.nunique() > 1:
return days.apply(lambda x: "Day " + str(x))
elif hours.nunique() > 1:
return hours.apply(lambda x: "Hour " + str(x))
elif minutes.max() - minutes.min() > TEN_MIN_THRESHOLD:
return ((minutes // 10) * 10).apply(lambda x: str(x) + "min-" + str(x + 10) + "min")
elif minutes.nunique() > 1:
return minutes.apply(lambda x: str(x) + "m")
elif seconds.max() - seconds.min() > TEN_SEC_THRESHOLD:
return ((seconds // 10) * 10).apply(lambda x: str(x) + "s-" + str(x + 10) + "s")
return seconds
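# Editor sketch (not part of the original module): a forty-year span triggers the
# decade-wide labels ("1980-1990", "1990-2000", ...).
def _example_quantize_date() -> pd.Series:
    dates = pd.Series(pd.to_datetime([f"{year}-06-15" for year in range(1980, 2021, 5)]))
    return quantize_date(dates)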
def infer_dtype(col: pd.Series) -> pd.Series:
"""Infers the type of the data and converts the data to it.
Args:
col (pd.Series):
The column of the dataframe to transform.
Returns:
pd.Series:
The column converted to its inferred type.
"""
column = col.copy()
in_dtype = str(column.dtype)
# Try to convert it to numeric
if column.dtype.kind not in ("i", "u", "f", "M"):
n_nans = column.isna().sum()
col_num = | pd.to_numeric(column, errors="coerce") | pandas.to_numeric |
""" database.py -- sqlite storage of relevant functional info for Harvey.
Language: Python 3.9
"""
from typing import Tuple
import json
import logging
import pathlib
import sqlite3
import pandas as pd
from harvey.utils.exceptions import DatabaseError
MAPPING_FILE = pathlib.Path(__file__).parents[1].joinpath("json/db_tables.json")
class Database(object):
"""Container for bot items."""
def __init__(self, database: pathlib.Path):
self.database = pathlib.Path(database)
if self.database.exists():
logging.debug(
f"Initializing database connection to DB file at "
f"'{self.database.name}'."
)
else:
logging.debug(f"Creating database at DB file '{self.database.name}'.")
self.rr_role_table = None # Set for linting.
self.rr_post_table = None
self.logging_channel_table = None
self.sales_feed_table = None
with open(MAPPING_FILE, "r") as f:
self.mapping = json.loads(f.read())
for var, info in self.mapping["tables"].items():
setattr(self, var, info["name"])
self.create_tables()
def get_conn(self) -> Tuple[sqlite3.Connection, sqlite3.Cursor]:
"""Helper function to connect to the internal database.
Returns
----------
Tuple[sqlite3.Connection, sqlite3.Cursor]
Connection and cursor objects.
"""
conn = sqlite3.connect(self.database)
cursor = conn.cursor()
return conn, cursor
def create_tables(self):
"""Create database tables if they don't already exist.
Table definitions are read from the create queries in the
json/db_tables.json mapping; the method takes no arguments.
"""
logging.debug("Beginning table instantiation.")
conn, cursor = self.get_conn()
for table_type, table_info in self.mapping["tables"].items():
logging.debug(f"Creating {table_type} table '{table_info['name']}'.")
cursor.execute(table_info["create_query"])
conn.commit()
conn.close()
logging.info("Completed table instantiation.")
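# Editor note (not part of the original module): instantiating Database is enough to
# create any missing tables -- __init__ loads json/db_tables.json and calls this
# method, so e.g. `Database(pathlib.Path("harvey.sqlite3"))` would create the file and
# every mapped table, assuming the packaged mapping file is present.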
def role_exists(
self,
emoji_id: int,
role_id: int,
guild_id: int,
) -> bool:
"""Check if a role exists on the self.rr_role_table table.
Parameters
----------
emoji_id: int
Emoji ID to check.
role_id: int
Role ID to check.
guild_id: int
Guild ID to check.
Returns
----------
bool
True if role was found. False otherwise.
"""
query = (
f"SELECT * FROM '{self.rr_role_table}' WHERE 1=1 AND emoji_id = {emoji_id} "
f"AND role_id = {role_id} AND guild_id = {guild_id}"
)
conn, _ = self.get_conn()
res = | pd.read_sql_query(query, con=conn) | pandas.read_sql_query |
#!/usr/bin/env python
# coding: utf-8
# # Benchmark Results
# This notebook visualizes the output from the different models on different classification problems
# In[1]:
import collections
import glob
import json
import os
import numpy as np
import pandas as pd
from plotnine import *
from saged.utils import split_sample_names, create_dataset_stat_df, get_dataset_stats, parse_map_file
# ## Set Up Functions and Get Metadata
# In[3]:
def return_unlabeled():
# For use in a defaultdict
return 'unlabeled'
# In[4]:
data_dir = '../../data/'
map_file = os.path.join(data_dir, 'sample_classifications.pkl')
sample_to_label = parse_map_file(map_file)
sample_to_label = collections.defaultdict(return_unlabeled, sample_to_label)
# In[ ]:
metadata_path = os.path.join(data_dir, 'aggregated_metadata.json')
metadata = None
with open(metadata_path) as json_file:
metadata = json.load(json_file)
sample_metadata = metadata['samples']
# In[ ]:
experiments = metadata['experiments']
sample_to_study = {}
for study in experiments:
for accession in experiments[study]['sample_accession_codes']:
sample_to_study[accession] = study
# ## Sepsis classification
# In[8]:
in_files = glob.glob('../../results/single_label.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[9]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics
# In[10]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# In[11]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=3)
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# ## All labels
# In[12]:
in_files = glob.glob('../../results/all_labels.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[13]:
metrics = None
for path in in_files:
if metrics is None:
metrics = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
metrics['unsupervised'] = unsupervised_model
metrics['supervised'] = supervised_model
else:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
metrics = pd.concat([metrics, new_df])
metrics
# In[14]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# In[15]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=2)
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# # Subsets of healthy labels
# In[16]:
in_files = glob.glob('../../results/subset_label.sepsis*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[17]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[18]:
print(sepsis_metrics[sepsis_metrics['healthy_used'] == 1])
# In[19]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[20]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[21]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Same analysis, but with tb instead of sepsis
# In[22]:
in_files = glob.glob('../../results/subset_label.tb*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[23]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[24]:
print(tuberculosis_metrics[tuberculosis_metrics['healthy_used'] == 1])
# In[25]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[26]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[27]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Supervised Results Only
# The results above show that unsupervised learning mostly hurts performance rather than helping.
# The visualizations below compare each model based only on its supervised results.
# In[28]:
supervised_sepsis = sepsis_metrics[sepsis_metrics['unsupervised'] == 'untransformed']
# In[29]:
plot = ggplot(supervised_sepsis, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[30]:
supervised_tb = tuberculosis_metrics[tuberculosis_metrics['unsupervised'] == 'untransformed']
# In[31]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[32]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Batch Effect Correction
# In[33]:
in_files = glob.glob('../../results/subset_label.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[34]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
print(model_info)
model_info = model_info.split('.')
print(model_info)
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[35]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[36]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## TB Batch effect corrected
# In[37]:
in_files = glob.glob('../../results/subset_label.tb*be_corrected.tsv')
print(in_files[:5])
# In[38]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[39]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[40]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Better Metrics, Same Label Distribution in Train and Val sets
# In[11]:
in_files = glob.glob('../../results/keep_ratios.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[12]:
sepsis_metrics = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Authors: <NAME>
UNESCO-IHE 2016
Contact: <EMAIL>
Repository: https://github.com/wateraccounting/wa
Module: Sheets/sheet1
"""
import os
import pandas as pd
import time
import xml.etree.ElementTree as ET
import subprocess
def create_sheet3(basin, period, units, data, output, template=False):
"""
Keyword arguments:
basin -- The name of the basin
period -- The period of analysis
units -- A list with the units of the data:
[<water consumption>, <land productivity>, <water productivity>]
data -- A list (length 2) with the csv files that contain the water data. The csv files
have to follow a specific format. A sample csv is available in the link:
https://github.com/wateraccounting/wa/tree/master/Sheets/csv
output -- A list (length 2) with the output paths of the jpg files
for the two parts of the sheet
template -- A list (length 2) of the svg files of the sheet.
Use False (default) to use the standard svg files.
Example:
from wa.Sheets import *
create_sheet3(basin='Helmand', period='2007-2011',
units=['km3/yr', 'kg/ha/yr', 'kg/m3'],
data=[r'C:\Sheets\csv\Sample_sheet3_part1.csv',
r'C:\Sheets\csv\Sample_sheet3_part2.csv'],
output=[r'C:\Sheets\sheet_3_part1.jpg',
r'C:\Sheets\sheet_3_part2.jpg'])
"""
# Read table
df1 = pd.read_csv(data[0], sep=';')
df2 = pd.read_csv(data[1], sep=';')
# Data frames
df1c = df1.loc[df1.USE == "CROP"]
df1n = df1.loc[df1.USE == "NON-CROP"]
df2c = df2.loc[df2.USE == "CROP"]
df2n = df2.loc[df2.USE == "NON-CROP"]
# Read csv file part 1
crop_r01c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c01 = crop_r02c01 + crop_r03c01
crop_r01c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c02 = crop_r02c02 + crop_r03c02
crop_r01c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c03 = crop_r02c03 + crop_r03c03
crop_r01c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c04 = crop_r02c04 + crop_r03c04
crop_r01c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c05 = crop_r02c05 + crop_r03c05
crop_r01c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c06 = crop_r02c06 + crop_r03c06
crop_r01c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c07 = crop_r02c07 + crop_r03c07
crop_r01c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c08 = crop_r02c08 + crop_r03c08
crop_r01c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c09 = crop_r02c09 + crop_r03c09
crop_r01c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c10 = crop_r02c10 + crop_r03c10
crop_r01c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c11 = crop_r02c11 + crop_r03c11
crop_r01c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c12 = crop_r02c12 + crop_r03c12
noncrop_r01c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c01 = noncrop_r02c01 + noncrop_r03c01
noncrop_r01c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c02 = noncrop_r02c02 + noncrop_r03c02
crop_r01 = pd.np.nansum([crop_r01c01, crop_r01c02, crop_r01c03,
crop_r01c04, crop_r01c05, crop_r01c06,
crop_r01c07, crop_r01c08, crop_r01c09,
crop_r01c10, crop_r01c11, crop_r01c12])
crop_r02 = pd.np.nansum([crop_r02c01, crop_r02c02, crop_r02c03,
crop_r02c04, crop_r02c05, crop_r02c06,
crop_r02c07, crop_r02c08, crop_r02c09,
crop_r02c10, crop_r02c11, crop_r02c12])
crop_r03 = pd.np.nansum([crop_r03c01, crop_r03c02, crop_r03c03,
crop_r03c04, crop_r03c05, crop_r03c06,
crop_r03c07, crop_r03c08, crop_r03c09,
crop_r03c10, crop_r03c11, crop_r03c12])
crop_r04 = crop_r02 + crop_r03
noncrop_r01 = pd.np.nansum([noncrop_r01c01, noncrop_r01c02])
noncrop_r02 = pd.np.nansum([noncrop_r02c01, noncrop_r02c02])
noncrop_r03 = pd.np.nansum([noncrop_r03c01, noncrop_r03c02])
noncrop_r04 = noncrop_r02 + noncrop_r03
ag_water_cons = crop_r01 + crop_r04 + noncrop_r01 + noncrop_r04
# Read csv file part 2
# Land productivity
lp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
# Water productivity
wp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
# Calculations & modify svgs
if not template:
path = os.path.dirname(os.path.abspath(__file__))
svg_template_path_1 = os.path.join(path, 'svg', 'sheet_3_part1.svg')
svg_template_path_2 = os.path.join(path, 'svg', 'sheet_3_part2.svg')
else:
svg_template_path_1 = os.path.abspath(template[0])
svg_template_path_2 = os.path.abspath(template[1])
tree1 = ET.parse(svg_template_path_1)
tree2 = ET.parse(svg_template_path_2)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Titles
xml_txt_box = tree1.findall('''.//*[@id='basin']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree1.findall('''.//*[@id='period']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree1.findall('''.//*[@id='units']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 1: Agricultural water consumption (' + units[0] + ')'
xml_txt_box = tree2.findall('''.//*[@id='basin2']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree2.findall('''.//*[@id='period2']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree2.findall('''.//*[@id='units2']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 2: Land productivity (' + units[1] + ') and water productivity (' + units[2] + ')'
# Part 1
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c01']''')[0]
if not pd.isnull(crop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c02']''')[0]
if not pd.isnull(crop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c03']''')[0]
if not pd.isnull(crop_r01c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c04']''')[0]
if not pd.isnull(crop_r01c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c05']''')[0]
if not pd.isnull(crop_r01c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c06']''')[0]
if not pd.isnull(crop_r01c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c07']''')[0]
if not pd.isnull(crop_r01c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c08']''')[0]
if not pd.isnull(crop_r01c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c09']''')[0]
if not pd.isnull(crop_r01c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c10']''')[0]
if not pd.isnull(crop_r01c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c11']''')[0]
if not pd.isnull(crop_r01c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c12']''')[0]
if not pd.isnull(crop_r01c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01']''')[0]
if not pd.isnull(crop_r01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c01']''')[0]
if not pd.isnull(crop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c02']''')[0]
if not pd.isnull(crop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c03']''')[0]
if not pd.isnull(crop_r02c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c04']''')[0]
if not pd.isnull(crop_r02c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c05']''')[0]
if not pd.isnull(crop_r02c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c06']''')[0]
if not pd.isnull(crop_r02c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c07']''')[0]
if not pd.isnull(crop_r02c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c08']''')[0]
if not pd.isnull(crop_r02c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c09']''')[0]
if not pd.isnull(crop_r02c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c10']''')[0]
if not pd.isnull(crop_r02c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c11']''')[0]
if not pd.isnull(crop_r02c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c12']''')[0]
if not pd.isnull(crop_r02c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02']''')[0]
if not pd.isnull(crop_r02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c01']''')[0]
if not pd.isnull(crop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c02']''')[0]
if not pd.isnull(crop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c03']''')[0]
if not pd.isnull(crop_r03c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c04']''')[0]
if not pd.isnull(crop_r03c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c05']''')[0]
if not pd.isnull(crop_r03c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c06']''')[0]
if not | pd.isnull(crop_r03c06) | pandas.isnull |
# -*- coding: utf-8 -*-
"""
Authors: <NAME>
UNESCO-IHE 2016
Contact: <EMAIL>
Repository: https://github.com/wateraccounting/wa
Module: Sheets/sheet1
"""
import os
import pandas as pd
import time
import xml.etree.ElementTree as ET
import subprocess
def create_sheet3(basin, period, units, data, output, template=False):
"""
Keyword arguments:
basin -- The name of the basin
period -- The period of analysis
units -- A list with the units of the data:
[<water consumption>, <land productivity>, <water productivity>]
data -- A list (length 2) with the csv files that contain the water data. The csv files
have to follow a specific format. A sample csv is available in the link:
https://github.com/wateraccounting/wa/tree/master/Sheets/csv
output -- A list (length 2) with the output paths of the jpg files
for the two parts of the sheet
template -- A list (length 2) of the svg files of the sheet.
Use False (default) to use the standard svg files.
Example:
from wa.Sheets import *
create_sheet3(basin='Helmand', period='2007-2011',
units=['km3/yr', 'kg/ha/yr', 'kg/m3'],
data=[r'C:\Sheets\csv\Sample_sheet3_part1.csv',
r'C:\Sheets\csv\Sample_sheet3_part2.csv'],
output=[r'C:\Sheets\sheet_3_part1.jpg',
r'C:\Sheets\sheet_3_part2.jpg'])
"""
# Read table
df1 = pd.read_csv(data[0], sep=';')
df2 = pd.read_csv(data[1], sep=';')
# Data frames
df1c = df1.loc[df1.USE == "CROP"]
df1n = df1.loc[df1.USE == "NON-CROP"]
df2c = df2.loc[df2.USE == "CROP"]
df2n = df2.loc[df2.USE == "NON-CROP"]
# Read csv file part 1
crop_r01c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c01 = crop_r02c01 + crop_r03c01
crop_r01c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c02 = crop_r02c02 + crop_r03c02
crop_r01c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c03 = crop_r02c03 + crop_r03c03
crop_r01c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c04 = crop_r02c04 + crop_r03c04
crop_r01c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c05 = crop_r02c05 + crop_r03c05
crop_r01c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c06 = crop_r02c06 + crop_r03c06
crop_r01c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c07 = crop_r02c07 + crop_r03c07
crop_r01c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c08 = crop_r02c08 + crop_r03c08
crop_r01c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c09 = crop_r02c09 + crop_r03c09
crop_r01c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c10 = crop_r02c10 + crop_r03c10
crop_r01c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c11 = crop_r02c11 + crop_r03c11
crop_r01c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c12 = crop_r02c12 + crop_r03c12
noncrop_r01c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c01 = noncrop_r02c01 + noncrop_r03c01
noncrop_r01c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c02 = noncrop_r02c02 + noncrop_r03c02
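# Row totals across the crop columns (c01-c12) and non-crop columns (c01-c02):
# r01 sums total ET, r02 sums ET rainfall, r03 sums incremental ET, and
# r04 = r02 + r03, mirroring the per-column definitions above.
# Note: pd.np is the numpy module bundled with older pandas releases; it is
# deprecated in newer pandas, where np.nansum (after "import numpy as np") is
# the direct replacement.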
crop_r01 = pd.np.nansum([crop_r01c01, crop_r01c02, crop_r01c03,
crop_r01c04, crop_r01c05, crop_r01c06,
crop_r01c07, crop_r01c08, crop_r01c09,
crop_r01c10, crop_r01c11, crop_r01c12])
crop_r02 = pd.np.nansum([crop_r02c01, crop_r02c02, crop_r02c03,
crop_r02c04, crop_r02c05, crop_r02c06,
crop_r02c07, crop_r02c08, crop_r02c09,
crop_r02c10, crop_r02c11, crop_r02c12])
crop_r03 = pd.np.nansum([crop_r03c01, crop_r03c02, crop_r03c03,
crop_r03c04, crop_r03c05, crop_r03c06,
crop_r03c07, crop_r03c08, crop_r03c09,
crop_r03c10, crop_r03c11, crop_r03c12])
crop_r04 = crop_r02 + crop_r03
noncrop_r01 = pd.np.nansum([noncrop_r01c01, noncrop_r01c02])
noncrop_r02 = pd.np.nansum([noncrop_r02c01, noncrop_r02c02])
noncrop_r03 = pd.np.nansum([noncrop_r03c01, noncrop_r03c02])
noncrop_r04 = noncrop_r02 + noncrop_r03
ag_water_cons = crop_r01 + crop_r04 + noncrop_r01 + noncrop_r04
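# Aggregate agricultural water consumption combines the crop and non-crop totals
# (r01) with their rainfall + incremental components (r04); it is written into
# the Part 1 template further below.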
# Read csv file part 2
# Land productivity
lp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
# Water productivity
wp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
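# Illustrative sketch (not used by this script): the repeated float(df.loc[...])
# lookups above could be wrapped in a small helper. The helper name and signature
# below are an assumption, not part of the original code:
# def table_value(df, column, **filters):
#     mask = pd.Series(True, index=df.index)
#     for key, value in filters.items():
#         mask &= df[key] == value
#     return float(df.loc[mask, column])
# e.g. table_value(df2c, 'WATER_PRODUCTIVITY', TYPE='Cereals', SUBCLASS='Yield')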
# Calculations & modify svgs
if not template:
path = os.path.dirname(os.path.abspath(__file__))
svg_template_path_1 = os.path.join(path, 'svg', 'sheet_3_part1.svg')
svg_template_path_2 = os.path.join(path, 'svg', 'sheet_3_part2.svg')
else:
svg_template_path_1 = os.path.abspath(template[0])
svg_template_path_2 = os.path.abspath(template[1])
tree1 = ET.parse(svg_template_path_1)
tree2 = ET.parse(svg_template_path_2)
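# Both SVG templates are parsed with xml.etree.ElementTree; text boxes are then
# located by their 'id' attribute and their first child text node is overwritten.
# Element.getchildren() used below is the older ElementTree API (deprecated since
# Python 3.2 and removed in 3.9); list(element) or element[0] is the modern form.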
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Titles
xml_txt_box = tree1.findall('''.//*[@id='basin']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree1.findall('''.//*[@id='period']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree1.findall('''.//*[@id='units']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 1: Agricultural water consumption (' + units[0] + ')'
xml_txt_box = tree2.findall('''.//*[@id='basin2']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree2.findall('''.//*[@id='period2']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree2.findall('''.//*[@id='units2']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 2: Land productivity (' + units[1] + ') and water productivity (' + units[2] + ')'
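# Part 1 cells: each block below looks up a text box by id and writes the value
# with two decimals, or '-' when the value is missing (NaN). A hypothetical helper
# such as fill_box(tree, box_id, value, fmt='%.2f') could express this pattern
# once; it is not defined in the original script.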
# Part 1
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c01']''')[0]
if not pd.isnull(crop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c02']''')[0]
if not pd.isnull(crop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c03']''')[0]
if not pd.isnull(crop_r01c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c04']''')[0]
if not pd.isnull(crop_r01c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c05']''')[0]
if not pd.isnull(crop_r01c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c06']''')[0]
if not pd.isnull(crop_r01c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c07']''')[0]
if not pd.isnull(crop_r01c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c08']''')[0]
if not pd.isnull(crop_r01c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c09']''')[0]
if not pd.isnull(crop_r01c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c10']''')[0]
if not pd.isnull(crop_r01c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c11']''')[0]
if not pd.isnull(crop_r01c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c12']''')[0]
if not pd.isnull(crop_r01c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01']''')[0]
if not pd.isnull(crop_r01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c01']''')[0]
if not pd.isnull(crop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c02']''')[0]
if not pd.isnull(crop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c03']''')[0]
if not pd.isnull(crop_r02c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c04']''')[0]
if not pd.isnull(crop_r02c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c05']''')[0]
if not pd.isnull(crop_r02c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c06']''')[0]
if not pd.isnull(crop_r02c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c07']''')[0]
if not pd.isnull(crop_r02c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c08']''')[0]
if not pd.isnull(crop_r02c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c09']''')[0]
if not pd.isnull(crop_r02c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c10']''')[0]
if not pd.isnull(crop_r02c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c11']''')[0]
if not pd.isnull(crop_r02c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c12']''')[0]
if not pd.isnull(crop_r02c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02']''')[0]
if not pd.isnull(crop_r02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c01']''')[0]
if not pd.isnull(crop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c02']''')[0]
if not pd.isnull(crop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c03']''')[0]
if not pd.isnull(crop_r03c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c04']''')[0]
if not pd.isnull(crop_r03c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c05']''')[0]
if not pd.isnull(crop_r03c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c06']''')[0]
if not pd.isnull(crop_r03c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c07']''')[0]
if not pd.isnull(crop_r03c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c08']''')[0]
if not pd.isnull(crop_r03c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c09']''')[0]
if not pd.isnull(crop_r03c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c10']''')[0]
if not pd.isnull(crop_r03c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c11']''')[0]
if not pd.isnull(crop_r03c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c12']''')[0]
if not pd.isnull(crop_r03c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03']''')[0]
if not pd.isnull(crop_r03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c01']''')[0]
if not pd.isnull(crop_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c02']''')[0]
if not pd.isnull(crop_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c03']''')[0]
if not pd.isnull(crop_r04c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c04']''')[0]
if not pd.isnull(crop_r04c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c05']''')[0]
if not pd.isnull(crop_r04c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c06']''')[0]
if not pd.isnull(crop_r04c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c07']''')[0]
if not pd.isnull(crop_r04c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c08']''')[0]
if not pd.isnull(crop_r04c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c09']''')[0]
if not pd.isnull(crop_r04c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c10']''')[0]
if not pd.isnull(crop_r04c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c11']''')[0]
if not pd.isnull(crop_r04c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c12']''')[0]
if not pd.isnull(crop_r04c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04']''')[0]
if not pd.isnull(crop_r04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01c01']''')[0]
if not pd.isnull(noncrop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01c02']''')[0]
if not pd.isnull(noncrop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01']''')[0]
if not pd.isnull(noncrop_r01) and noncrop_r01 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02c01']''')[0]
if not pd.isnull(noncrop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02c02']''')[0]
if not pd.isnull(noncrop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02']''')[0]
if not pd.isnull(noncrop_r02) and noncrop_r02 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03c01']''')[0]
if not pd.isnull(noncrop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03c02']''')[0]
if not pd.isnull(noncrop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03']''')[0]
if not pd.isnull(noncrop_r03) and noncrop_r03 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04c01']''')[0]
if not pd.isnull(noncrop_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04c02']''')[0]
if not pd.isnull(noncrop_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04']''')[0]
if not pd.isnull(noncrop_r04) and noncrop_r04 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04
else:
xml_txt_box.getchildren()[0].text = '-'
# Total agricultural water consumption (closes Part 1)
xml_txt_box = tree1.findall('''.//*[@id='ag_water_cons']''')[0]
if not pd.isnull(ag_water_cons):
xml_txt_box.getchildren()[0].text = '%.2f' % ag_water_cons
else:
xml_txt_box.getchildren()[0].text = '-'
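# Part 2: land productivity (lp_*) is written to the second template (tree2) as
# whole numbers ('%.0f'), water productivity (wp_*) with two decimals ('%.2f').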
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c01']''')[0]
if not pd.isnull(lp_r01c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c02']''')[0]
if not pd.isnull(lp_r01c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c03']''')[0]
if not pd.isnull(lp_r01c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c04']''')[0]
if not pd.isnull(lp_r01c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c05']''')[0]
if not pd.isnull(lp_r01c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c06']''')[0]
if not pd.isnull(lp_r01c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c07']''')[0]
if not pd.isnull(lp_r01c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c08']''')[0]
if not pd.isnull(lp_r01c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c09']''')[0]
if not pd.isnull(lp_r01c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c10']''')[0]
if not pd.isnull(lp_r01c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c11']''')[0]
if not pd.isnull(lp_r01c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c12']''')[0]
if not pd.isnull(lp_r01c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c01']''')[0]
if not pd.isnull(lp_r02c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c02']''')[0]
if not pd.isnull(lp_r02c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c03']''')[0]
if not pd.isnull(lp_r02c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c04']''')[0]
if not pd.isnull(lp_r02c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c05']''')[0]
if not pd.isnull(lp_r02c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c06']''')[0]
if not pd.isnull(lp_r02c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c07']''')[0]
if not pd.isnull(lp_r02c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c08']''')[0]
if not pd.isnull(lp_r02c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c09']''')[0]
if not pd.isnull(lp_r02c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c10']''')[0]
if not pd.isnull(lp_r02c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c11']''')[0]
if not pd.isnull(lp_r02c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c12']''')[0]
if not pd.isnull(lp_r02c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c01']''')[0]
if not pd.isnull(lp_r03c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c02']''')[0]
if not pd.isnull(lp_r03c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c03']''')[0]
if not pd.isnull(lp_r03c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c04']''')[0]
if not pd.isnull(lp_r03c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c05']''')[0]
if not pd.isnull(lp_r03c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c06']''')[0]
if not pd.isnull(lp_r03c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c07']''')[0]
if not pd.isnull(lp_r03c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c08']''')[0]
if not pd.isnull(lp_r03c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c09']''')[0]
if not pd.isnull(lp_r03c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c10']''')[0]
if not pd.isnull(lp_r03c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c11']''')[0]
if not pd.isnull(lp_r03c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c12']''')[0]
if not pd.isnull(lp_r03c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c01']''')[0]
if not pd.isnull(lp_r04c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c02']''')[0]
if not pd.isnull(lp_r04c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c03']''')[0]
if not pd.isnull(lp_r04c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c04']''')[0]
if not pd.isnull(lp_r04c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c05']''')[0]
if not pd.isnull(lp_r04c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c06']''')[0]
if not pd.isnull(lp_r04c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c07']''')[0]
if not pd.isnull(lp_r04c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c08']''')[0]
if not pd.isnull(lp_r04c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c09']''')[0]
if not pd.isnull(lp_r04c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c10']''')[0]
if not pd.isnull(lp_r04c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c11']''')[0]
if not pd.isnull(lp_r04c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c12']''')[0]
if not pd.isnull(lp_r04c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c12
else:
xml_txt_box.getchildren()[0].text = '-'
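# Water productivity boxes: same row/column ids with the wp_ prefix, two decimals.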
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c01']''')[0]
if not pd.isnull(wp_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c02']''')[0]
if not pd.isnull(wp_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c03']''')[0]
if not pd.isnull(wp_r01c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c04']''')[0]
if not pd.isnull(wp_r01c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c05']''')[0]
if not pd.isnull(wp_r01c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c06']''')[0]
if not pd.isnull(wp_r01c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c07']''')[0]
if not pd.isnull(wp_r01c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c08']''')[0]
if not pd.isnull(wp_r01c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c09']''')[0]
if not pd.isnull(wp_r01c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c10']''')[0]
if not pd.isnull(wp_r01c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c11']''')[0]
if not pd.isnull(wp_r01c11):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c12']''')[0]
if not pd.isnull(wp_r01c12):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c01']''')[0]
if not pd.isnull(wp_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c02']''')[0]
if not pd.isnull(wp_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c03']''')[0]
if not pd.isnull(wp_r02c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c04']''')[0]
if not pd.isnull(wp_r02c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c05']''')[0]
if not pd.isnull(wp_r02c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c06']''')[0]
if not pd.isnull(wp_r02c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c07']''')[0]
if not pd.isnull(wp_r02c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c08']''')[0]
if not pd.isnull(wp_r02c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c09']''')[0]
if not pd.isnull(wp_r02c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c10']''')[0]
if not pd.isnull(wp_r02c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c11']''')[0]
if not pd.isnull(wp_r02c11):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c12']''')[0]
if not pd.isnull(wp_r02c12):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c01']''')[0]
if not pd.isnull(wp_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c02']''')[0]
if not pd.isnull(wp_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c03']''')[0]
if not pd.isnull(wp_r03c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
# To add a new cell, type '#%%'
# To add a new markdown cell, type '#%% [markdown]'
#%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
# ms-python.python added
import os
try:
os.chdir(os.path.join(os.getcwd(), 'eddy_src/q_learning_stock'))
print(os.getcwd())
except:
pass
import numpy as np
import pandas as pd
import indicators
import util
import config
import pred_model
import matplotlib.pyplot as plt
import datetime
import sys
from bokeh.io import curdoc, output_notebook, show
from bokeh.layouts import row, column, gridplot
from bokeh.models import ColumnDataSource, RangeTool, BoxAnnotation, HoverTool
from bokeh.models.widgets import Slider, TextInput
from bokeh.plotting import figure, output_file
from bokeh.core.properties import value
#%%
# Stock formed indexes vs actual index
def plot_passive_daily_comparisons(df_list: list, stock: str):
"""Plot the rebalanced (switching) portfolio against the passive index.
**First dataframe must be the portfolio with switching (its 'Net' column is
plotted); the second must be the passive index NAV for `stock`.**
"""
temp_df1 = df_list[0].iloc[0:0]
# temp_df1.drop(temp_df1.columns[0],axis=1,inplace=True)
temp_df2 = df_list[1].iloc[0:0]
# temp_df2.drop(temp_df2.columns[0],axis=1,inplace=True)
temp_date_range = df_list[0]['Date'].tolist()
for date in temp_date_range:
df1 = df_list[0][df_list[0]['Date'] == date]
# df1.drop(df1.columns[0],axis=1,inplace=True)
# print(df1)
# sys.exit()
df2 = df_list[1][df_list[1]['Date'] == date]
if not (df1.empty or df2.empty):
temp_df1 = temp_df1.append(df1, ignore_index=True)  # append returns a new frame; reassign
temp_df2 = temp_df2.append(df2, ignore_index=True)
# print(temp_df1)
# sys.exit()
p = figure(title="Daily price Comparison", x_axis_type='datetime', background_fill_color="#fafafa")
p.add_tools(HoverTool(
tooltips=[
( 'Date', '@x{%F}'),
( 'Price', '$@y{%0.2f}'), # use @{ } for field names with spaces
],
formatters={
'x': 'datetime', # use 'datetime' formatter for 'date' field,
'y' : 'printf'
},
mode='mouse'
))
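# Hovering either line shows the date and the price formatted to two decimals.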
p.line(temp_df1['Date'].tolist(), temp_df1['Net'].values.tolist(), legend="Rebalanced stock portfolio",
line_color="black")
p.line(temp_df2['Date'].tolist(), temp_df2[stock].values.tolist(), legend=f"{stock} index")
p.legend.location = "top_left"
show(p)
stock_list = ['^BVSP', '^TWII', '^IXIC']
for symbol in stock_list:
daily_df = pd.read_csv(f'data/algo/{symbol}/daily_nav.csv', parse_dates=['Date'])
passive_daily_df = pd.read_csv('data/algo/index/passive_daily_nav.csv', parse_dates=['Date'])
df_list = [daily_df, passive_daily_df]
plot_passive_daily_comparisons(df_list, symbol)
#%%
df1 = pd.read_csv('data/goldman/GGSIX.csv')
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 20 10:24:34 2019
@author: labadmin
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 21:05:32 2019
@author: Hassan
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier as GBC
from imblearn.over_sampling import RandomOverSampler
from imblearn.over_sampling import BorderlineSMOTE
from imblearn.over_sampling import SMOTENC
data_ben1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset1.csv",skiprows=4)
data_ben2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset2.csv",skiprows=4)
data_ben3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset3.csv",skiprows=4)
data_ben4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset4.csv",skiprows=4)
data_ben5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset5.csv",skiprows=4)
data_ben6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset6.csv",skiprows=4)
data_ben7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset7.csv",skiprows=4)
frames_ben1 = [data_ben1,data_ben2,data_ben3,data_ben4,data_ben5,data_ben6,data_ben7]
result_ben1 = pd.concat(frames_ben1)
result_ben1.index=range(3360)
df_ben1 = pd.DataFrame({'label': [1]},index=range(0,3360))
dat_ben1=pd.concat([result_ben1,df_ben1],axis=1)
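# Pattern for each activity: concatenate the per-trial CSVs, reset the index to a
# continuous range, and attach a constant integer class label (1 = bending1,
# 2 = bending2, 3 = cycling, and so on for the remaining activities).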
#-------------------------------------------------------------------------------------------------
data__ben1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset1.csv",skiprows=4)
data__ben2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset2.csv",skiprows=4)
data__ben3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset3.csv",skiprows=4)
data__ben4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset4.csv",skiprows=4)
data__ben4=data__ben4['# Columns: time'].str.split(expand=True)
data__ben4.columns=['# Columns: time','avg_rss12','var_rss12','avg_rss13','var_rss13','avg_rss23','var_rss23']
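# dataset4 is whitespace-separated rather than comma-separated, so it is read as a
# single column, split on whitespace, and given the standard seven column names.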
data__ben5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset5.csv",skiprows=4)
data__ben6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset6.csv",skiprows=4)
frames_ben2 = [data__ben1,data__ben2,data__ben3,data__ben4,data__ben5,data__ben6]
result_ben2 = pd.concat(frames_ben2)
result_ben2.index=range(2880)
df_ben2 = pd.DataFrame({'label': [2]},index=range(0,2880))
dat__ben2=pd.concat([result_ben2,df_ben2],axis=1)
#-----------------------------------------------------------------------------------------------------
data_cyc1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset1.csv",skiprows=4)
data_cyc2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset2.csv",skiprows=4)
data_cyc3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset3.csv",skiprows=4)
data_cyc4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset4.csv",skiprows=4)
data_cyc5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset5.csv",skiprows=4)
data_cyc6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset6.csv",skiprows=4)
data_cyc7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset7.csv",skiprows=4)
data_cyc8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset8.csv",skiprows=4)
data_cyc9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset99.csv",skiprows=4)
data_cyc10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset10.csv",skiprows=4)
data_cyc11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset11.csv",skiprows=4)
data_cyc12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset12.csv",skiprows=4)
data_cyc13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset13.csv",skiprows=4)
data_cyc14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset144.csv",skiprows=4)
data_cyc15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset15.csv",skiprows=4)
frames_cyc = [data_cyc1,data_cyc2,data_cyc3,data_cyc4,data_cyc5,data_cyc6,data_cyc7,data_cyc8,data_cyc9,data_cyc10,data_cyc11,data_cyc12,data_cyc13,data_cyc14,data_cyc15]
result_cyc = pd.concat(frames_cyc)
result_cyc.index=range(7200)
df_cyc = pd.DataFrame({'label': [3]},index=range(0,7200))
data_cyc=pd.concat([result_cyc,df_cyc],axis=1)
#----------------------------------------------------------------------------------------------
data_ly1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset1.csv",skiprows=4)
data_ly2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset2.csv",skiprows=4)
data_ly3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset3.csv",skiprows=4)
data_ly4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset4.csv",skiprows=4)
data_ly5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset5.csv",skiprows=4)
data_ly6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset6.csv",skiprows=4)
data_ly7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset7.csv",skiprows=4)
data_ly8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset8.csv",skiprows=4)
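# Illustrative helper (an assumption, not part of the original script): the
# repeated "read trial CSVs, concatenate, attach a numeric label" blocks above
# could be written once as a function like this, given the same file layout.
def load_activity_frames(file_paths, label):
    """Concatenate trial CSVs for one activity and attach an integer class label."""
    frames = [pd.read_csv(path, skiprows=4) for path in file_paths]
    combined = pd.concat(frames, ignore_index=True)
    combined['label'] = label
    return combined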
import sys
import os
sys.path.append(os.getcwd())
import pyomo.environ as pyo
from typing import Dict, List, Tuple
from time import time
from pyomo.core import Constraint # TODO: Remove this and use pyo.Constraint.Feasible/Skip
from tabulate import tabulate
from pyomo.util.infeasible import log_infeasible_constraints
import argparse
import pandas as pd
from src.alns.solution import ProblemDataExtended
from src.read_problem_data import ProblemData
import src.alns.alns
class FfprpModel:
def __init__(self,
prbl: ProblemData,
extended_model: bool = False,
y_init_dict: Dict[Tuple[str, str, int], int] = None,
) -> None:
# GENERAL MODEL SETUP
self.m = pyo.ConcreteModel()
self.solver_factory = pyo.SolverFactory('gurobi')
self.results = None
self.solution = None
self.extended_model = extended_model
################################################################################################################
# SETS #########################################################################################################
# NODE SETS
self.m.NODES = pyo.Set(initialize=prbl.nodes)
self.m.NODES_INCLUDING_DUMMIES = pyo.Set(
initialize=prbl.nodes + ['d_0', 'd_-1']) # d_0 is dummy origin, d_-1 is dummy destination
self.m.NODES_INCLUDING_DUMMY_START = pyo.Set(initialize=prbl.nodes + ['d_0'])
self.m.NODES_INCLUDING_DUMMY_END = pyo.Set(initialize=prbl.nodes + ['d_-1'])
self.m.FACTORY_NODES = pyo.Set(initialize=prbl.factory_nodes)
self.m.ORDER_NODES = pyo.Set(initialize=prbl.order_nodes)
self.m.PRODUCTS = pyo.Set(initialize=prbl.products)
self.m.VESSELS = pyo.Set(initialize=prbl.vessels)
# ARCS
self.m.ARCS = pyo.Set(self.m.VESSELS,
initialize=prbl.arcs_for_vessels)
arcs_for_vessels_trip = [(v, i, j) for v in self.m.VESSELS for i, j in prbl.arcs_for_vessels[v]]
self.m.ARCS_FOR_VESSELS_TRIP = pyo.Set(initialize=arcs_for_vessels_trip)
# TIME PERIOD SETS
self.m.TIME_PERIODS = pyo.Set(initialize=prbl.time_periods)
# TUPLE SETS
orders_related_to_nodes_tup = [(factory_node, order_node)
for factory_node in prbl.factory_nodes
for order_node in prbl.order_nodes] + [
(order_node, order_node) for order_node in prbl.order_nodes]
self.m.ORDERS_RELATED_TO_NODES_TUP = pyo.Set(dimen=2, initialize=orders_related_to_nodes_tup)
nodes_for_vessels_tup = [(vessel, node)
for vessel, node in prbl.nodes_for_vessels.keys()
if prbl.nodes_for_vessels[vessel, node] == 1]
self.m.NODES_FOR_VESSELS_TUP = pyo.Set(dimen=2, initialize=nodes_for_vessels_tup)
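# Pattern used throughout: binary indicator dicts from the problem data are
# filtered down to the key tuples whose value is 1, and those tuples initialize
# sparse Pyomo index sets.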
factory_nodes_for_vessels_tup = [(vessel, node)
for vessel, node in nodes_for_vessels_tup
if node in prbl.factory_nodes]
self.m.FACTORY_NODES_FOR_VESSELS_TUP = pyo.Set(dimen=2, initialize=factory_nodes_for_vessels_tup)
order_nodes_for_vessels_tup = [(vessel, node)
for vessel, node in nodes_for_vessels_tup
if node in prbl.order_nodes]
self.m.ORDER_NODES_FOR_VESSELS_TUP = pyo.Set(dimen=2, initialize=order_nodes_for_vessels_tup)
vessels_relevantnodes_ordernodes = [(vessel, relevant_node, order_node)
for vessel, order_node in order_nodes_for_vessels_tup
for relevant_node, order_node2 in orders_related_to_nodes_tup
if order_node2 == order_node
and (vessel, relevant_node) in nodes_for_vessels_tup
]
self.m.ORDER_NODES_RELEVANT_NODES_FOR_VESSELS_TRIP = pyo.Set(dimen=3,
initialize=vessels_relevantnodes_ordernodes)
vessels_factorynodes_ordernodes = [(vessel, factory_node, order_node)
for vessel, order_node in order_nodes_for_vessels_tup
for vessel2, factory_node in factory_nodes_for_vessels_tup
if vessel == vessel2
]
self.m.ORDER_NODES_FACTORY_NODES_FOR_VESSELS_TRIP = pyo.Set(dimen=3, initialize=vessels_factorynodes_ordernodes)
vessels_for_factory_nodes_tup = [(node, vessel)
for vessel, node in nodes_for_vessels_tup
if node in prbl.factory_nodes]
self.m.VESSELS_FOR_FACTORY_NODES_TUP = pyo.Set(dimen=2, initialize=vessels_for_factory_nodes_tup)
time_windows_for_orders_tup = [(order, time_period)
for order, time_period in prbl.time_windows_for_orders.keys()
if prbl.time_windows_for_orders[order, time_period] == 1]
self.m.TIME_WINDOWS_FOR_ORDERS_TUP = pyo.Set(dimen=2, initialize=time_windows_for_orders_tup)
self.m.PRODUCTION_LINES = pyo.Set(initialize=prbl.production_lines)
self.m.PRODUCTION_LINES_FOR_FACTORIES_TUP = pyo.Set(dimen=2, initialize=prbl.production_lines_for_factories)
products_within_same_product_group_tup = [(prod1, prod2)
for product_group in prbl.product_groups.keys()
for prod1 in prbl.product_groups[product_group]
for prod2 in prbl.product_groups[product_group]]
self.m.PRODUCTS_WITHIN_SAME_PRODUCT_GROUP_TUP = pyo.Set(dimen=2,
initialize=products_within_same_product_group_tup)
self.m.ZONES = pyo.Set(initialize=prbl.orders_for_zones.keys())
orders_for_zones_tup = [(zone, order)
for zone, li in prbl.orders_for_zones.items()
for order in li]
self.m.ORDERS_FOR_ZONES_TUP = pyo.Set(dimen=2, initialize=orders_for_zones_tup)
green_nodes_for_vessel_tup = [(vessel, node)
for vessel, node in nodes_for_vessels_tup
if node in prbl.orders_for_zones['green']]
self.m.GREEN_NODES_FOR_VESSEL_TUP = pyo.Set(dimen=2, initialize=green_nodes_for_vessel_tup)
green_and_yellow_nodes_for_vessel_tup = [(vessel, node)
for vessel, node in nodes_for_vessels_tup
if node in prbl.orders_for_zones['green'] + prbl.orders_for_zones[
'yellow']]
self.m.GREEN_AND_YELLOW_NODES_FOR_VESSEL_TUP = pyo.Set(initialize=green_and_yellow_nodes_for_vessel_tup)
# sick_arcs_tup = list(set([(orig, dest)
# for (v, orig, dest) in prbl.min_wait_if_sick.keys()
# if prbl.min_wait_if_sick[v, orig, dest] > 0]))
# self.m.WAIT_EDGES = pyo.Set(dimen=2,
# initialize=sick_arcs_tup) # prbl.min_wait_if_sick.keys())
wait_edges_for_vessels_trip = [(v, i, j)
for v in self.m.VESSELS
for u, i, j in prbl.min_wait_if_sick.keys()
if u == v and (i, j) in self.m.ARCS[v]]
self.m.WAIT_EDGES_FOR_VESSEL_TRIP = pyo.Set(dimen=3, initialize=wait_edges_for_vessels_trip)
# Extension
if extended_model:
self.m.TIME_WINDOW_VIOLATIONS = pyo.Set(
initialize=[i for i in range(-prbl.max_tw_violation, prbl.max_tw_violation + 1)])
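            # Possible time-window deviations k: negative k means delivery k periods before the window opens,
            # positive k means k periods after it closes (see the lambd variable and the TW-violation constraints).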
print("Done setting sets!")
################################################################################################################
# PARAMETERS ###################################################################################################
self.m.vessel_ton_capacities = pyo.Param(self.m.VESSELS,
initialize=prbl.vessel_ton_capacities)
self.m.vessel_nprod_capacities = pyo.Param(self.m.VESSELS,
initialize=prbl.vessel_nprod_capacities)
# self.m.production_min_capacities = pyo.Param(self.m.PRODUCTION_LINES,
# self.m.PRODUCTS,
# initialize=prbl.production_min_capacities)
self.m.production_max_capacities = pyo.Param(self.m.PRODUCTION_LINES,
self.m.PRODUCTS,
initialize=prbl.production_max_capacities)
self.m.production_start_costs = pyo.Param(self.m.FACTORY_NODES,
self.m.PRODUCTS,
initialize=prbl.production_start_costs)
self.m.production_stops = pyo.Param(self.m.FACTORY_NODES,
self.m.TIME_PERIODS,
initialize=prbl.production_stops)
self.m.factory_inventory_capacities = pyo.Param(self.m.FACTORY_NODES,
initialize=prbl.factory_inventory_capacities)
self.m.factory_initial_inventories = pyo.Param(self.m.FACTORY_NODES,
self.m.PRODUCTS,
initialize=prbl.factory_initial_inventories)
self.m.inventory_unit_costs = pyo.Param(self.m.FACTORY_NODES,
initialize=prbl.inventory_unit_costs)
self.m.transport_unit_costs = pyo.Param(self.m.VESSELS,
initialize=prbl.transport_unit_costs)
self.m.transport_times = pyo.Param(self.m.VESSELS,
self.m.NODES_INCLUDING_DUMMY_START,
self.m.NODES_INCLUDING_DUMMY_END,
initialize=prbl.transport_times)
self.m.transport_times_exact = pyo.Param(self.m.VESSELS,
self.m.NODES_INCLUDING_DUMMY_START,
self.m.NODES_INCLUDING_DUMMY_END,
initialize=prbl.transport_times_exact)
self.m.loading_unloading_times = pyo.Param(self.m.VESSELS,
self.m.NODES,
initialize=prbl.loading_unloading_times)
self.m.demands = pyo.Param(self.m.ORDERS_RELATED_TO_NODES_TUP,
self.m.PRODUCTS,
initialize=prbl.demands)
self.m.production_line_min_times = pyo.Param(self.m.PRODUCTION_LINES,
self.m.PRODUCTS,
initialize=prbl.production_line_min_times)
self.m.start_time_for_vessels = pyo.Param(self.m.VESSELS,
initialize=prbl.start_times_for_vessels)
self.m.vessel_initial_locations = pyo.Param(self.m.VESSELS,
initialize=prbl.vessel_initial_locations,
within=pyo.Any)
self.m.factory_max_vessels_destination = pyo.Param(self.m.FACTORY_NODES,
initialize=prbl.factory_max_vessels_destination)
self.m.factory_max_vessels_loading = pyo.Param(self.m.FACTORY_NODES,
self.m.TIME_PERIODS,
initialize=prbl.factory_max_vessels_loading)
self.m.external_delivery_penalties = pyo.Param(self.m.ORDER_NODES,
initialize=prbl.external_delivery_penalties)
self.m.min_wait_if_sick = pyo.Param(self.m.WAIT_EDGES_FOR_VESSEL_TRIP,
initialize=prbl.min_wait_if_sick)
# self.m.warm_start = pyo.Param({0},
# initialize={0:False},
# mutable=True)
# Extension
if extended_model:
tw_violation_unit_cost = {k: prbl.tw_violation_unit_cost * abs(k) for k in self.m.TIME_WINDOW_VIOLATIONS}
self.m.time_window_violation_cost = pyo.Param(self.m.TIME_WINDOW_VIOLATIONS,
initialize=tw_violation_unit_cost)
# Fetch first (min) and last (max) time period within the time window of each order
tw_min, tw_max = {}, {}
for i in self.m.ORDER_NODES:
tw_min[i] = min(t for i2, t in time_windows_for_orders_tup if i == i2)
tw_max[i] = max(t for i2, t in time_windows_for_orders_tup if i == i2)
self.m.tw_min = pyo.Param(self.m.ORDER_NODES, initialize=tw_min)
self.m.tw_max = pyo.Param(self.m.ORDER_NODES, initialize=tw_max)
            # Inventory targets are required by constr_rewarded_inventory_below_inventory_target and by the
            # final-inventory printout, so the parameter is defined here (extended model only).
            self.m.inventory_targets = pyo.Param(self.m.FACTORY_NODES,
                                                 self.m.PRODUCTS,
                                                 initialize=prbl.inventory_targets)
self.m.inventory_unit_rewards = pyo.Param(self.m.FACTORY_NODES,
initialize=prbl.inventory_unit_rewards)
print("Done setting parameters!")
################################################################################################################
# VARIABLES ####################################################################################################
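        # Decision variables (interpretation based on the constraints and the result printing below):
        #   x[v, i, j, t]   1 if vessel v starts sailing arc (i, j) in period t
        #   w[v, i, t]      1 if vessel v waits at node i in period t
        #   y[v, i, t]      1 if vessel v starts loading (factory node) / unloading (order node) at i in period t
        #   z[v, i, j, t]   1 if vessel v handles order j at node i in period t (pickup at a factory or delivery at the order node)
        #   l[v, p, t]      tons of product p on board vessel v in period t
        #   h[v, p, t]      1 if vessel v carries product p in period t
        #   s[i, p, t]      inventory of product p at factory i in period t
        #   g[l, p, t]      1 if production line l produces product p in period t
        #   a[l, t]         1 if production line l is idle in period t
        #   delta[l, p, t]  1 if production of product p is started on line l in period t
        #   e[i]            1 if order i is not delivered (served externally at a penalty)
        # Extension only: lambd[i, k] (order i delivered with time-window deviation k) and
        #                 s_plus[i, p] (final inventory at factory i that is rewarded in the objective).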
self.m.x = pyo.Var(self.m.ARCS_FOR_VESSELS_TRIP,
self.m.TIME_PERIODS,
domain=pyo.Boolean,
initialize=0)
self.m.w = pyo.Var(self.m.VESSELS,
self.m.NODES,
self.m.TIME_PERIODS,
domain=pyo.Boolean,
initialize=0)
# def y_init(m, v, i, t):
# return y_init_dict[(v, i, t)] if (y_init_dict is not None and prbl.is_order_node(i)) else 0
self.m.y = pyo.Var(self.m.NODES_FOR_VESSELS_TUP,
self.m.TIME_PERIODS,
domain=pyo.Boolean,
initialize=0)
self.m.z = pyo.Var(self.m.ORDER_NODES_RELEVANT_NODES_FOR_VESSELS_TRIP,
self.m.TIME_PERIODS,
domain=pyo.Boolean,
initialize=0)
self.m.l = pyo.Var(self.m.VESSELS,
self.m.PRODUCTS,
self.m.TIME_PERIODS,
domain=pyo.NonNegativeReals,
initialize=0)
self.m.h = pyo.Var(self.m.VESSELS,
self.m.PRODUCTS,
self.m.TIME_PERIODS,
domain=pyo.Boolean,
initialize=0)
# self.m.q = pyo.Var(self.m.PRODUCTION_LINES,
# self.m.PRODUCTS,
# self.m.TIME_PERIODS,
# domain=pyo.NonNegativeReals,
# initialize=0)
self.m.g = pyo.Var(self.m.PRODUCTION_LINES,
self.m.PRODUCTS,
self.m.TIME_PERIODS,
domain=pyo.Boolean,
initialize=0)
self.m.s = pyo.Var(self.m.FACTORY_NODES,
self.m.PRODUCTS,
self.m.TIME_PERIODS,
domain=pyo.NonNegativeReals,
initialize=0)
self.m.a = pyo.Var(self.m.PRODUCTION_LINES,
self.m.TIME_PERIODS,
domain=pyo.Boolean,
initialize=0)
self.m.delta = pyo.Var(self.m.PRODUCTION_LINES,
self.m.PRODUCTS,
self.m.TIME_PERIODS,
domain=pyo.Boolean,
initialize=0)
self.m.e = pyo.Var(self.m.ORDER_NODES,
domain=pyo.Boolean,
initialize=0)
# Extension
if extended_model:
self.m.lambd = pyo.Var(self.m.ORDER_NODES,
self.m.TIME_WINDOW_VIOLATIONS,
domain=pyo.Boolean,
initialize=0)
self.m.s_plus = pyo.Var(self.m.FACTORY_NODES,
self.m.PRODUCTS,
domain=pyo.NonNegativeReals,
initialize=0)
print("Done setting variables!")
################################################################################################################
# OBJECTIVE ####################################################################################################
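        # Minimize factory inventory holding costs + vessel transport costs + production start costs
        # + penalties for orders that are not delivered (served externally). The extended objective also
        # adds time-window violation costs and subtracts a reward for final factory inventory.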
def obj(model):
return (sum(model.inventory_unit_costs[i] * model.s[i, p, t]
for t in model.TIME_PERIODS
for p in model.PRODUCTS
for i in model.FACTORY_NODES)
+ sum(model.transport_unit_costs[v] * model.transport_times_exact[v, i, j] * model.x[v, i, j, t]
for t in model.TIME_PERIODS
for v in model.VESSELS
for i, j in model.ARCS[v]
if i != 'd_0' and j != 'd_-1')
+ sum(model.production_start_costs[i, p] * model.delta[l, p, t]
for i in model.FACTORY_NODES
for (ii, l) in model.PRODUCTION_LINES_FOR_FACTORIES_TUP if i == ii
for p in model.PRODUCTS
for t in model.TIME_PERIODS)
+ sum(model.external_delivery_penalties[i] * model.e[i] for i in model.ORDER_NODES))
def obj_extended(model):
return (obj(model)
+ sum(model.time_window_violation_cost[k] * model.lambd[i, k]
for i in model.ORDER_NODES
for k in model.TIME_WINDOW_VIOLATIONS)
- sum(model.inventory_unit_rewards[i] * model.s_plus[i, p]
for i in model.FACTORY_NODES
for p in model.PRODUCTS))
if extended_model:
self.m.objective = pyo.Objective(rule=obj_extended, sense=pyo.minimize)
else:
self.m.objective = pyo.Objective(rule=obj, sense=pyo.minimize)
print("Done setting objective!")
################################################################################################################
# CONSTRAINTS ##################################################################################################
# def constr_y_heuristic_flying_start(model, v, i, t):
# if prbl.is_factory_node(node_id=i) or not model.warm_start[0]:
# return Constraint.Skip
# return model.y[v, i, t] == int(y_init_dict[v, i, t])
#
# self.m.constr_y_heuristic_flying_start = pyo.Constraint(self.m.NODES_FOR_VESSELS_TUP,
# self.m.TIME_PERIODS,
# rule=constr_y_heuristic_flying_start,
# name="constr_y_heuristic_flying_start")
# def constr_max_one_activity(model, v, t):
# relevant_nodes = {n for (vessel, n) in model.NODES_FOR_VESSELS_TUP if vessel == v}
#
# return (sum(model.y_minus[v, i, t] +
# model.y_plus[v, i, t] +
# sum(model.x[v, i, j, t] for j in relevant_nodes) +
# model.w[v, i, t]
# for i in relevant_nodes)
# <= 1)
def constr_max_one_activity(model, v, t):
if t < model.start_time_for_vessels[v]: # Skip constraint if vessel has not become available in t
return pyo.Constraint.Skip
else:
return (sum(model.y[v, i, tau]
for v2, i in model.NODES_FOR_VESSELS_TUP if v2 == v
for tau in range(max(0, t - model.loading_unloading_times[v, i] + 1), t + 1))
+ sum(model.x[v, i, j, tau]
for i, j in model.ARCS[v]
for tau in range(max(0, t - model.transport_times[v, i, j] + 1), t + 1))
+ sum(model.w[v, i, t]
for v2, i in model.NODES_FOR_VESSELS_TUP if v2 == v)
== 1)
self.m.constr_max_one_activity = pyo.Constraint(self.m.VESSELS,
self.m.TIME_PERIODS,
rule=constr_max_one_activity,
name="constr_max_one_activity")
def constr_max_m_vessels_loading(model, i, t):
return (sum(model.y[v, i, tau]
for i2, v in model.VESSELS_FOR_FACTORY_NODES_TUP if i2 == i
for tau in range(max(0, t - model.loading_unloading_times[v, i] + 1), t + 1))
<= model.factory_max_vessels_loading[i, t])
self.m.constr_max_m_vessels_loading = pyo.Constraint(self.m.FACTORY_NODES,
self.m.TIME_PERIODS,
rule=constr_max_m_vessels_loading,
name="constr_max_m_vessels_loading")
def constr_delivery_within_time_window(model, i):
relevant_vessels = {vessel for (vessel, j) in model.ORDER_NODES_FOR_VESSELS_TUP if j == i}
relevant_time_periods = {t for (j, t) in model.TIME_WINDOWS_FOR_ORDERS_TUP if j == i}
return (sum(model.y[v, i, t] for v in relevant_vessels for t in relevant_time_periods) + model.e[i]
== 1)
self.m.constr_delivery_within_time_window = pyo.Constraint(self.m.ORDER_NODES,
rule=constr_delivery_within_time_window,
name="constr_delivery_within_time_window")
def constr_sailing_after_loading_unloading(model, v, i, t):
loading_unloading_time = pyo.value(model.loading_unloading_times[v, i])
relevant_destination_nodes = [j for i2, j in model.ARCS[v]
if i2 == i
and j != 'd_-1']
if t < loading_unloading_time:
return 0 == sum(model.x[v, i, j, t] for j in relevant_destination_nodes)
else:
return (model.y[v, i, (t - loading_unloading_time)]
==
sum(model.x[v, i, j, t] for j in relevant_destination_nodes))
self.m.constr_sailing_after_loading_unloading = pyo.Constraint(self.m.NODES_FOR_VESSELS_TUP,
self.m.TIME_PERIODS,
rule=constr_sailing_after_loading_unloading,
name="constr_sailing_after_loading_unloading")
def constr_wait_load_unload_after_sailing(model, v, i, t):
relevant_nodes = [j for j, i2 in model.ARCS[v]
if i2 == i
and model.transport_times[v, j, i] <= t]
# Only allow sailing from i to dummy end node if arc is defined
x_to_dummy_end = model.x[v, i, 'd_-1', t] if (i, 'd_-1') in model.ARCS[v] else 0
if t == 0: # exclude w_t-1
return (sum(
model.x[v, j, i, (t - model.transport_times[v, j, i])] for j in relevant_nodes)
==
model.y[v, i, t] + model.w[v, i, t] + x_to_dummy_end)
else:
return (sum(
model.x[v, j, i, (t - model.transport_times[v, j, i])] for j in relevant_nodes)
+ model.w[v, i, (t - 1)]
==
model.y[v, i, t] + model.w[v, i, t] + x_to_dummy_end)
self.m.constr_wait_load_unload_after_sailing = pyo.Constraint(self.m.NODES_FOR_VESSELS_TUP,
self.m.TIME_PERIODS,
rule=constr_wait_load_unload_after_sailing,
name="constr_wait_load_unload_after_sailing")
def constr_start_route(model, v):
return model.x[v, 'd_0', model.vessel_initial_locations[v], model.start_time_for_vessels[v]] == 1
self.m.constr_start_route = pyo.Constraint(self.m.VESSELS, rule=constr_start_route,
name="constr_start_route")
def constr_start_route_once(model, v):
return (sum(model.x[v, 'd_0', j, t]
for i, j in model.ARCS[v] if i == 'd_0'
for t in model.TIME_PERIODS)
== 1)
self.m.constr_start_route_once = pyo.Constraint(self.m.VESSELS, rule=constr_start_route_once,
name="constr_start_route_once")
def constr_end_route_once(model, v):
return (sum(model.x[v, i, 'd_-1', t]
for t in model.TIME_PERIODS
for vessel, i in model.FACTORY_NODES_FOR_VESSELS_TUP
if vessel == v)
== 1)
self.m.constr_end_route_once = pyo.Constraint(self.m.VESSELS, rule=constr_end_route_once,
name="constr_end_route_once")
def constr_maximum_vessels_at_end_destination(model, i):
return (sum(model.x[v, i, 'd_-1', t]
for v in model.VESSELS
for t in model.TIME_PERIODS)
<= model.factory_max_vessels_destination[i])
self.m.constr_maximum_vessels_at_end_destination = pyo.Constraint(self.m.FACTORY_NODES,
rule=constr_maximum_vessels_at_end_destination,
name="constr_maximum_vessels_at_end_destination")
def constr_pickup_requires_factory_visit(model, v, i, j, t):
return model.z[v, i, j, t] <= model.y[v, i, t]
self.m.constr_pickup_requires_factory_visit = pyo.Constraint(self.m.ORDER_NODES_FACTORY_NODES_FOR_VESSELS_TRIP,
self.m.TIME_PERIODS,
rule=constr_pickup_requires_factory_visit,
name="constr_pickup_requires_factory_visit")
def constr_delivery_requires_order_visit(model, v, i, t):
return model.z[v, i, i, t] == model.y[v, i, t]
self.m.constr_delivery_requires_order_visit = pyo.Constraint(self.m.ORDER_NODES_FOR_VESSELS_TUP,
self.m.TIME_PERIODS,
rule=constr_delivery_requires_order_visit,
name="constr_delivery_requires_order_visit")
def constr_vessel_initial_load(model, v, p):
return (model.l[v, p, 0] ==
sum(model.demands[i, j, p] * model.z[v, i, j, 0]
for (v2, i, j) in model.ORDER_NODES_RELEVANT_NODES_FOR_VESSELS_TRIP
if v2 == v))
self.m.constr_vessel_initial_load = pyo.Constraint(self.m.VESSELS,
self.m.PRODUCTS,
rule=constr_vessel_initial_load,
name="constr_vessel_initial_load")
def constr_load_balance(model, v, p, t):
if t == 0:
return Constraint.Feasible
return (model.l[v, p, t] == model.l[v, p, (t - 1)] -
sum(model.demands[i, j, p] * model.z[v, i, j, t]
for (v2, i, j) in model.ORDER_NODES_RELEVANT_NODES_FOR_VESSELS_TRIP
if v2 == v))
self.m.constr_load_balance = pyo.Constraint(self.m.VESSELS,
self.m.PRODUCTS,
self.m.TIME_PERIODS,
rule=constr_load_balance,
name="constr_load_balance")
def constr_product_load_binary_activator(model, v, p, t):
return model.l[v, p, t] <= model.vessel_ton_capacities[v] * model.h[v, p, t]
self.m.constr_product_load_binary_activator = pyo.Constraint(self.m.VESSELS,
self.m.PRODUCTS,
self.m.TIME_PERIODS,
rule=constr_product_load_binary_activator,
name="constr_product_load_binary_activator")
def constr_load_below_vessel_ton_capacity(model, v, t):
if t == 0:
return Constraint.Feasible
return sum(model.l[v, p, (t - 1)] for p in model.PRODUCTS) <= (model.vessel_ton_capacities[v] *
(1 - sum(model.y[v, i, t] for i in
model.FACTORY_NODES)))
self.m.constr_load_below_vessel_ton_capacity = pyo.Constraint(self.m.VESSELS,
self.m.TIME_PERIODS,
rule=constr_load_below_vessel_ton_capacity,
name="constr_load_below_vessel_ton_capacity")
def constr_load_below_vessel_nprod_capacity(model, v, t):
return sum(model.h[v, p, t] for p in model.PRODUCTS) <= model.vessel_nprod_capacities[v]
self.m.constr_load_below_vessel_nprod_capacity = pyo.Constraint(self.m.VESSELS,
self.m.TIME_PERIODS,
rule=constr_load_below_vessel_nprod_capacity,
name="constr_load_below_vessel_nprod_capacity")
# def constr_zero_final_load(model, v, p):
# return model.l[v, p, max(model.TIME_PERIODS)] == 0
#
# self.m.constr_zero_final_load = pyo.Constraint(self.m.VESSELS,
# self.m.PRODUCTS,
# rule=constr_zero_final_load)
def constr_inventory_below_capacity(model, i, t):
return sum(model.s[i, p, t] for p in model.PRODUCTS) <= model.factory_inventory_capacities[i]
self.m.constr_inventory_below_capacity = pyo.Constraint(self.m.FACTORY_NODES,
self.m.TIME_PERIODS,
rule=constr_inventory_below_capacity,
name="constr_inventory_below_capacity")
def constr_initial_inventory(model, i, p):
return (model.s[i, p, 0] == model.factory_initial_inventories[i, p] +
sum(model.demands[i, j, p] * model.z[v, i, j, 0]
for (v, i2, j) in model.ORDER_NODES_RELEVANT_NODES_FOR_VESSELS_TRIP
if i2 == i))
self.m.constr_initial_inventory = pyo.Constraint(self.m.FACTORY_NODES,
self.m.PRODUCTS,
rule=constr_initial_inventory,
name="constr_initial_inventory")
def constr_inventory_balance(model, i, p, t):
if t == 0:
return Constraint.Feasible
return (model.s[i, p, t] == model.s[i, p, (t - 1)]
+ sum(model.production_max_capacities[l, p] * model.g[l, p, (t - 1)]
for (ii, l) in model.PRODUCTION_LINES_FOR_FACTORIES_TUP if ii == i)
+ sum(model.demands[i, j, p] * model.z[v, i, j, t]
for v, i2, j in model.ORDER_NODES_RELEVANT_NODES_FOR_VESSELS_TRIP
if i2 == i))
# sum(model.q[l, p, t - 1] for (ii, l) in model.PRODUCTION_LINES_FOR_FACTORIES_TUP if ii == i) +
self.m.constr_inventory_balance = pyo.Constraint(self.m.FACTORY_NODES,
self.m.PRODUCTS,
self.m.TIME_PERIODS,
rule=constr_inventory_balance,
name="constr_inventory_balance")
def constr_production_below_max_capacity(model, i, l, p, t):
# return (model.q[l, p, t]
# == model.production_stops[i, t] * model.production_max_capacities[l, p] * model.g[l, p, t])
return model.g[l, p, t] <= model.production_stops[i, t]
self.m.constr_production_below_max_capacity = pyo.Constraint(self.m.PRODUCTION_LINES_FOR_FACTORIES_TUP,
self.m.PRODUCTS,
self.m.TIME_PERIODS,
rule=constr_production_below_max_capacity,
name="constr_production_below_max_capacity")
# def constr_production_above_min_capacity(model, l, p, t):
# return model.q[l, p, t] >= model.production_min_capacities[l, p] * model.g[l, p, t]
#
# self.m.constr_production_above_min_capacity = pyo.Constraint(self.m.PRODUCTION_LINES,
# self.m.PRODUCTS,
# self.m.TIME_PERIODS,
# rule=constr_production_above_min_capacity)
def constr_activate_delta(model, l, p, t):
if t == 0:
return Constraint.Feasible
return model.g[l, p, t] - model.g[l, p, t - 1] <= model.delta[l, p, t]
self.m.constr_activate_delta = pyo.Constraint(self.m.PRODUCTION_LINES,
self.m.PRODUCTS,
self.m.TIME_PERIODS,
rule=constr_activate_delta,
name="constr_activate_delta")
def constr_initial_production_start(model, l, p):
return model.delta[l, p, 0] == model.g[l, p, 0]
self.m.constr_initial_production_start = pyo.Constraint(self.m.PRODUCTION_LINES,
self.m.PRODUCTS,
rule=constr_initial_production_start,
name="constr_initial_production_start")
def constr_produce_minimum_number_of_periods(model, l, p, t):
relevant_time_periods = {tau for tau in model.TIME_PERIODS if
t <= tau <= t + model.production_line_min_times[l, p] - 1}
return (model.production_line_min_times[l, p] * model.delta[l, p, t]
<=
sum(model.g[l, p, tau] for tau in relevant_time_periods))
self.m.constr_produce_minimum_number_of_periods = pyo.Constraint(self.m.PRODUCTION_LINES,
self.m.PRODUCTS,
self.m.TIME_PERIODS,
rule=constr_produce_minimum_number_of_periods,
name="constr_produce_minimum_number_of_periods")
def constr_production_line_availability(model, l, t):
return model.a[l, t] + sum(model.g[l, p, t] for p in model.PRODUCTS) == 1
self.m.constr_production_line_availability = pyo.Constraint(self.m.PRODUCTION_LINES,
self.m.TIME_PERIODS,
rule=constr_production_line_availability,
name="constr_production_line_availability")
def constr_production_shift(model, l, p, t):
if t == 0:
return Constraint.Feasible
relevant_products = {q for (qq, q) in model.PRODUCTS_WITHIN_SAME_PRODUCT_GROUP_TUP if qq == p}
return model.g[l, p, (t - 1)] <= model.a[l, t] + sum(model.g[l, q, t] for q in relevant_products)
self.m.constr_production_shift = pyo.Constraint(self.m.PRODUCTION_LINES,
self.m.PRODUCTS,
self.m.TIME_PERIODS,
rule=constr_production_shift,
name="constr_production_shift")
def constr_wait_if_visit_sick_farm(model, v, i, j, t):
if model.transport_times[v, i, j] <= t <= len(model.TIME_PERIODS) - model.min_wait_if_sick[v, i, j]:
return (model.min_wait_if_sick[v, i, j] * model.x[v, i, j, t - model.transport_times[v, i, j]]
<=
sum(model.w[v, j, tau] for tau in range(t, t + model.min_wait_if_sick[v, i, j])))
else:
return Constraint.Feasible
self.m.constr_wait_if_visit_sick_farm = pyo.Constraint(self.m.WAIT_EDGES_FOR_VESSEL_TRIP,
self.m.TIME_PERIODS,
rule=constr_wait_if_visit_sick_farm,
name="constr_wait_if_visit_sick_farm")
# Extension
if extended_model:
def constr_delivery_no_tw_violation(model, i):
return (sum(model.y[v, i, t]
for v, j in model.ORDER_NODES_FOR_VESSELS_TUP if i == j
for j, t in model.TIME_WINDOWS_FOR_ORDERS_TUP if i == j)
==
model.lambd[i, 0])
self.m.constr_delivery_within_time_window.deactivate() # Deactivate the current delivery constraint
self.m.constr_delivery_no_tw_violation = pyo.Constraint(self.m.ORDER_NODES,
rule=constr_delivery_no_tw_violation,
name="constr_delivery_no_tw_violation")
def constr_delivery_tw_violation_earlier(model, i, k):
if k < 0 and self.m.tw_min[i] + k in model.TIME_PERIODS:
return (sum(model.y[v, i, self.m.tw_min[i] + k]
for v, j in model.ORDER_NODES_FOR_VESSELS_TUP if i == j)
==
model.lambd[i, k])
else:
return Constraint.Skip
self.m.constr_delivery_tw_violation_earlier = pyo.Constraint(self.m.ORDER_NODES,
self.m.TIME_WINDOW_VIOLATIONS,
rule=constr_delivery_tw_violation_earlier,
name="constr_delivery_tw_violation_earlier")
def constr_delivery_tw_violation_later(model, i, k):
if k > 0 and self.m.tw_max[i] + k in model.TIME_PERIODS:
return (sum(model.y[v, i, self.m.tw_max[i] + k]
for v, j in model.ORDER_NODES_FOR_VESSELS_TUP if i == j)
==
model.lambd[i, k])
else:
return Constraint.Skip
self.m.constr_delivery_tw_violation_later = pyo.Constraint(self.m.ORDER_NODES,
self.m.TIME_WINDOW_VIOLATIONS,
rule=constr_delivery_tw_violation_later,
name="constr_delivery_tw_violation_later")
def constr_choose_one_tw_violation(model, i):
return (sum(model.lambd[i, k]
for k in model.TIME_WINDOW_VIOLATIONS
if model.tw_max[i] + k in model.TIME_PERIODS
and model.tw_min[i] + k in model.TIME_PERIODS)
+ model.e[i]
== 1)
self.m.constr_choose_one_tw_violation = pyo.Constraint(self.m.ORDER_NODES,
rule=constr_choose_one_tw_violation,
name="constr_choose_one_tw_violation")
def constr_rewarded_inventory_below_inventory_level(model, i, p):
return model.s_plus[i, p] <= model.s[i, p, max(model.TIME_PERIODS)]
self.m.constr_rewarded_inventory_below_inventory_level = pyo.Constraint(self.m.FACTORY_NODES,
self.m.PRODUCTS,
rule=constr_rewarded_inventory_below_inventory_level,
name="constr_rewarded_inventory_below_inventory_level")
def constr_rewarded_inventory_below_inventory_target(model, i, p):
return model.s_plus[i, p] <= model.inventory_targets[i, p]
self.m.constr_rewarded_inventory_below_inventory_target = pyo.Constraint(self.m.FACTORY_NODES,
self.m.PRODUCTS,
rule=constr_rewarded_inventory_below_inventory_target,
name="constr_rewarded_inventory_below_inventory_target")
print("Done setting constraints!")
def solve(self, verbose: bool = True, time_limit: int = None, warm_start: bool = False) -> None:
print("Solver running...")
# self.m.constr_y_heuristic_flying_start.deactivate()
t = time()
t_warm_solve = 0
if warm_start:
pass
# print(f"Preparing for warm-start...")
# self.solver_factory.options['TimeLimit'] = time_limit
# self.m.constr_y_heuristic_flying_start.activate()
# self.solver_factory.options['SolutionLimit'] = 1
# try:
# self.results = self.solver_factory.solve(self.m, tee=verbose)
# self.m.write("debug.lp")
# if self.results.solver.termination_condition == pyo.TerminationCondition.infeasible:
# warm_start = False
# print(f"Initial ALNS solution was regarded infeasible")
# except ValueError:
# print(f"No warm-start initial solution found within time limit")
# warm_start = False
#
# self.solver_factory.options['SolutionLimit'] = 2000000000 # Gurobi's default value
# print(f"...warm-start model completed!")
# t_warm_solve = time() - t
if time_limit:
remaining_time_limit = time_limit # max(time_limit - t_warm_solve, 60) if time_limit > 60 else time_limit - t_warm_solve
print(f"{round(remaining_time_limit, 1)} seconds remains out of the total of {time_limit} seconds")
self.solver_factory.options['TimeLimit'] = remaining_time_limit # time limit in seconds
try:
# self.m.constr_y_heuristic_flying_start.deactivate()
print(f"Solving model...")
self.results = self.solver_factory.solve(self.m, tee=verbose, warmstart=warm_start)
# logfile=f'../../log_files/console_output_{log_name}.log'
print(f"...model solved!")
print("Termination condition", self.results.solver.termination_condition)
if self.results.solver.termination_condition != pyo.TerminationCondition.optimal:
print("Not optimal termination condition: ", self.results.solver.termination_condition)
# log_infeasible_constraints(self.m, log_variables=True, log_expression=True)
print("Solve time: ", round(time() - t, 1))
except ValueError:
print(f"No solution found within time limit of {time_limit} seconds")
def print_result(self):
def print_result_variablewise():
def print_vessel_routing():
print("VESSEL ROUTING (x variable)")
for v in self.m.VESSELS:
for t in self.m.TIME_PERIODS:
for i, j in self.m.ARCS[v]:
if pyo.value(self.m.x[v, i, j, t]) == 1:
print("Vessel", v, "travels from", i, "to", j, "in time period", t)
print()
print()
def print_waiting():
print("WAITING (w variable)")
for v in self.m.VESSELS:
for i in self.m.ORDER_NODES:
for t in self.m.TIME_PERIODS:
if pyo.value(self.m.w[v, i, t]) >= 0.5:
print("Vessel", v, "waits to deliver order", i, "in time period", t)
print()
def print_order_delivery_and_pickup():
print("ORDER DELIVERY AND PICKUP (y variable)")
for v in self.m.VESSELS:
for t in self.m.TIME_PERIODS:
for (vv, i) in self.m.NODES_FOR_VESSELS_TUP:
if v == vv:
activity_str = "loading" if i in self.m.FACTORY_NODES else "unloading"
if pyo.value(self.m.y[v, i, t]) >= 0.5:
print(t, ": vessel ", v, " starts ", activity_str, " in node ", i, sep="")
print()
def print_factory_production():
print("FACTORY PRODUCTION (q variable)")
production = False
for t in self.m.TIME_PERIODS:
for l in self.m.PRODUCTION_LINES:
for p in self.m.PRODUCTS:
if pyo.value(self.m.g[l, p, t]) >= 0.5:
print("Production line", l, "produces",
pyo.value(self.m.production_max_capacities[l, p]), "tons of product",
p,
"in time period", t)
production = True
if not production:
print("Nothing is produced")
print()
def print_factory_inventory():
print("FACTORY INVENTORY (r variable)")
for t in self.m.TIME_PERIODS:
for i in self.m.FACTORY_NODES:
for p in self.m.PRODUCTS:
if pyo.value(self.m.s[i, p, t]) >= 0.5:
print("Factory", i, "holds", pyo.value(self.m.s[i, p, t]), "tons of product", p,
"as inventory in time period", t)
print()
def print_factory_pickup():
print("FACTORY PICKUPS (z variable)")
for v in self.m.VESSELS:
for t in self.m.TIME_PERIODS:
for j in self.m.ORDER_NODES:
for i in {n for (n, o) in self.m.ORDERS_RELATED_TO_NODES_TUP if o == j}:
if pyo.value(self.m.z[v, i, j, t]) >= 0.5:
print("Vessel", v, "handles order", j, "in node", i, "in time period", t)
print()
def print_vessel_load():
print("VESSEL LOAD (l variable)")
for v in self.m.VESSELS:
for p in self.m.PRODUCTS:
for t in self.m.TIME_PERIODS:
if pyo.value(self.m.l[v, p, t]) >= 0.5:
print("Vessel", v, "carries", pyo.value(self.m.l[v, p, t]), "tons of product", p,
"in time period", t)
print()
def print_orders_not_delivered():
all_delivered = True
print("ORDERS NOT DELIVERED (e variable)")
for i in self.m.ORDER_NODES:
if pyo.value(self.m.e[i]) >= 0.5:
print("Order", i, "is not delivered")
all_delivered = False
if all_delivered:
print("All orders have been delivered")
print()
def print_production_starts():
print("PRODUCTION START (delta variable)")
for i in self.m.FACTORY_NODES:
relevant_production_lines = {l for (ii, l) in self.m.PRODUCTION_LINES_FOR_FACTORIES_TUP if ii == i}
for l in relevant_production_lines:
print("Production line", l, "at factory", i)
for t in self.m.TIME_PERIODS:
for p in self.m.PRODUCTS:
if pyo.value(self.m.delta[l, p, t]) >= 0.5:
print(t, ": production of product ", p, " is started, imposing a cost of ",
pyo.value(self.m.production_start_costs[i, p]), ", and ",
pyo.value(self.m.production_max_capacities[l, p]), " is produced", sep="")
print()
def print_production_happens():
print("PRODUCTION HAPPENS (g variable)")
for i in self.m.FACTORY_NODES:
relevant_production_lines = {l for (ii, l) in self.m.PRODUCTION_LINES_FOR_FACTORIES_TUP if ii == i}
for l in relevant_production_lines:
print("Production line", l, "at factory", i)
for t in self.m.TIME_PERIODS:
for p in self.m.PRODUCTS:
if pyo.value(self.m.g[l, p, t]) >= 0.5:
print(t, ": production of product ", p, " happens ", sep="")
print()
def print_final_inventory():
print("FINAL INVENTORY AND TARGETS (r_plus variable)")
for i in self.m.FACTORY_NODES:
print("Factory", i)
for p in self.m.PRODUCTS:
if self.extended_model:
print("Rewarded inventory of product ", p, " is ", pyo.value(self.m.s_plus[i, p]),
", total final inventory is ", pyo.value(self.m.s[i, p, (max(self.m.TIME_PERIODS))]),
" and its target is ", pyo.value(self.m.inventory_targets[i, p]), sep="")
else:
print("Final inventory for", p, "is", pyo.value(self.m.s[i, p, (max(self.m.TIME_PERIODS))]))
print()
def print_available_production_lines():
print("AVAILABLE PRODUCTION LINES")
for ll in self.m.PRODUCTION_LINES:
for t in self.m.TIME_PERIODS:
print(t, ": production line ", ll, " has value ", pyo.value(self.m.a[ll, t]), sep="")
print()
def print_time_window_violations():
if self.extended_model:
for k in self.m.TIME_WINDOW_VIOLATIONS:
orders_with_k_violation = [i for i in self.m.ORDER_NODES if self.m.lambd[i, k]() > 0.5]
s = " " if k <= 0 else "+"
print(s + str(k), "violation:", orders_with_k_violation)
else:
print("No time window violation, extension is not applied")
print()
# PRINTING
print()
# print_factory_production()
# print_factory_inventory()
# print_vessel_routing()
# print_order_delivery_and_pickup()
# print_factory_pickup()
# print_waiting()
# print_vessel_load()
print_orders_not_delivered()
# print_production_starts()
# print_production_happens()
# print_final_inventory()
# print_available_production_lines()
# print_time_window_violations()
def print_result_eventwise():
def print_routes_simple():
table = []
for v in self.m.VESSELS:
row = [v]
for t in self.m.TIME_PERIODS:
action_in_period = False
for i in self.m.NODES:
# Check if node may be visited by vessel
if i not in [i2 for v2, i2 in self.m.NODES_FOR_VESSELS_TUP if v2 == v]:
continue
if self.m.y[v, i, t]() > 0.5:
row.append(i) # load or unload
action_in_period = True
if self.m.w[v, i, t]() > 0.5:
row.append('.') # wait
action_in_period = True
if i in self.m.FACTORY_NODES and self.m.x[v, i, 'd_-1', t]() >= 0.5: # route ends
row.append(i)
action_in_period = True
for i, j in self.m.ARCS[v]:
if self.m.x[v, i, j, t]() > 0.5 and i != 'd_0' and j != 'd_-1':
row.append(">") # sail
action_in_period = True
if not action_in_period:
row.append(" ")
table.append(row)
print(tabulate(table, headers=["vessel"] + list(self.m.TIME_PERIODS)))
print()
def print_y():
active_y_s = [(v, i, t)
for v in self.m.VESSELS
for v2, i in self.m.NODES_FOR_VESSELS_TUP
for t in self.m.TIME_PERIODS
if v2 == v and self.m.y[v, i, t]() > 0.5]
print("Active y's:", active_y_s)
def print_routing(include_loads=True):
for v in self.m.VESSELS:
print("ROUTING OF VESSEL", v)
for t in self.m.TIME_PERIODS:
curr_load = [round(self.m.l[v, p, t]()) for p in self.m.PRODUCTS]
# x variable
for i, j in self.m.ARCS[v]:
if pyo.value(self.m.x[v, i, j, t]) >= 0.5:
print(t, ": ", i, " --> ", j, sep="")
if include_loads and i != 'd_0':
print(" load: ", curr_load)
# w variable
for i in self.m.NODES:
if pyo.value(self.m.w[v, i, t]) >= 0.5:
print(t, ": waits to go to ", i, sep="")
if include_loads:
print(" load: ", curr_load)
for i in [j for (vessel, j) in self.m.NODES_FOR_VESSELS_TUP if vessel == v]:
# y variable
if pyo.value(self.m.y[v, i, t]) >= 0.5:
activity_str = "loads" if i in self.m.FACTORY_NODES else "unloads"
print(t, ": ", activity_str, " in node ", i, sep="")
if include_loads:
print(" load: ", curr_load)
# z variable
for (v2, n, o) in self.m.ORDER_NODES_RELEVANT_NODES_FOR_VESSELS_TRIP:
if v2 == v and pyo.value(self.m.z[v, n, o, t]) >= 0.5:
print(" [handles order ", o, " in node ", n, "]", sep="")
print()
def print_vessel_load():
for v in self.m.VESSELS:
print("LOAD AT VESSEL", v)
for t in self.m.TIME_PERIODS:
curr_load = [round(self.m.l[v, p, t]()) for p in self.m.PRODUCTS]
if sum(curr_load) > 0.5:
print(t, ": ", curr_load, sep="")
print()
def print_production_and_inventory():
for i in self.m.FACTORY_NODES:
print("PRODUCTION AND INVENTORY AT FACTORY", i)
for t in self.m.TIME_PERIODS:
relevant_production_lines = {l for (ii, l) in self.m.PRODUCTION_LINES_FOR_FACTORIES_TUP if
ii == i}
production = [round(sum(self.m.g[l, p, t]() * self.m.production_max_capacities[l, p]
for l in relevant_production_lines))
for p in sorted(self.m.PRODUCTS)]
inventory = [round(self.m.s[i, p, t]()) for p in sorted(self.m.PRODUCTS)]
if sum(production) > 0.5:
print(t, ": production: \t", production, sep="")
print(t, ": inventory: \t", inventory, sep="")
# for p in self.m.PRODUCTS:
# if pyo.value(self.m.q[i, p, t]) >= 0.5:
# print(t, ": production of ", round(pyo.value(self.m.q[i, p, t]), 1),
# " tons of product ",
# p, sep="")
# if pyo.value(self.m.s[i, p, t]) >= 0.5:
# print(t, ": inventory level is ", round(pyo.value(self.m.s[i, p, t]), 1),
# " tons of product ", p, sep="")
# relevant_order_nodes = {j for (f, j) in self.m.ORDER_NODES_FOR_FACTORIES_TUP if f == i}
# loaded_onto_vessels = pyo.value(sum(
# self.m.demands[i, j, p] * self.m.z[v, i, j, t]
# for (v, i2, j) in self.m.ORDER_NODES_RELEVANT_NODES_FOR_VESSELS_TUP if i == i2))
# if loaded_onto_vessels >= 0.5:
# print(t, ": ", round(loaded_onto_vessels, 1), " tons of product ", p,
# " is loaded onto vessels ", sep="")
print()
def print_production_simple():
for i in self.m.FACTORY_NODES:
relevant_production_lines = {l for (ii, l) in self.m.PRODUCTION_LINES_FOR_FACTORIES_TUP if ii == i}
table = []
print("Factory", i)
for p in self.m.PRODUCTS:
row = [p, "prod"]
for t in self.m.TIME_PERIODS:
if sum(self.m.g[l, p, t]() for l in relevant_production_lines) > 0.5:
row.append(
round(sum(self.m.g[l, p, t]() * self.m.production_max_capacities[l, p] for l in
relevant_production_lines))) # + " [" + str(self.m.s[i, p, t]()) + "]")
else:
row.append(" ")
table.append(row)
row = ["\"", "inv"]
for t in self.m.TIME_PERIODS:
if t == 0:
row.append(round(self.m.s[i, p, 0]()))
elif abs(self.m.s[i, p, t]() - self.m.s[i, p, t - 1]()) > 0.5:
row.append(round(self.m.s[i, p, t]())) # + " [" + str(self.m.s[i, p, t]()) + "]")
else:
row.append(" ")
table.append(row)
table.append(["____"] * (len(list(self.m.TIME_PERIODS)) + 2))
print(tabulate(table, headers=["product", "prod/inv"] + list(self.m.TIME_PERIODS)))
print()
# print_routing(include_loads=False)
# print_vessel_load()
# print_production_and_inventory()
print_production_simple()
print_y()
print_routes_simple()
def print_objective_function_components():
inventory_cost = self.get_inventory_cost()
transport_cost = self.get_transport_cost()
production_start_cost = self.get_production_start_cost()
unmet_order_cost = self.get_unmet_order_cost()
sum_obj = inventory_cost + transport_cost + unmet_order_cost + production_start_cost
if self.extended_model:
time_window_violation_cost = (sum(self.m.time_window_violation_cost[k] * self.m.lambd[i, k]()
for i in self.m.ORDER_NODES
for k in self.m.TIME_WINDOW_VIOLATIONS))
final_inventory_reward = (-sum(self.m.inventory_unit_rewards[i] * pyo.value(self.m.s_plus[i, p])
for i in self.m.FACTORY_NODES
for p in self.m.PRODUCTS))
sum_obj += time_window_violation_cost + final_inventory_reward
print("Time window violation cost:", round(time_window_violation_cost, 2))
print("Final inventory reward (negative cost):", round(final_inventory_reward, 2))
print("Inventory cost:", round(inventory_cost, 2))
print("Transport cost:", round(transport_cost, 2))
print("Production start cost:", round(production_start_cost, 2))
print("Unmet order cost:", round(unmet_order_cost, 2))
print("Sum of above cost components:", round(sum_obj, 15))
print("Objective value (from Gurobi):", pyo.value(self.m.objective))
print_result_variablewise()
print_result_eventwise()
print_objective_function_components()
def get_production_start_cost(self) -> int:
return int(sum(self.m.production_start_costs[i, p] * pyo.value(self.m.delta[l, p, t])
for i in self.m.FACTORY_NODES
for (ii, l) in self.m.PRODUCTION_LINES_FOR_FACTORIES_TUP if i == ii
for p in self.m.PRODUCTS
for t in self.m.TIME_PERIODS))
def get_inventory_cost(self) -> int:
return int(sum(self.m.inventory_unit_costs[i] * pyo.value(self.m.s[i, p, t])
for t in self.m.TIME_PERIODS
for p in self.m.PRODUCTS
for i in self.m.FACTORY_NODES))
def get_transport_cost(self) -> int:
return int(sum(self.m.transport_unit_costs[v] * self.m.transport_times_exact[v, i, j]
* pyo.value(self.m.x[v, i, j, t])
for t in self.m.TIME_PERIODS
for v in self.m.VESSELS
for i, j in self.m.ARCS[v]
if i != 'd_0' and j != 'd_-1'))
def get_unmet_order_cost(self) -> int:
return int(sum(self.m.external_delivery_penalties[i] * pyo.value(self.m.e[i])
for i in self.m.ORDER_NODES))
def get_orders_not_served(self) -> List[str]:
orders_not_served: List[str] = []
for i in self.m.ORDER_NODES:
if pyo.value(self.m.e[i]) > 0.5:
orders_not_served.append(i)
return orders_not_served
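    # Writes a one-row-per-metric summary (objective components, bounds, MIP gap, solve time)
    # for this run to a separate sheet ("run_<id>") of the given Excel writer.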
def write_to_file(self, excel_writer: pd.ExcelWriter, id: int = 0) -> None:
inventory_cost = self.get_inventory_cost()
transport_cost = self.get_transport_cost()
unmet_order_cost = self.get_unmet_order_cost()
production_start_cost = self.get_production_start_cost()
lb = self.results.Problem._list[0].lower_bound
ub = self.results.Problem._list[0].upper_bound
solve_time = self.results.Solver._list[0].wall_time
solution_dict = {'obj_val': round(pyo.value(self.m.objective), 2),
'production_start_cost': round(production_start_cost, 2),
'inventory_cost': round(inventory_cost, 2),
'transport_cost': round(transport_cost, 2),
'unmet_order_cost': round(unmet_order_cost, 2),
'number_orders_not_served': len(self.get_orders_not_served()),
'lower_bound': round(lb, 2),
'upper_bound': round(ub, 2),
'mip_gap': str(round(((ub-lb)/ub)*100, 2)) + "%",
'time_limit [sec]': self.solver_factory.options['TimeLimit'],
'solve_time': solve_time} # TODO
# TODO: Add relevant data
sheet_name = "run_" + str(id)
df = pd.DataFrame(solution_dict, index=[0]).transpose()
df.to_excel(excel_writer, sheet_name=sheet_name, startrow=1)
if __name__ == '__main__':
# problem_data = ProblemData('../../data/input_data/small_testcase_one_vessel.xlsx')
# problem_data = ProblemData('../../data/input_data/small_testcase.xlsx')
# problem_data = ProblemData('../../data/input_data/medium_testcase.xlsx')
# problem_data = ProblemData('../../data/input_data/large_testcase.xlsx')
# problem_data = ProblemData('../../data/input_data/larger_testcase.xlsx')
# problem_data = ProblemData('../../data/input_data/larger_testcase_4vessels.xlsx')
# PARAMETERS TO FIX BEFORE USE ###
partial_warm_start = False # TODO: Fix
num_alns_iterations = 100 # only used if partial_warm_start = True
extensions = False # extensions _not_ supported in generated test files
# PARAMETERS TO CHANGE ###
# time_limit = 100
# EXTERNAL RUN
parser = argparse.ArgumentParser(description='process FFPRP input parameters')
parser.add_argument('input_filepath', type=str, help='path of input data file')
    parser.add_argument('time_limit', type=str, help='solver time limit in seconds')
args = parser.parse_args()
output_filepath = "data/output_data/gurobi-" + str(args.input_filepath.split("/")[-1])
time_limit = int(args.time_limit)
# Execution line format: python3 src/models/ffprp_model.py data/input_data/f1-v3-o20-t50.xlsx
# PARAMETERS NOT TO CHANGE ###
problem_data = ProblemData(args.input_filepath) # ProblemData(file_path)
problem_data.soft_tw = extensions
y_init_dict = None
if partial_warm_start:
pass
# problem_data_ext = ProblemDataExtended(file_path) # TODO: Fix to avoid to prbl reads (problem with nodes field)
# problem_data_ext.soft_tw = extensions
# t = time()
# y_init_dict = src.alns.alns.run_alns(prbl=problem_data_ext, iterations=num_alns_iterations,
# skip_production_problem_postprocess=partial_warm_start, verbose=False)
# print(f"ALNS warmup time {round(time() - t, 1)}")
model = FfprpModel(problem_data, extended_model=extensions) #, y_init_dict=y_init_dict)
model.solve(time_limit=time_limit, warm_start=partial_warm_start)
# WRITE TO FILE
excel_writer = | pd.ExcelWriter(output_filepath, engine='openpyxl', mode='w', options={'strings_to_formulas': False}) | pandas.ExcelWriter |
"""
Train BERT-based models for each of the three cQA StackExchange topics: Apple, Cooking and Travel. Make predictions
for the corresponding test sets and save to a CSV file.
Steps for each topic:
1. Load training data.
2. Load validation data.
3. Initialise and train a model. <SKIP>
4. Sanity check by testing on the training and validation sets. <SKIP>
5. Load test data.
6. Compute predictions. <INITIALLY put 1 for the gold and 0 for the other answers>
7. Write to CSV files.
Each topic stores CSV files under 'data/BERT_cQA_vec_pred/%s/' % topic.
For each question in the test dataset, we save a separate CSV file.
The csv files contain a row per candidate answer + a row at the end for the gold answer (this row will be a
duplicate of one of the others.)
The columns are: 'answer' (the text of the answer), 'prediction' (the score), 'vector' (the embedding of the answer
taken from our final BERT layer).
"""
import csv
import sys
import pandas as pd
import logging
import os
import numpy as np
import torch
from torch.nn import ReLU
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertModel, AdamW, get_linear_schedule_with_warmup, DistilBertModel, \
DistilBertTokenizer
from torch import nn, tensor, dtype
logging.basicConfig(level=logging.INFO)
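# BertRanker: a shared BERT encoder whose token embeddings are average-pooled and fed through a small
# MLP (hidden_size -> 100 -> 10 -> 1) to produce a scalar rank score. forward() scores two QA sequences
# for pairwise margin-ranking training; forward_single_item() scores a single sequence and also returns
# its pooled embedding vector.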
class BertRanker(nn.Module):
def __init__(self):
super(BertRanker, self).__init__()
bert = BertModel.from_pretrained("bert-base-cased")
self.embedding_size = bert.config.hidden_size
# self.bert = DistilBertModel.from_pretrained("distilbert-base-cased")
self.bert = nn.DataParallel(bert)
self.pooling = nn.AdaptiveAvgPool1d(1)
self.pooling = nn.DataParallel(self.pooling)
# self.out = nn.Linear(bert.config.hidden_size, 1)
self.W1 = nn.Linear(self.embedding_size, 100)
self.W1 = nn.DataParallel(self.W1)
self.W2 = nn.Linear(100, 10)
self.W2 = nn.DataParallel(self.W2)
self.out = nn.Linear(10, 1) # only need one output because we just want a rank score
self.relu = ReLU()
def forward(self, input_ids1, attention_mask1, input_ids2, attention_mask2):
sequence_emb = self.bert(
input_ids=input_ids1,
attention_mask=attention_mask1
)[0]
sequence_emb = sequence_emb.transpose(1, 2)
pooled_output_1 = self.pooling(sequence_emb)
pooled_output_1 = pooled_output_1.transpose(2, 1)
h1_1 = self.relu(self.W1(pooled_output_1))
h2_1 = self.relu(self.W2(h1_1))
scores_1 = self.out(h2_1)
sequence_emb = self.bert(
input_ids=input_ids2,
attention_mask=attention_mask2
)[0]
sequence_emb = sequence_emb.transpose(1, 2)
pooled_output_2 = self.pooling(sequence_emb)
pooled_output_2 = pooled_output_2.transpose(2, 1)
h1_2 = self.relu(self.W1(pooled_output_2))
h2_2 = self.relu(self.W2(h1_2))
scores_2 = self.out(h2_2)
return scores_1, scores_2
def forward_single_item(self, input_ids, attention_mask):
sequence_emb = self.bert(
input_ids=input_ids,
attention_mask=attention_mask
)[0]
sequence_emb = sequence_emb.transpose(1, 2)
pooled_output = self.pooling(sequence_emb)
pooled_output = pooled_output.transpose(2, 1)
h1 = self.relu(self.W1(pooled_output))
h2 = self.relu(self.W2(h1))
scores = self.out(h2)
return scores, torch.squeeze(pooled_output).detach()
def train_epoch(model, data_loader, loss_fn, optimizer, device, scheduler):
model = model.train()
losses = []
ncorrect = 0
count_examples = 0
for step, batch in enumerate(data_loader):
if np.mod(step, 100) == 0:
print("Training step %i / %i" % (step, len(data_loader)))
input_ids1 = batch["input_ids1"].to(device)
attention_mask1 = batch["attention_mask1"].to(device)
input_ids2 = batch["input_ids2"].to(device)
attention_mask2 = batch["attention_mask2"].to(device)
scores_1, scores_2 = model(
input_ids1=input_ids1,
attention_mask1=attention_mask1,
input_ids2=input_ids2,
attention_mask2=attention_mask2
)
ncorrect += float(torch.sum(torch.gt(scores_1, scores_2))) # first score is always meant to be higher
count_examples += len(scores_1)
loss = loss_fn(scores_1, scores_2, batch['targets'].to(device))
losses.append(float(loss.item()))
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
return ncorrect / float(count_examples), np.mean(losses)
def train_bertcqa(data_loader, nepochs=1, random_seed=42, save_path='saved_bertcqa_params', reload_model=False):
# For reproducibility while debugging. TODO: vary this during real experiments.
np.random.seed(random_seed)
torch.manual_seed(random_seed)
# Get the device for running the training and prediction
if torch.cuda.is_available():
device = torch.device("cuda")
print('Selecting device -- using cuda')
print('Selected device: ')
print(device)
print('Current cuda device:')
print(torch.cuda.current_device())
else:
device = torch.device("cpu")
print('Selecting device -- using CPU')
# Create the BERT-based model
model = BertRanker()
model = model.to(device)
if reload_model and os.path.exists(save_path+'.pkl'):
print('Found a previously-saved model... reloading')
model.load_state_dict(torch.load(save_path+'.pkl'))
with open(save_path+'_num_epochs.txt', 'r') as fh:
epochs_completed = int(fh.read())
print('Number of epochs already completed: %i' % epochs_completed)
else:
epochs_completed = 0
optimizer = AdamW(model.parameters(), lr=5e-5, correct_bias=False)
optimizer.zero_grad()
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=len(data_loader) * nepochs * 0.1,
num_training_steps=len(data_loader) * nepochs
)
loss_fn = nn.MarginRankingLoss(margin=0.0).to(device)
for epoch in range(epochs_completed, nepochs):
print('Training epoch %i' % epoch)
train_acc, train_loss = train_epoch(
model,
data_loader,
loss_fn,
optimizer,
device,
scheduler
)
print(f'Train loss {train_loss} pairwise label accuracy {train_acc}')
print('Saving trained model')
torch.save(model.state_dict(), save_path+'.pkl')
# write the number of epochs to file. If we need to restart training, we don't need to repeat all epochs.
with open(save_path+'_num_epochs.txt', 'w') as fh:
fh.write(str(epoch+1))
return model, device
def predict_bertcqa(model, data_loader, device):
scores = np.zeros(0)
vectors = np.zeros((0, model.embedding_size))
qids = np.zeros(0)
ismatch = np.zeros(0)
model.eval()
for step, batch in enumerate(data_loader):
if np.mod(step, 100) == 0:
print("Prediction step %i / %i" % (step, len(data_loader)))
input_ids = batch["input_ids"].to(device)
attention_mask = batch["attention_mask"].to(device)
batch_scores, batch_vectors = model.forward_single_item(input_ids, attention_mask)
print('step %i' % step)
        print('batch_vector shape ' + str(batch_vectors.shape))
print('vectors shape ' + str(vectors.shape))
scores = np.append(scores, batch_scores.cpu().detach().numpy().flatten())
batch_vectors = batch_vectors.cpu().numpy()
if batch_vectors.ndim == 1:
batch_vectors = batch_vectors[None, :]
vectors = np.concatenate((vectors, batch_vectors), axis=0)
qids = np.append(qids, batch["qid"].detach().numpy().flatten())
ismatch = np.append(ismatch, batch["ismatch"].detach().numpy().flatten())
print('Outputting an embedding vector with shape ' + str(np.array(vectors).shape))
return scores, vectors, qids, ismatch
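# Accuracy = fraction of questions for which the gold answer receives the highest predicted score
# among that question's candidate answers.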
def evaluate_accuracy(model, data_loader, device):
scores, vectors, qids, matches = predict_bertcqa(model, data_loader, device)
unique_questions = np.unique(qids)
ncorrect = 0
nqs = len(unique_questions)
for q_id in unique_questions:
qscores = scores[qids == q_id]
isgold = matches[qids == q_id]
if isgold[np.argmax(qscores)]:
ncorrect += 1
acc = ncorrect / float(nqs)
print("Accuracy = %f" % acc)
return acc, scores, vectors
# Create the dataset class
class SEPairwiseDataset(Dataset):
def __init__(self, qa_pairs: list):
self.tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
# BertTokenizer.from_pretrained('bert-base-cased')
self.qa_pairs = qa_pairs
self.max_len = 512
def __len__(self):
return len(self.qa_pairs)
def __getitem__(self, i):
# first item in the pair
encoding1 = self.tokenizer.encode_plus(
self.qa_pairs[i][0],
add_special_tokens=True,
max_length=self.max_len,
return_token_type_ids=False,
pad_to_max_length=True,
return_attention_mask=True,
return_tensors='pt',
)
# second item in the pair
encoding2 = self.tokenizer.encode_plus(
self.qa_pairs[i][1],
add_special_tokens=True,
max_length=self.max_len,
return_token_type_ids=False,
pad_to_max_length=True,
return_attention_mask=True,
return_tensors='pt',
)
return {
'text1': self.qa_pairs[i][0],
'text2': self.qa_pairs[i][1],
'input_ids1': encoding1['input_ids'].flatten(),
'input_ids2': encoding2['input_ids'].flatten(),
'attention_mask1': encoding1['attention_mask'].flatten(),
'attention_mask2': encoding2['attention_mask'].flatten(),
'targets': tensor(1, dtype=torch.float)
}
class SESingleDataset(Dataset):
def __init__(self, qas: list, qids: list, aids: list, goldids: dict):
self.tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
# BertTokenizer.from_pretrained('bert-base-cased')
self.qas = qas
self.qids = qids
self.aids = aids
self.goldids = goldids
self.max_len = 512
def __len__(self):
return len(self.qas)
def __getitem__(self, i):
# first item in the pair
encoding1 = self.tokenizer.encode_plus(
self.qas[i],
add_special_tokens=True,
max_length=self.max_len,
return_token_type_ids=False,
pad_to_max_length=True,
return_attention_mask=True,
return_tensors='pt',
)
return {
'text1': self.qas[i],
'input_ids': encoding1['input_ids'].flatten(),
'attention_mask': encoding1['attention_mask'].flatten(),
'targets': tensor(1, dtype=torch.long),
'qid': self.qids[i],
'ismatch': self.goldids[self.qids[i]] == self.aids[i]
}
def construct_pairwise_dataset(dataframe, n_neg_samples=10):
"""
Function for constructing a pairwise training set where each pair consists of a matching QA sequence and a
non-matching QA sequence.
:param n_neg_samples: Number of pairs to generate for each question by sampling non-matching answers and pairing
them with matching answers.
:param dataframe:
:return:
"""
# Get the positive (matching) qs and as from traindata and put into pairs
# Sample a number of negative (non-matching) qs and as from the answers listed for each question in traindata
qa_pairs = []
for idx, qid in enumerate(dataframe.index):
# Reconstruct the text sequences for the training questions
tokids = questions.loc[qid].values[0].split(' ')
toks = vocab[np.array(tokids).astype(int)]
question = ' '.join(toks)
# Reconstruct the text sequences for the true answers
gold_ans_id = dataframe.loc[qid]["goldid"]
# some of the lines seem to have two gold ids. Just use the first.
gold_ans_ids = gold_ans_id.split(' ')
gold_ans_id = gold_ans_ids[0]
tokids = answers.loc[gold_ans_id].values[0].split(' ')
toks = vocab[np.array(tokids).astype(int)]
gold_ans = ' '.join(toks)
# Join the sequences. Insert '[SEP]' between the two sequences
qa_gold = question + ' [SEP] ' + gold_ans
# Reconstruct the text sequences for random wrong answers
wrong_ans_ids = dataframe.loc[qid]["ansids"]
wrong_ans_ids = wrong_ans_ids.split(' ')
if len(wrong_ans_ids) < n_neg_samples + 1:
continue
if n_neg_samples == 0:
# use all the wrong answers (exclude the gold one that is mixed in)
n_wrongs = len(wrong_ans_ids) - 1
widx = 0
else:
# use a specified sample size
n_wrongs = n_neg_samples
qa_wrongs = []
while len(qa_wrongs) < n_wrongs:
if n_neg_samples == 0:
# choose the next wrong answer, skip over the gold answer.
wrong_ans_id = wrong_ans_ids[widx]
widx += 1
if wrong_ans_id == gold_ans_id:
wrong_ans_id = wrong_ans_ids[widx]
widx += 1
else:
# choose a new negative sample
wrong_ans_id = gold_ans_id
while wrong_ans_id == gold_ans_id:
wrong_ans_id = wrong_ans_ids[np.random.randint(len(wrong_ans_ids))]
tokids = answers.loc[wrong_ans_id].values[0].split(' ')
toks = vocab[np.array(tokids).astype(int)]
wrong_ans = ' '.join(toks)
qa_wrong = question + ' [SEP] ' + wrong_ans
qa_wrongs.append(qa_wrong)
qa_pairs.append((qa_gold, qa_wrong))
data_loader = DataLoader(
SEPairwiseDataset(qa_pairs),
batch_size=16,
num_workers=8
)
data = next(iter(data_loader))
return qa_pairs, data_loader, data
def construct_single_item_dataset(dataframe):
"""
Constructs a dataset where each element is a single QA pair. It contains all the sampled non-matching answers from
the given dataframe. A list of question IDs is returned to indicate which items relate to the same question,
along with a dict with question IDs as keys and the indexes of the gold answers within the answers for their
corresponding questions as items.
:param dataframe:
:return:
"""
# Get the positive (matching) qs and as from traindata and put into pairs
# Sample a number of negative (non-matching) qs and as from the answers listed for each question in traindata
qas = []
qids = []
aids = []
goldids = {}
for idx, qid in enumerate(dataframe.index):
# Reconstruct the text sequences for the training questions
tokids = questions.loc[qid].values[0].split(' ')
toks = vocab[np.array(tokids).astype(int)]
question = ' '.join(toks)
# Reconstruct the text sequences for random wrong answers
ans_ids = dataframe.loc[qid]["ansids"]
ans_ids = ans_ids.split(' ')
if len(ans_ids) < 2:
continue
ans_ids = np.unique(ans_ids) # make sure we don't have the same answer multiple times
gold_id = dataframe.loc[qid]["goldid"]
gold_id = gold_id.split(' ')
gold_id = gold_id[0]
for ans_idx, ans_id in enumerate(ans_ids):
tokids = answers.loc[ans_id].values[0].split(' ')
toks = vocab[np.array(tokids).astype(int)]
wrong_ans = ' '.join(toks)
qa_wrong = question + ' [SEP] ' + wrong_ans
qas.append(qa_wrong)
qids.append(qid)
aids.append(ans_id)
if ans_id == gold_id:
goldids[qid] = ans_id
if qid not in goldids:
print("Didn't find the goldid in the list of candidates for q %i. Gold ID = %s, answers = %s" %
(qid, dataframe.loc[qid]["goldid"], dataframe.loc[qid]["ansids"]))
data_loader = DataLoader(
SESingleDataset(qas, qids, aids, goldids),
batch_size=16,
num_workers=8
)
data = next(iter(data_loader))
return qas, qids, goldids, aids, data_loader, data
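# Illustrative sketch (not part of the original pipeline): both dataset
# constructors above rebuild text by indexing the numpy vocab array with the
# stored token IDs and joining question and answer with '[SEP]'. The toy
# vocab and token-ID strings below are made up, used only to show the pattern.
def _example_token_reconstruction():
    import numpy as np
    toy_vocab = np.array(['how', 'do', 'i', 'bake', 'bread', 'use', 'yeast'])
    question_tokids = '0 1 2 3 4'   # format as stored in questions.tsv
    answer_tokids = '5 6'           # format as stored in answers.tsv
    question = ' '.join(toy_vocab[np.array(question_tokids.split(' ')).astype(int)])
    answer = ' '.join(toy_vocab[np.array(answer_tokids.split(' ')).astype(int)])
    return question + ' [SEP] ' + answer   # "how do i bake bread [SEP] use yeast"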
if __name__ == "__main__":
# Create the output dir
outputdir = './data/cqa_base_models/BERT_vec_pred'
if not os.path.exists(outputdir):
os.mkdir(outputdir)
# Our chosen topics
topic = sys.argv[1] # ['apple', 'cooking', 'travel']
print('Loading the training data for %s' % topic)
# Data directory:
datadir = './data/cqa_data/%s.stackexchange.com' % topic
# answers.tsv: each row corresponds to an answer; first column is answer ID; rows contain
# the space-separated IDs of the tokens in the answers.
# questions.tsv: as above but first column is question ID, rows contain space-separated token IDs of questions.
# train.tsv, test.tsv and valid.tsv contain the questions & candidates in each set. First column is question ID,
# second column is gold answer ID, third column is a space-separated list of candidate answer IDs.
# vocab.tsv is needed to retrieve the text of the questions and answers from the token IDs. First col is ID and
# second col is the token.
# load the vocab
vocab = pd.read_csv(os.path.join(datadir, 'vocab.tsv'), sep='\t', quoting=csv.QUOTE_NONE, header=None,
index_col=0, names=['tokens'], dtype=str, keep_default_na=False)["tokens"].values
# load the questions
questions = pd.read_csv(os.path.join(datadir, 'questions.tsv'), sep='\t', header=None, index_col=0)
# load the answers
answers = pd.read_csv(os.path.join(datadir, 'answers.tsv'), sep='\t', header=None, index_col=0)
# Load the training set
traindata = pd.read_csv(os.path.join(datadir, 'train.tsv'), sep='\t', header=None, names=['goldid', 'ansids'],
index_col=0)
tr_qa_pairs, tr_data_loader, tr_data = construct_pairwise_dataset(traindata, n_neg_samples=20)
qmax = 0
noverlength = 0
for q in tr_qa_pairs:
l = len(q[0].split(' '))
qmax = l if l > qmax else qmax
if l > 512:
noverlength += 1
print('QuestionAnswer max length: %i' % qmax)
print('Number over length = %i' % noverlength)
print('number of qas = %i' % len(tr_qa_pairs))
# Train the model ----------------------------------------------------------------------------------------------
bertcqa_model, device = train_bertcqa(tr_data_loader, 3, 42, os.path.join(outputdir, 'model_params_%s' % topic),
reload_model=True)
# Compute performance on training set --------------------------------------------------------------------------
# Training is very large, don't bother with this.
# print("Evaluating on training set:")
# tr_qas2, tr_qids2, tr_goldids2, tr_aids2, tr_data_loader2, tr_data2 = construct_single_item_dataset(traindata)
# evaluate_accuracy(bertcqa_model, tr_data_loader2, device)
# Compute performance on validation set ------------------------------------------------------------------------
# Load the validation set
# validationdata = pd.read_csv(os.path.join(datadir, 'valid.tsv'), sep='\t', header=None,
# names=['goldid', 'ansids'], index_col=0, nrows=2)
# va_qas, va_qids, va_goldids, va_aids, va_data_loader, va_data = construct_single_item_dataset(validationdata)
#
# print("Evaluating on validation set:")
# evaluate_accuracy(bertcqa_model, va_data_loader, device, va_qids, va_goldids)
# Compute performance on test set ------------------------------------------------------------------------------
# Load the test set
testdata = pd.read_csv(os.path.join(datadir, 'test.tsv'), sep='\t', header=None, names=['goldid', 'ansids'],
index_col=0)
te_qas, te_qids, te_goldids, te_aids, te_data_loader, te_data = construct_single_item_dataset(testdata)
print("Evaluating on test set:")
_, te_scores, te_vectors = evaluate_accuracy(bertcqa_model, te_data_loader, device)
# Output predictions in the right format for the GPPL experiments ----------------------------------------------
# Save predictions for the test data
fname_text = os.path.join(outputdir, '%s_text.tsv' % topic)
fname_numerical = os.path.join(outputdir, '%s_num.tsv' % topic)
# The text data and other info goes here:
text_df = pd.DataFrame(columns=['qid', 'answer', 'isgold'])
# Store the prediction and embedding vectors here:
numerical_data = np.empty((len(te_qids), 1 + bertcqa_model.embedding_size))
for i, qid in enumerate(te_qids):
if np.mod(i, 100) == 0:
print("Outputting qa pair %i / %i" % (i, len(te_qids)))
goldid = te_goldids[qid]
ansid = te_aids[i]
tokids = answers.loc[ansid].values[0].split(' ')
toks = vocab[np.array(tokids).astype(int)]
answer_text = ' '.join(toks)
score = te_scores[i]
vector = te_vectors[i]
isgold = True if goldid == ansid else False
text_df = text_df.append(
{'qid': qid, 'answer': answer_text, 'isgold': isgold},
ignore_index=True
)
numerical_data[i, 0] = score
numerical_data[i, 1:] = vector
text_df.to_csv(fname_text, sep='\t')
    pd.DataFrame(numerical_data).to_csv(fname_numerical, sep='\t')
import pandas as pd
import numpy as np
import json
from bs4 import BeautifulSoup
import requests
import matplotlib.pyplot as plt
# save data
import pickle
def save(data,fileName):
with open(fileName+'.dat', 'wb') as f:
pickle.dump(data, f)
def load(fileName):
with open(fileName+'.dat', 'rb') as f:
new_data = pickle.load(f)
return new_data
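# Minimal usage sketch for the save/load helpers above. The file name is
# arbitrary and this function is never called by the script itself.
def _example_save_load():
    data = {'bitcoin': [1, 2, 3]}
    save(data, 'example_cache')        # writes example_cache.dat
    restored = load('example_cache')   # reads it back
    assert restored == data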
# # all_coins_dict = json.loads(BeautifulSoup(
# # requests.get('https://api.coincap.io/v2/assets').content, "html.parser").prettify())
# all_coins_dict = requests.get('https://api.coincap.io/v2/assets').json()
# # print(all_coins_dict)
# all_coins_df = pd.DataFrame(all_coins_dict["data"])
# # print(all_coins_df)
# coins_by_mcap = all_coins_df[all_coins_df["marketCapUsd"].astype(float) > 1e9]
# coin_portfolio = coins_by_mcap['id']
# save(coin_portfolio,'coin_portfolio')
coin_portfolio = load('coin_portfolio')
# del(coin_portfolio['bitcoin-sv'])
# del(coin_portfolio['ethereum'])
# coin_portfolio.drop([,'ethereum'])
# indexVal = coin_portfolio[ coin_portfolio.values == 'bitcoin-sv' ].index
# coin_portfolio.drop(indexVal , inplace=True)
# indexVal = coin_portfolio[ coin_portfolio.values == 'ethereum' ].index
# coin_portfolio.drop(indexVal , inplace=True)
# indexVal = coin_portfolio[ coin_portfolio.values == 'nano' ].index
# coin_portfolio.drop(indexVal , inplace=True)
# indexVal = coin_portfolio[ coin_portfolio.values == 'zcash' ].index
# coin_portfolio.drop(indexVal , inplace=True)
# new_coins = pd.Series(['zcash'])
# not enough data on this api for nano
# coin_portfolio.append(new_coins)
print("Portfolio coins with MCAP > 1 Billion :\n",coin_portfolio.values)
# save(coin_portfolio,'coin_portfolio')
#Create a DataFrame that will hold all the price & data for all the selected coins
combined_df = pd.DataFrame()
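# Hedged sketch of how combined_df could be filled: loop over the portfolio
# coins, request each coin's daily price history, and concatenate the frames.
# The CoinCap history endpoint and the response shape assumed below are
# extrapolated from the assets endpoint used above and are not verified here.
def _example_build_combined_df(coins):
    frames = []
    for coin_id in coins:
        url = 'https://api.coincap.io/v2/assets/{}/history?interval=d1'.format(coin_id)
        history = requests.get(url).json()      # assumed shape: {"data": [{"priceUsd": ..., "time": ...}, ...]}
        coin_df = pd.DataFrame(history['data'])
        coin_df['coin'] = coin_id
        frames.append(coin_df)
    return pd.concat(frames, ignore_index=True)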
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 5 15:40:55 2021
@author: liukang
"""
import numpy as np
import pandas as pd
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings('ignore')
disease_list=pd.read_csv('/home/liukang/Doc/Error_analysis/subgroup_in_previous_study.csv')
result = pd.read_csv('/home/liukang/Doc/AUPRC/test_result_10_No_Com.csv')
round_num = 1000
disease_final_result = pd.DataFrame()
test_total = pd.DataFrame()
for i in range(1,5):
test_data = pd.read_csv('/home/liukang/Doc/valid_df/test_{}.csv'.format(i))
    test_total = pd.concat([test_total,test_data])
import os
try:
import pandas as pd
except:
os.system('pip3 install pandas')
import pandas as pd
try:
import psycopg2
except:
os.system('pip3 install psycopg2-binary')
import psycopg2
import numpy as np
from psycopg2.extensions import register_adapter, AsIs
psycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)
class POSTGREE_WRITER:
def __init__(self, string_to_connect='postgresql://postgres:postgres@pdbserv:5432/postgres'):
self.string_to_connect = string_to_connect
self.conn = psycopg2.connect(self.string_to_connect)
self.cursor = self.conn.cursor()
def __create_DF(self, path='./habr_news/'):
DF_ALL = pd.DataFrame()
for file in os.listdir(path):
full_filename = '{}{}'.format(path, file)
DF = pd.read_parquet(full_filename)
            DF_ALL = pd.concat([DF_ALL, DF])
"""
Functions used for pre-processing
"""
#import math
import pickle
#import copy
#import config
import os
# for multiprocessing
from functools import partial
from multiprocessing import Pool, cpu_count
from joblib import Parallel, delayed
import joblib
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
def load_results(filename):
""" Load a pickle file
"""
with open(filename, 'rb') as file_to_load:
data = pickle.load(file_to_load, encoding='bytes')
return data
def save_results(rootpath, filename, results):
""" Save results as a pickle file
"""
if not os.path.exists(rootpath):
os.makedirs(rootpath)
with open(rootpath + filename, 'wb') as file_to_save:
pickle.dump(results, file_to_save)
########## Code to perform Principal Component Analysis (PCA) on a covariate ###################
def do_pca_on_covariate(df_train, df_test, n_components=10, location='pacific', var_id='sst'):
""" Do PCA: learn PC loadings from training data,
and project test data onto corresponding directions
Args:
df_train: multi-index (spatial-temporal) pandas dataframe
-- Training data used to compute Principal axes in feature space
df_test: multi-index pandas dataframe -- Test data
n_components: int -- Number of components to keep
location: str -- location indicator of the climate variable
var_id: str -- climate variable to process
Returns:
df1: pandas dataframe -- PCs for training data
df2: pandas dataframe -- PCs for test data
"""
# check the legitimate of the given parameters
if not isinstance(df_train, pd.DataFrame):
if isinstance(df_train, pd.Series):
df_train = df_train.to_frame() # convert pd.series to pd.dataframe
else:
raise ValueError("Training data needs to be a pandas dataframe")
if not isinstance(df_test, pd.DataFrame):
if isinstance(df_test, pd.Series):
df_test = df_test.to_frame() # convert pd.series to pd.dataframe
else:
raise ValueError("Test data needs to be a pandas dataframe")
# check dataframe level!
if len(df_train.index.names) < 3 or len(df_test.index.names) < 3:
raise ValueError("Multiindex dataframe includes 3 levels: [lat,lon,start_date]")
# flatten the dataframe such that the number of
# samples equals the number of dates in the dataframe
# and the number of features equals to lat x lon
df_train_flat = df_train.unstack(level=[0, 1])
df_test_flat = df_test.unstack(level=[0, 1])
x_train = df_train_flat.to_numpy()
x_test = df_test_flat.to_numpy()
# make sure no NAN
if np.isnan(x_train).sum() > 0:
np.nan_to_num(x_train, 0)
if np.isnan(x_test).sum() > 0:
np.nan_to_num(x_test, 0)
# Initialize the PCA model such that it will reture the top n_components
pca = PCA(n_components=n_components)
# Fit the model with Xtrain and apply the dimensionality reduction on Xtrain.
pca_train = pca.fit_transform(x_train)
# Apply dimensionality reduction to Xtest
pca_test = pca.transform(x_test)
# Convert PCs of Xtrain and Xtest to pandas dataframe
col = ['{}_{}_pca_{}'.format(location, var_id, i) for i in range(n_components)]
df1 = pd.DataFrame(data=pca_train,
columns=col,
index=df_train_flat.index)
df2 = pd.DataFrame(data=pca_test,
columns=col,
index=df_test_flat.index)
return(df1, df2)
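# Hedged usage sketch for do_pca_on_covariate(): build a tiny synthetic
# (lat, lon, start_date) multi-index covariate, split it in time, and keep two
# principal components. The grid size, dates and the 'sst' variable name are
# illustrative only.
def _example_pca_usage():
    lats, lons = np.arange(3), np.arange(4)
    dates = pd.date_range('2000-01-01', periods=30, freq='D')
    index = pd.MultiIndex.from_product([lats, lons, dates],
                                       names=['lat', 'lon', 'start_date'])
    df = pd.DataFrame({'sst': np.random.randn(len(index))}, index=index)
    idx = pd.IndexSlice
    train = df.loc[idx[:, :, :pd.Timestamp('2000-01-20')], 'sst']
    test = df.loc[idx[:, :, pd.Timestamp('2000-01-21'):], 'sst']
    train_pcs, test_pcs = do_pca_on_covariate(train, test, n_components=2,
                                              location='pacific', var_id='sst')
    return train_pcs, test_pcs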
def get_pca_from_covariate(rootpath,
data,
var_name, var_location,
train_start, train_end,
test_start, test_end,
n_components=10):
""" Apply PCA on spatial-temporal Climate variables (Covariates),
e.g., Sea surface temperature (SST)
Args:
data: multi-index pandas dataframe -- raw covariate to apply PCA
var_name: str -- covariance name
var_location: str -- covariance location (pacific, atlantic, us, and global)
rootpath: str -- directory to save the results
train_start, train_end: pd.Timestamp() -- the start date and the end date of the training set
test_start, test_end: pd.Timestamp() -- the start date and the end date of the test set
"""
idx = pd.IndexSlice
# check the legitimate of the given parameters
if not isinstance(data, pd.DataFrame):
if isinstance(data, pd.Series):
data = data.to_frame() # convert pd.series to pd.dataframe
else:
raise ValueError("Covariate needs to be a pandas multiindex dataframe")
# check if the train start date and the train end date is out of range
if train_start < data.index.get_level_values('start_date')[0]:
raise ValueError("Train start date is out of range!")
if train_end > data.index.get_level_values('start_date')[-1]:
raise ValueError("Train end date is out of range!")
# check if the test start date and the test end date is out of range
if test_start < train_start:
raise ValueError("Test start date is out of range!")
if test_end < train_end or test_end > data.index.get_level_values('start_date')[-1]:
raise ValueError("Test end date is out of range!")
print('create training-test split')
train_x = data.loc[idx[:, :, train_start:train_end], :]
test_x = data.loc[idx[:, :, test_start:test_end], :]
# start PCA
print('start pca')
train_x_pca, test_x_pca = do_pca_on_covariate(train_x[var_name], test_x[var_name],
n_components, var_location, var_name)
# save PCA data
all_x_pca = train_x_pca.append(test_x_pca)
all_x_pca.to_hdf(rootpath + '{}_{}_pca_all.h5'.format(var_location, var_name),
key=var_name, mode='w')
########## Code to perform z-score on a time-series using long-term mean and std ############################
def get_mean(df1, var_id='tmp2m', date_id='start_date'):
""" Compute the mean and standard deviation of a covariate on the given period
Args:
d1: multi-index pandas dataframe -- covariate
var_id: str -- covariate name
date_id: str -- index column name for date
Return(s):
df1: multi-index pandas dataframe -- with month-day-mean-std added
"""
indexnames = df1.index.names
idxlevel = indexnames.index(date_id)
df1 = df1.assign(month=df1.index.get_level_values(idxlevel).month)
df1 = df1.assign(day=df1.index.get_level_values(idxlevel).day)
# get mean of each date
df1['{}_daily_mean'.format(var_id)] = df1.groupby(['month', 'day'])[var_id].transform('mean')
# get std of each date
df1['{}_daily_std'.format(var_id)] = df1.groupby(['month', 'day'])[var_id].transform('std')
return df1.fillna(0)
def add_month_day(df1, date_id='start_date'):
""" Extract the month-of-year and day-of-year from the date index,
and add it to the datafram
Args:
d1: multi-index pandas dataframe -- covariate
date_id: str -- index column name for date
"""
indexnames = df1.index.names
idxlevel = indexnames.index(date_id)
df1 = df1.assign(month=df1.index.get_level_values(idxlevel).month)
df1 = df1.assign(day=df1.index.get_level_values(idxlevel).day)
return(df1)
def zscore_temporal(rootpath,
data,
var,
train_start='1986-01-01', train_end='2016-12-31',
test_start='2017-01-01', test_end='2018-12-31',
date_id='start_date'):
""" Do zscore on time series only (no spatial information), e.g., pca of a covariate
Args:
rootpath: directory to save the results
data: pd.Dataframe -- dataframe contains data that is about to apply zscore
var: str -- variable name
train_start, train_end: str -- the start date and the end date of the training set
test_start, test_end: str -- the start date and the end date of the test set
date_id: str -- index column name for date
"""
# check the legitimate of the given parameters
if not isinstance(data, pd.DataFrame) and not isinstance(data, pd.Series):
raise ValueError("Data needs to be a pandas dataframe/series.")
idx = pd.IndexSlice
target = data[var].to_frame()
print('pre-process: {}'.format(var))
df1 = target.loc[idx[train_start:train_end], :] # train
df2 = target.loc[idx[test_start:test_end], :] # test
df1 = get_mean(df1, var)
# get first element of each group: mean for each location each month-day
month_day = df1.groupby(['month', 'day']).first()
month_day = month_day.reset_index()
# add month-day column to second dataframe
df2 = add_month_day(df2)
df2.reset_index(level=0, inplace=True)
var_cols = ['{}_daily_{}'.format(var, col_type) for col_type in ['mean', 'std']]
# add mean and std get from df1
df2 = df2.merge(month_day[['month', 'day'] + var_cols], how='left', on=['month', 'day'])
df2 = df2.sort_values(by=[date_id])
df2 = df2.set_index([date_id]) # add multi-index back
df1[var + '_zscore'] = (df1[var] - df1['{}_daily_mean'.format(var)]) / df1['{}_daily_std'.format(var)]
df2[var + '_zscore'] = (df2[var] - df2['{}_daily_mean'.format(var)]) / df2['{}_daily_std'.format(var)]
df_all = df1.append(df2)
df_all.to_hdf(rootpath + '{}_zscore.h5'.format(var), key=var, mode='w')
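# Minimal sketch of the day-of-year z-scoring idea used above: group a daily
# series by (month, day), then subtract the climatological mean and divide by
# the climatological std. The dates and values are synthetic.
def _example_climatology_zscore():
    dates = pd.date_range('2000-01-01', '2003-12-31', freq='D')
    series = pd.Series(np.random.randn(len(dates)), index=dates, name='tmp2m')
    grouper = [series.index.month, series.index.day]
    clim_mean = series.groupby(grouper).transform('mean')
    clim_std = series.groupby(grouper).transform('std')
    return (series - clim_mean) / clim_std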
def zscore_spatial_temporal(rootpath,
target, var_id='tmp2m',
train_start='1986-01-01', train_end='2016-12-31',
test_start='2017-01-01', test_end='2018-12-31',
date_id='start_date'):
""" Apply zscore on spatial-temporal climate variable, e.g., the target variable tmp2m
Args:
rootpath: directory to save the results
data: pd.Dataframe -- dataframe contains data that is about to apply zscore
var_id: variable name
train_start, train_end: str -- the start date and the end date of the training set
test_start, test_end: str -- the start date and the end date of the test set
date_id: column name for time/date
"""
idx = pd.IndexSlice
df1 = target.loc[idx[:, :, train_start:train_end], :] # train
df2 = target.loc[idx[:, :, test_start:test_end], :]# test
# ---- Day-Month Mean of each location ---- #
# Add 'month', 'day' column, and get mean and std of each date, each location
df1 = df1.groupby(['lat', 'lon']).apply(lambda df: get_mean(df, var_id, date_id))
# get first element of each group: mean for each location each month-day
month_day = df1.groupby(['lat', 'lon', 'month', 'day']).first()
month_day = month_day.reset_index()
# add month-day column to second dataframe
df2 = df2.groupby(['lat', 'lon']).apply(lambda df: add_month_day(df, date_id))
df2.reset_index(level=2, inplace=True)
var_cols = ['{}_daily_{}'.format(var_id, col_type) for col_type in ['mean', 'std']]
# add mean and std get from df1
df2 = df2.merge(month_day[['lat', 'lon', 'month', 'day'] + var_cols],
how='left', on=['lat', 'lon', 'month', 'day'])
df2 = df2.sort_values(by=['lat', 'lon', date_id])
df2 = df2.set_index(['lat', 'lon', date_id]) # add multi-index back
df1[var_id+'_zscore'] = (df1[var_id] - df1['{}_daily_mean'.format(var_id)])/df1['{}_daily_std'.format(var_id)]
df2[var_id+'_zscore'] = (df2[var_id] - df2['{}_daily_mean'.format(var_id)])/df2['{}_daily_std'.format(var_id)]
df_all = df1.append(df2)
df_all.sort_index(level=['lat', 'lon'], inplace=True)
df_all.to_hdf(rootpath + 'target_{}_multitask_zscore.h5'.format(var_id), key=var_id, mode='w')
############## train-validation split ##################
def create_sequence_custom(today, time_frame, covariate_map, past_years=2,
curr_shift_days=[7, 14, 28], past_shift_days=[7, 14, 28]):
""" Feature aggregation: add features from past dates
Args:
today: pd.Timestamp() -- the date we want to aggregate feature
time_frame: pandas dataframe -- corresponding dates for covariate map
covariate_map: numpy array -- data/feature we use to aggregate
past_years: int -- number of years in the past to be included
curr_shift_days: list of int -- past dates/neighbors in the current year/most recent year to be included
past_shift_days: list of int -- both past and future dates/neighbors in the past year to be included
Return:
agg_x: numpy array -- the aggragated feature for the date provided by "today"
"""
combine = [today] + [today - pd.DateOffset(days=day) for day in curr_shift_days]
for k in range(past_years): # go to the past k years
        today = today - pd.DateOffset(years=1)
import os
import itertools
from os.path import dirname, join, basename, exists
from typing import List, Tuple, Callable, Union
import cv2
import numpy as np
from numpy.lib.npyio import save
import pandas as pd
from tqdm import tqdm
from PIL import Image, ImageDraw
from openslide import OpenSlide
from ._functional import (
get_thumbnail,
get_downsamples,
try_thresholds,
resize,
detect_spots,
get_spots,
get_theta
)
from .preprocess.functional import preprocess, tissue_mask
from .helpers._utils import (
remove_extension, remove_images, flatten, multiprocess_map
)
from ._logger import logger
__all__ = [
'Dearrayer'
]
class Dearrayer(object):
"""
Cut TMA spots from a TMA array slide.
Args:
slide_path (str): Path to the TMA slide array. All formats that are
supported by openslide can be used.
threshold (int or float, optional): Threshold value for tissue
detection. Defaults to 1.1.
If threshold is an integer between [0, 255]:
This value will be used as an threshold for tissue detection.
Different thresholds can be easily searched with the
Cutter.try_thresholds() function.
If threshold is a float:
In this case, Otsu's binarization is used and the found
threshold is multiplied by `threshold` as Otsu isn't optimal
for histological images.
downsample (int, optional): Downsample used for the thumbnail. The user
might have to tweak this value depending on the magnification of the
slide. The TMA spot detection method is optimized for downsamlpe=64
when the magnification is 20x. If no spots are found, try adjusting
the downsample or tweak the spot detection variables with
``Dearrayer.try_spot_mask()`` function. Defaults to 64.
min_area_multiplier (float, optional): Remove all detected contours that
have an area smaller than ``median_area*min_area_multiplier``.
Defaults to 0.2.
max_area_multiplier (float, optional): Remove all detected contours that
have an area larger than ``median_area*max_area_multiplier``.
Defaults to None.
kernel_size (Tuple[int], optional): Kernel size used during spot
detection. Defaults to (8, 8).
create_thumbnail (bool, optional): Create a thumbnail if downsample is
not available. Defaults to False.
Raises:
IOError: slide_path not found.
ValueError: downsample is not available and create_thumbnail=False.
"""
def __init__(
self,
slide_path: str,
threshold: Union[int, float] = 1.1,
downsample: int = 64,
min_area_multiplier: float = 0.2,
max_area_multiplier: float = None,
kernel_size: Tuple[int] = (8, 8),
create_thumbnail: bool = False):
super().__init__()
# Define openslide reader.
if not exists(slide_path):
raise IOError(f'{slide_path} not found.')
self.openslide_reader = OpenSlide(slide_path)
# Assing basic stuff that user can see/check.
self.slide_path = slide_path
self.slide_name = remove_extension(basename(slide_path))
self.dimensions = self.openslide_reader.dimensions
self.downsample = downsample
self.threshold = threshold
self.min_area_multiplier = min_area_multiplier
self.max_area_multiplier = max_area_multiplier
self.kernel_size = kernel_size
self._spots_saved = False
# Get spots.
self._thumbnail = get_thumbnail(
slide_path=self.slide_path,
downsample=self.downsample,
create_thumbnail=create_thumbnail
)
if self._thumbnail is None:
# Downsample not available.
raise ValueError(
f'Thumbnail not available for downsample {self.downsample}. '
'Please set create_thumbnail=True or select downsample from\n\n'
f'{self._downsamples()}'
)
self.threshold, self._tissue_mask = tissue_mask(
image=self._thumbnail,
threshold=self.threshold,
return_threshold=True
)
self._spot_mask = detect_spots(
mask=self._tissue_mask,
min_area_multiplier=self.min_area_multiplier,
max_area_multiplier=self.max_area_multiplier,
kernel_size=self.kernel_size,
)
self._numbers, self._bounding_boxes = get_spots(
spot_mask=self._spot_mask,
downsample=self.downsample,
)
if self._numbers is None or self._bounding_boxes is None:
logger.warning(
                'No spots detected from the slide! Please try adjusting the '
                'kernel_size, min_area_multiplier and max_area_multiplier '
                'parameters using the dearrayer.try_spot_mask() function.'
)
self.spot_metadata = None
self._annotate()
else:
self.spot_metadata = pd.DataFrame(
np.hstack((self._numbers.reshape(-1, 1), self._bounding_boxes)))
self.spot_metadata.columns = [
'number', 'x', 'y', 'width', 'height']
self._annotate()
if len([x for x in self._numbers if '_' in x]) > 0:
logger.warning(
                    'Some spots were assigned the same number. Please check the '
f'annotated thumbnail for slide {self.slide_name}.'
)
def __repr__(self):
return self.__class__.__name__ + '()'
def __len__(self):
return len(self._bounding_boxes)
def summary(self):
"""Returns a summary of the dearraying process."""
print(self._summary())
def _summary(self, cut=False):
summary = (
f"{self.slide_name}"
f"\n Number of TMA spots: {len(self._bounding_boxes)}"
f"\n Downsample: {self.downsample}"
f"\n Threshold: {self.threshold}"
f"\n Min area multiplier: {self.min_area_multiplier}"
f"\n Max area multiplier: {self.max_area_multiplier}"
f"\n Kernel size: {self.kernel_size}"
f"\n Dimensions: {self.dimensions}"
)
if cut:
summary += (
f"\n Tile width: {self.width}"
f"\n Tile overlap: {self.overlap}"
f"\n Max background: {self.max_background}"
)
return summary
def get_thumbnail(self, max_pixels: int = 1_000_000) -> Image.Image:
"""
Returns an Pillow Image of the thumbnail for inspection.
Args:
max_pixels (int, optional): Downsample the image until the image
                has less than max_pixels pixels. Defaults to 1_000_000.
Returns:
Image.Image: Thumbnail.
"""
return resize(self._thumbnail, max_pixels)
def get_annotated_thumbnail(self,
max_pixels: int = 5_000_000) -> Image.Image:
"""
Returns an Pillow Image of the annotated thumbnail for inspection.
Args:
max_pixels (int, optional): Downsample the image until the image
                has less than max_pixels pixels. Defaults to 5_000_000.
Returns:
Image.Image: Annotated thumbnail.
"""
return resize(self._annotated_thumbnail, max_pixels)
def get_tissue_mask(self, max_pixels: int = 1_000_000) -> Image.Image:
"""
Returns an Pillow Image of the tissue mask for inspection.
Args:
max_pixels (int, optional): Downsample the image until the image
                has less than max_pixels pixels. Defaults to 1_000_000.
Returns:
Image.Image: Tissue mask.
"""
mask = self._tissue_mask
# Flip for a nicer image.
mask = 1 - mask
mask = mask/mask.max()*255
mask = Image.fromarray(mask.astype(np.uint8))
return resize(mask, max_pixels)
def get_spot_mask(self, max_pixels: int = 1_000_000) -> Image.Image:
"""
Returns an Pillow Image of the TMA spot mask for inspection.
Args:
max_pixels (int, optional): Downsample the image until the image
                has less than max_pixels pixels. Defaults to 1_000_000.
Returns:
Image.Image: Spot mask.
"""
mask = self._spot_mask
# Flip for a nicer image.
mask = 1 - mask
mask = mask/mask.max()*255
mask = Image.fromarray(mask.astype(np.uint8))
return resize(mask, max_pixels)
    def _annotate(self):
        """Draw bounding boxes and numbers to the thumbnail."""
        self._annotated_thumbnail = self._thumbnail.copy()
        if self.spot_metadata is None:
            # Nothing was detected, so there is nothing to annotate.
            return
        else:
            fontsize = (self.spot_metadata.width.median()/6000)*70/self.downsample
            annotated = ImageDraw.Draw(self._annotated_thumbnail)
# Bounding boxes.
for i in range(len(self)):
x, y, w, h = self._bounding_boxes[i]/self.downsample
annotated.rectangle(
[x, y, x+w, y+h], outline='red', width=round(fontsize*5))
arr = np.array(self._annotated_thumbnail)
# Numbers.
for i in range(len(self)):
x, y, w, h = self._bounding_boxes[i]/self.downsample
arr = cv2.putText(
arr,
str(self._numbers[i]),
(int(x+10), int(y-10+h)),
cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
fontsize,
(0, 0, 255),
round(fontsize*3),
cv2.LINE_AA
)
self._annotated_thumbnail = Image.fromarray(arr)
def try_thresholds(
self,
thresholds: List[int] = [250, 240, 230,
220, 200, 190, 180, 170, 160, 150, 140],
max_pixels=1_000_000) -> Image.Image:
"""
Try out different thresholds for tissue detection.
The function prepares tissue masks with given thresholds and slaps them
all together in one summary image.
Args:
thresholds (List[int], optional): Thresholds to try. Defaults to
[250, 240, 230, 220, 200, 190, 180, 170, 160, 150, 140].
max_pixels (int, optional): Downsample the image until the image
                has less than max_pixels pixels. Defaults to 1_000_000.
Returns:
            Image.Image: Summary image of the tissue masks for the tried thresholds.
"""
return try_thresholds(thumbnail=self._thumbnail, thresholds=thresholds)
def try_spot_mask(
self,
min_area_multiplier: float = 0.1,
max_area_multiplier: float = 2,
kernel_size: Tuple[int] = (5, 5),
max_pixels: int = 1_000_000) -> Image.Image:
"""
Try out different values for TMA spot detection.
Args:
min_area_multiplier (float, optional): Increase if some of the small
shit is detected as a spot. Decrease if some spots are missed.
Defaults to 0.1.
max_area_multiplier (float, optional): Increase if some spots are
missed. Decrease if some large elements are detected as spots.
Defaults to 2.
kernel_size (Tuple[int], optional): Increase with a small downsample
and vice versa. Defaults to (5, 5).
max_pixels (int, optional): Downsample the image until the image
                has less than max_pixels pixels. Defaults to 1_000_000.
Returns:
            Image.Image: TMA spot mask.
"""
mask = detect_spots(
image=self._thumbnail,
mask=self._tissue_mask,
min_area_multiplier=min_area_multiplier,
max_area_multiplier=max_area_multiplier,
kernel_size=kernel_size,
)
# Flip for a nicer image.
mask = 1 - mask
mask = mask/mask.max()*255
mask = Image.fromarray(mask.astype(np.uint8))
return resize(mask, max_pixels)
def _prepare_directories(self, output_dir: str) -> None:
out_dir = join(output_dir, self.slide_name)
# Save paths.
self._thumb_path = join(out_dir, f'thumbnail_{self.downsample}.jpeg')
self._annotated_path = join(out_dir, 'thumbnail_annotated.jpeg')
self._spot_meta_path = join(out_dir, 'spot_metadata.csv')
self._tile_meta_path = join(out_dir, 'metadata.csv')
self._image_dir = join(out_dir, 'spots')
self._tile_dir = join(out_dir, 'tiles')
self._summary_path = join(out_dir, 'summary.txt')
# Make dirs.
os.makedirs(self._image_dir, exist_ok=True)
def save_spots(
self,
output_dir: str,
overwrite: bool = False,
image_format: str = 'jpeg',
quality: int = 95) -> pd.DataFrame:
"""
Save TMA spots, coordinates and spot numbering.
Args:
output_dir (str): Parent directory for all output.
overwrite (bool, optional): This will **remove** all saved images,
thumbnail and metadata and save images again.. Defaults to
False.
image_format (str, optional): Format can be jpeg or png. Defaults
to 'jpeg'.
quality (int, optional): For jpeg compression. Defaults to 95.
Raises:
ValueError: Invalid image format.
Returns:
pd.DataFrame: Coordinates and spot numbers.
"""
allowed_formats = ['jpeg', 'png']
if image_format not in allowed_formats:
raise ValueError(
'Image format {} not allowed. Select from {}'.format(
image_format, allowed_formats
))
self._prepare_directories(output_dir)
# Check if slide has been cut before.
if exists(self._thumb_path) and not overwrite:
logger.warning(
'Spots have already been cut! Please set overwrite=True if '
'you wish to save them again.'
)
self.spot_metadata = pd.read_csv(self._spot_meta_path)
self._spots_saved = True
return self.spot_metadata
elif exists(self._thumb_path) and overwrite:
# Remove all previous files.
os.remove(self._annotated_path)
remove_images(self._image_dir)
# Save text summary.
with open(self._summary_path, "w") as f:
f.write(self._summary())
# Save both thumbnails.
self._thumbnail.save(self._thumb_path, quality=95)
self._annotated_thumbnail.save(self._annotated_path, quality=95)
# Multiprocessing to speed things up!
data = list(zip(self._numbers, self._bounding_boxes))
spot_paths = multiprocess_map(
func=save_spot,
func_args={
'slide_path': self.slide_path,
'image_dir': self._image_dir,
'image_format': image_format,
'quality': quality,
},
lst=data,
total=len(data),
desc=self.slide_name,
)
# Finally save metadata.
self.spot_metadata['path'] = spot_paths
self.spot_metadata.to_csv(self._spot_meta_path, index=False)
self._spots_saved = True
return self.spot_metadata
def save_tiles(
self,
width: int,
overlap: float = 0.0,
max_background: float = 0.999,
overwrite: bool = False,
image_format: str = 'jpeg',
quality: int = 95,
custom_preprocess: Callable[[Image.Image], dict] = None
) -> pd.DataFrame:
"""
Cut tiles from dearrayed TMA spots.
Args:
width (int): Tile width.
overlap (float, optional): Overlap between neighbouring tiles.
Defaults to 0.0.
max_background (float, optional): Maximum amount of background
allowed for a tile. Defaults to 0.999.
overwrite (bool, optional): This will **remove** the tiles directory
completely and save all the tiles again. Defaults to False.
image_format (str, optional): Format can be jpeg or png. Defaults
to 'jpeg'.
quality (int, optional): For jpeg compression. Defaults to 95.
custom_preprocess (Callable[[Image.Image], dict], optional): This is
intended for users that want to define their own preprocessing
function. The function must take a Pillow image as an input and
return a dictionary of desired metrics. Defaults to None.
Raises:
ValueError: Invalid image format.
IOError: Spots have not been saved first with Dearrayer.save().
IOError: No spot paths found.
Returns:
pd.DataFrame: Metadata.
"""
allowed_formats = ['jpeg', 'png']
if image_format not in allowed_formats:
raise ValueError(
'Image format {} not allowed. Select from {}'.format(
image_format, allowed_formats
))
if not self._spots_saved:
raise IOError('Please save the spots first with Dearrayer.save()')
if exists(self._tile_dir) and overwrite == False:
logger.warning(
f'{self._tile_dir} already exists! If you want to save tiles '
'again please set overwrite=True.'
)
return pd.read_csv(self._tile_meta_path)
else:
# Create the tiles directory
os.makedirs(self._tile_dir, exist_ok=True)
# Update summary.txt
self.width = width
self.overlap = overlap
self.max_background = max_background
with open(self._summary_path, "w") as f:
f.write(self._summary(cut=True))
# Let's collect all spot paths.
spot_paths = self.spot_metadata['path'].tolist()
# Remove nan paths.
spot_paths = [x for x in spot_paths if isinstance(x, str)]
if len(spot_paths) == 0:
raise IOError('No spot paths found!')
# Wrap the saving function so it can be parallized.
metadata = multiprocess_map(
func=save_tile,
func_args={
'image_dir': self._tile_dir,
'width': width,
'overlap': overlap,
'threshold': self.threshold,
'max_background': max_background,
'image_format': image_format,
'quality': quality,
'custom_preprocess': custom_preprocess
},
lst=spot_paths,
total=len(spot_paths),
desc='Cutting tiles',
)
metadata = list(filter(lambda x: x is not None, metadata))
metadata = flatten(metadata)
if len(metadata) == 0:
logger.warning(f'No tiles saved from any of the spots!')
return None
# Save metadata.
self.tile_metadata = | pd.DataFrame(metadata) | pandas.DataFrame |
from .busSim.manager import managerFactory
from .result.searchResult import SearchResult
from .util import gen_start_time, transform
from .gtfs_edit import copy_with_edits
from .service.yelp import get_results
from .census import Census
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import Polygon
from shapely.wkt import loads
from pyproj import Transformer
from zipfile import ZipFile
from io import TextIOWrapper
import os
from pathlib import Path
from math import ceil, floor
from collections import defaultdict
import time
class SCanalyzer:
def __init__(self, gtfs_path):
self.gtfs_path = gtfs_path
self.orig_gtfs_path = gtfs_path
self.base_out_path = self._get_out_path()
self.out_path = self.base_out_path
self._preprocess_gtfs()
def gtfs_edit(self, edit_fn, route, from_orig=True):
orig_gtfs_name = os.path.basename(self.orig_gtfs_path)
modified_gtfs_name = f"{edit_fn.__name__}-{route}-{orig_gtfs_name}"
modified_gtfs_path = os.path.join(
self.base_out_path, modified_gtfs_name)
from_path = self.orig_gtfs_path if from_orig else self.gtfs_path
copy_with_edits(from_path, modified_gtfs_path, edit_fn, route)
self.gtfs_path = modified_gtfs_path
def set_batch_label(self, label):
self.out_path = os.path.join(self.base_out_path, label)
Path(self.out_path).mkdir(parents=True, exist_ok=True)
def reset_batch_label(self):
self.out_path = self.base_out_path
def search(self, config, perf_df=None):
# prerun check
if not config.is_runnable():
raise Exception("The current config is not runnable")
# dynamically init a manager
manager = managerFactory.create(
config.get_run_env(), gtfs_path=self.gtfs_path, out_path=self.out_path, borders=self.borders)
result_df = manager.run_batch(config, perf_df)
return result_df
def load_census(self, cache=True):
"""
Looks for a stops.csv file in data/mmt_gtfs, queries TigerWeb Census API to pull out census tracts
based on the center and radius of the system. An optional addition of 1km (default) is added to the radius.
From the tracts, and a default set of demographs the ACS 5-year 2019 dataset is queried to get the demographics
data for each tract. A few statistics are computed. It returns a geodataframe with all of this information and
saves it to the output folder.
cache default=True, if true will load a saved result and return
"""
# Pull from Cache and return:
cache_path = os.path.join(self.base_out_path, "census.csv")
if cache and os.path.exists(cache_path):
census_df = pd.read_csv(cache_path)
return self._csvdf_to_gdf(census_df)
# Create the Geodataframe:
c = Census(gtfs_filename="../data/mmt_gtfs/stops.csv")
gdf_tracts = c.getCensusTracts()
demographic_data = c.getDemographicsData(
gdf_tracts, demographics=['Race', 'Vehicles'])
# Save output:
demographic_data.to_csv(cache_path, index=False)
return self._csvdf_to_gdf(demographic_data)
def load_yelp(self, api_key, services=["banks", "clinics", "dentists", "hospitals", "supermarket"], cache=True):
cache_path = os.path.join(self.base_out_path, "services.csv")
if cache and os.path.exists(cache_path):
            return pd.read_csv(cache_path)
import pandas as pd
import numpy as np
from sklearn import ensemble, feature_extraction, preprocessing
from sklearn.calibration import CalibratedClassifierCV
# import data
train = pd.read_csv('../input/train.csv')
import pandas as pd
import numpy as np
from datetime import datetime
from multiprocessing import Pool
from functools import partial
from plots import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
'''
Notice: This computer software was prepared by Battelle Memorial Institute, hereinafter the Contractor, under Contract
No. DE-AC05-76RL01830 with the Department of Energy (DOE). All rights in the computer software are reserved by DOE on
behalf of the United States Government and the Contractor as provided in the Contract. You are authorized to use this
computer software for Governmental purposes but it is not to be released or distributed to the public. NEITHER THE
GOVERNMENT NOR THE CONTRACTOR MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF THIS
SOFTWARE. This notice including this sentence must appear on any copies of this computer software.
'''
'''
This module implements repo-centric methods.
These metrics assume that the data is in the order id,created_at,type,actor.id,repo.id
'''
'''
This method returns the distributon for the diffusion delay.
Question #1
Inputs: DataFrame - Data
eventType - A list of events to filter data on
unit - Time unit for time differences, e.g. "s","d","h"
metadata_file - CSV file with repo creation times. Otherwise use first repo observation as proxy for creation time.
Output: A list (array) of deltas in days
'''
def getRepoDiffusionDelay(df,eventType=None,unit='h',metadata_file = '', plot=False, saveData=False):
if metadata_file != '':
repo_metadata = pd.read_csv(metadata_file)
repo_metadata = repo_metadata[['full_name_h','created_at']]
repo_metadata['created_at'] = pd.to_datetime(repo_metadata['created_at'])
#Standardize Time and Sort Dataframe
df.columns = ['time','event','user','repo']
#Checks for specific event type, uses both Fork and WatchEvent
if eventType is not None:
df = df[df.event.isin(eventType)]
df['time'] = pd.to_datetime(df['time'])
df = df.sort_values(by='time')
if metadata_file != '':
df = df.merge(repo_metadata,left_on='repo',right_on='full_name_h',how='left')
df = df[['repo','created_at','time']].dropna()
df['delta'] = (df['time']-df['created_at']).apply(lambda x: int(x / np.timedelta64(1, unit)))
else:
#Find difference between event time and "creation time" of repo
#Creation time is first seen event
creation_day = df['time'].min()
df['delta'] = (df['time']-creation_day).apply(lambda x: int(x / np.timedelta64(1, unit)))
df = df.iloc[1:]
delta = df['delta'].values
if plot==False:
return delta
##############
## Plotting ##
##############
if eventType is not None:
eventList = []
for ele in eventType:
eventList.append(ele[:-5])
eventType = '/'.join(eventList)
else:
eventType = 'All'
unit_labels = {'s':'Seconds',
'h':'Hours',
'd':'Days'}
##To Save or not
if saveData != False:
plot_histogram(delta,unit_labels[unit] + ' Between '+eventType+' Event and Creation Event','Number of Events','Diffusion Delay',loc=saveData + '_histogram.png')
##plotting line graph
plot_line_graph(delta,'Event Number','Delta between '+eventType+' Event and Creation','Diffusion Delay',labels=eventType,loc=saveData + '_linegraph.png')
else:
print(plot_histogram(delta,unit_labels[unit] + ' Between '+eventType+' Event and Creation Event','Number of Events','Diffusion Delay',loc=saveData))
##plotting line graph
print(plot_line_graph(delta,'Event Number','Delta between '+eventType+' Event and Creation','Diffusion Delay',labels=eventType,loc=saveData))
return delta
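# Hedged usage sketch for getRepoDiffusionDelay(): a tiny synthetic event log
# with the expected column order (time, event, user, repo); the order matters
# because the function renames columns positionally. With no metadata file,
# each event's delay is measured in hours from the earliest event remaining
# after the eventType filter. All values below are made up.
def _example_diffusion_delay():
    events = pd.DataFrame({
        'time': ['2017-01-01 00:00:00', '2017-01-01 06:00:00', '2017-01-02 12:00:00'],
        'event': ['CreateEvent', 'WatchEvent', 'ForkEvent'],
        'user': ['u1', 'u2', 'u3'],
        'repo': ['r1', 'r1', 'r1'],
    })
    return getRepoDiffusionDelay(events, eventType=['WatchEvent', 'ForkEvent'], unit='h')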
'''
This method returns the growth of a repo over time.
Question #2
Input: df - Dataframe of all data for a repo
cumSum - This is a boolean that indicates if the dataframe should be cumuluative over time.
output - A dataframe that describes the repo growth. Indexed on time.
'''
def getRepoGrowth(df, cumSum=False, plot=False, saveData=False):
df.columns = ['time', 'event','user', 'repo']
df['id'] = df.index
df['time'] = pd.to_datetime(df['time'])
df = df.sort_values(by='time')
df.set_index('time', inplace=True)
df['hour'] = df.index.hour
df['day'] = df.index.day
df['month'] = df.index.month
df['year'] = df.index.year
#get daily event counts
p = df[['year', 'month', 'day', 'id']].groupby(['year', 'month', 'day']).count()
p = pd.DataFrame(p).reset_index()
#get cumulative sum of daily event counts
if cumSum == True:
p['id'] = p.cumsum(axis=0)['id']
p.columns = ['year', 'month', 'day', 'value']
p['date'] = p.apply(lambda x: datetime.strptime("{0} {1} {2}".format(x['year'], x['month'], x['day']), "%Y %m %d"), axis=1)
p['date'] = pd.to_datetime(p['date'].dt.strftime('%Y-%m-%d'))
p = p.set_index(p['date'])
del p['year']
del p['month']
del p['day']
del p['date']
p = p.reset_index()
if plot== False:
return p
##############
## Plotting ##
##############
cumTitle = ''
if cumSum:
cumTitle = 'Cumulative Sum of '
if saveData != False:
plot_time_series(p,'Time','Total Number of Events', cumTitle + 'Events Over Time', loc=saveData+'_time_series_cumsum'+str(cumSum)+'.png')
#To mimic PNNL Graph, run with CumSum as False
plot_histogram(p['value'].values,'Events Per Day',cumTitle + 'Total Number of Days','Distribution Over Daily Event Counts', loc=saveData + 'histogram_cumsum' +str(cumSum)+'.png')
else:
print(plot_time_series(p,'Time','Total Number of Events',cumTitle + 'Events Over Time'))
#To mimic PNNL Graph, run with CumSum as False
print(plot_histogram(p['value'].values,'Events Per Day',cumTitle + 'Total Number of Days','Distribution Over Daily Event Counts'))
return p
'''
This method returns the the number of events on a repo before it "dies" (deleted or no activity)
Question #2
Input - Dataframe of a repo
Output - Number of events before death
'''
def getLifetimeDepth(df):
df.columns = ['time','event','user','repo']
df['time'] = pd.to_datetime(df['time'])
df = df.sort_values(by='time')
return len(df)
'''
Time from creation to "death" of repo (deleted or no activity)
Question #2
Input - Dataframe of a repo
Output - Time from creation to "death" (default is days)
'''
def getLifetimeTime(df):
df.columns = ['time', 'event', 'user', 'repo']
df['time'] = pd.to_datetime(df['time'])
df = df.sort_values(by='time')
p = pd.DataFrame(df.iloc[[0, -1]])
p['delta'] = (p['time'] - p['time'].shift()).fillna(0)
p['ans'] = p['delta'].apply(lambda x: x.total_seconds()).astype('int64') / (24 * 60 * 60)
return p['ans'].values[1]
'''
Calcultes the number of contributers, with or without duplicates.
Question # 4
Input: df - Data frame can be repo or non-repo centric
dropDup - Boolean to indicate whether or not the metric should contain duplicate users (on a daily basis), if None run Both
cumaltive - Boolean to indicate whether or not the metric should be cumulative over time
'''
def getContributions(df,dropDup=False,cumulative=False, plot=False, saveData=False, wfEvents=True):
def contributionsHelper(df,dropDup,cumulative):
if dropDup:
df = df.drop_duplicates(subset=['user'])
p = df[['user', 'year', 'month', 'day']].groupby(['year', 'month', 'day']).nunique()
p = pd.DataFrame(p)
del p['day']
del p['year']
del p['month']
p = p.reset_index()
if cumulative:
p['user'] = p.cumsum(axis=0)['user']
p['date'] = p.apply(lambda x: datetime.strptime("{0} {1} {2}".format(x['year'], x['month'], x['day']), "%Y %m %d"),
axis=1)
p['date'] = p['date'].dt.strftime('%Y-%m-%d')
p['date'] = pd.to_datetime(p['date'])
del p['year']
del p['month']
del p['day']
return p
df.columns = [ 'time', 'event', 'user', 'repo']
df['time'] = pd.to_datetime(df['time'])
if wfEvents == True:
df = df[(df.event != 'ForkEvent') & (df.event != 'WatchEvent')]
df = df.sort_values(by='time')
df.set_index('time', inplace=True)
df['hour'] = df.index.hour
df['day'] = df.index.day
df['month'] = df.index.month
df['year'] = df.index.year
#Running Both Duplicate and Non Duplicate
if dropDup == None:
#run both
noDups = contributionsHelper(df, True, cumulative)
containsDup = contributionsHelper(df,False, cumulative)
noDups = noDups.reset_index(drop=True)
containsDup = containsDup.reset_index(drop=True)
##############
## Plotting ##
##############
if saveData != False:
plot_contributions_twolines(containsDup,noDups,'Time','Number of Users','Number of Contributing Users Over Time', loc=saveData + '_containsDup_contributions_cumulative_' +str(cumulative)+ '.png')
else:
print(plot_contributions_twolines(containsDup,noDups,'Time','Number of Users','Cumulative Number of Contributing Users Over Time'))
return noDups, containsDup
else:
results = contributionsHelper(df,dropDup, cumulative)
##############
## Plotting ##
##############
title = 'with'
if dropDup:
title = 'without'
cumTitle = ''
if cumulative:
cumTitle = 'Cumulative '
p = results
p = p.rename(columns={"user": "value"})
if not plot:
return p
sns.set_style('whitegrid')
sns.set_context('talk')
if saveData != False:
# To mimic PNNl's output have the cumulative as True
plot_contributions_oneline(p,'Time','Number of Users',cumTitle +'Number of Users Over Time', loc=saveData + '_one_line_no_duplicates_cumsum' + str(cumulative) + '.png')
#To mimic PNNL's output have cumulative as False
plot_histogram(p.value.values,'Total Number of Contributors','Days',cumTitle+'Distribution of Unique Contributors '+title+' Duplicates', loc=saveData + '_histogram_no_duplicates_cumsum' + str(cumulative) + '.png')
else:
# To mimic PNNl's output have the cumulative as True
#plot_contributions_oneline(p,'Time','Number of Users', cumTitle + 'Number of Contributing Users Over Time')
#To mimic PNNL's output have cumulative as False
#plot_histogram(p.user.values,'Total Number of Contributors','Days', cumTitle + 'Distribution of Unique Contributors '+title+' Duplicates')
pass
return p
'''
This method returns the average time between events for each repo
NOTE: Multiprocessing is highly recommended for datasets with more than 5000 repos.
Question #12
Inputs: df - Data frame of all data for repos
repos - (Optional) List of specific repos to calculate the metric for
        nCPU - (Optional) Number of CPUs to run the metric in parallel
Outputs: A list of average times for each repo. Length should match number of repos. Elements with NaN correspond to a
repo only having a single event.
'''
def getAvgTimebwEvents(df, repos=None, nCPU=1, plot=False, saveData=False):
    # Standardize Time and Sort Dataframe
    df.columns = ['time', 'event', 'user', 'repo']
    df['time'] = pd.to_datetime(df['time'])
    if repos is None:
        repos = df['repo'].unique()
    p = Pool(nCPU)
    args = [(df, repo) for repo in repos]
    deltas = p.map(getMeanTimeHelper, args)
    p.close()
    p.join()
    if plot == False:
        return deltas
    if saveData != False:
        plot_histogram(deltas,'Time Between PullRequestEvents in Seconds','Number of Repos','Average Time Between PullRequestEvents', loc=saveData + '_histogram.png')
    else:
        print(plot_histogram(deltas,'Time Between PullRequestEvents in Seconds','Number of Repos','Average Time Between PullRequestEvents'))
    return deltas

'''
Helper functions for the average time between events.
getMeanTime computes the mean inter-event time (in seconds) for a single repo;
getMeanTimeHelper unpacks the (df, repo) argument tuple for Pool.map.
They are defined at module level so that multiprocessing can pickle them.
'''
def getMeanTime(df, r):
    d = df[df.repo == r]
    d = d.sort_values(by='time')
    delta = np.mean(np.diff(d.time)) / np.timedelta64(1, 's')
    return delta

def getMeanTimeHelper(args):
    return getMeanTime(*args)
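# Minimal sketch of the per-repo quantity computed above: sort one repo's
# timestamps and average the successive differences in seconds. The
# timestamps are synthetic.
def _example_mean_time_between_events():
    times = pd.to_datetime(['2017-01-01 00:00:00',
                            '2017-01-01 00:10:00',
                            '2017-01-01 00:40:00'])
    return np.mean(np.diff(times.sort_values())) / np.timedelta64(1, 's')   # 1200.0 seconds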
'''
This method returns the distribution for each event over time or by weekday. Default is over time.
Question #5
Inputs: df - Data frame of all data for repos
        nCPU - (Optional) Number of CPUs to run the metric in parallel
weekday - (Optional) Boolean to indicate whether the distribution should be done by weekday. Default is False.
Output: Dataframe with the distribution of events by weekday. Columns: Event, Weekday, Count
'''
def getDistributionOfEvents(df,nCPU = 1,weekday=False, plot=False, saveData=False):
df.columns = ['time','event','user','repo']
df['id'] = df.index
df_split = np.array_split(df,nCPU)
pool = Pool(nCPU)
distribution_partial = partial(processDistOfEvents, weekday=weekday)
df_list = pool.map(distribution_partial,df_split)
pool.close()
pool.join()
    # Merge the per-chunk counts into a single dataframe, summing groups that were split across chunks
    if weekday:
        columns = ['event', 'weekday']
    else:
        columns = ['event', 'date']
    df_1 = pd.concat(df_list)
    df_1 = df_1.groupby(columns, as_index=False)['value'].sum()
    df_1 = df_1[columns + ['value']]
if plot==False:
return df_1
##############
## Plotting ##
##############
if saveData != False:
plot_distribution_of_events(df_1,weekday, loc=saveData + '.png')
else:
print(plot_distribution_of_events(df_1,weekday))
return df_1
'''
Helper Function for getting the Dist. of Events per weekday.
'''
def processDistOfEvents(df,weekday):
df['time'] = pd.to_datetime(df['time'])
df = df.sort_values(by='time')
df.set_index('time', inplace=True)
df['hour'] = df.index.hour
df['day'] = df.index.day
df['month'] = df.index.month
df['year'] = df.index.year
if weekday:
df['weekday'] =df.apply(lambda x:datetime(x['year'],x['month'],x['day']).weekday(),axis=1)
p = df[['event','user','weekday']].groupby(['event','weekday']).count()
p.columns = ['value']
p = p.reset_index()
return p
else:
p = df[['event', 'year', 'month', 'day','id']].groupby(['event', 'year', 'month','day']).count()
p = pd.DataFrame(p).reset_index()
p.columns = ['event', 'year', 'month','day','value']
p['date'] = p.apply(lambda x: datetime.strptime("{0} {1} {2}".format(x['year'], x['month'],x['day']), "%Y %m %d"), axis=1)
p['date'] = p['date'].dt.strftime('%Y-%m-%d')
p = p.reset_index()
return p
'''
This method returns the distribution of event type per repo e.g. x repos with y number of events, z repos with n
amounts of events.
Question #12,13,14
Inputs: df - Data frame with data for all repos
eventType - Event time to get distribution over
Outputs: Dataframe with the distribution of event type per repo. Columns are repo id and the count of that event.
'''
def getDistributionOfEventsByRepo(df,eventType='WatchEvent', plot=False, saveData=False,log=True):
df.columns = ['time', 'event', 'user', 'repo']
df = df[df.event == eventType]
p = df[['repo', 'event']].groupby(['repo']).count()
p = p.sort_values(by='event')
p.columns = ['value']
p = p.reset_index()
if plot == False:
return p
##############
## Plotting ##
##############
ylabel = 'Number of Repos '
if log == True:
ylabel += '(Log)'
if saveData != False:
plot_histogram(p['value'].values,'Number of Events',ylabel,'Distribution of '+eventType+' across Repos',log=log, loc=saveData + '_histogram.png')
else:
print(plot_histogram(p['value'].values,'Number of Events',ylabel,'Distribution of '+eventType+' across Repos',log=log))
return p
'''
This method returns the top-k repos by event count for a certain event type
Question #12,13
Inputs: df - Data frame with data for all repos
eventType - Event time to get distribution over
Outputs: Dataframe with the top-k repos and their event counts. Columns are repo id and the count of that event.
'''
def getTopKRepos(df,k=100,eventType='WatchEvent',plot=False,saveData=False):
df.columns = ['time', 'event', 'user', 'repo']
df = df[df.event == eventType]
p = df[['repo', 'event']].groupby(['repo']).count()
p = p.sort_values(by='event',ascending=False)
p.columns = ['value']
return p.head(k)
'''
This method returns the distribution of repo life over the dataframe. Repo life is defined from the first time a repo
event is seen or created to when it is deleted or the last event in the dataframe.
Question #12
Inputs: df - Data frame with the data for all repos
Outputs: List of deltas for each repos lifetime.
'''
def getDisributionOverRepoLife(df, plot=False, log = True, saveData=False):
df.columns = ['time','event','user', 'repo']
df['time'] = pd.to_datetime(df['time'])
df = df.sort_values(by='time')
df_max = df[['repo', 'time']].groupby('repo').max()
df_min = df[['repo', 'time']].groupby('repo').min()
df_min = df_min.reset_index()
df_max = df_max.reset_index()
df_min.columns = ['repo', 'minTime']
m = df_min.merge(df_max)
m['delta'] = m[['time']].sub(m['minTime'], axis=0)
delta = m['delta'].values
#Converts deltas to days (modify as necessary)
delta = delta / (10 ** 9) / 86400
delta = [int(x) for x in delta if int(x) <= 25]
if plot == False:
return delta
##############
## Plotting ##
##############
if saveData != False:
plot_histogram(delta,'Length of Repo Life in Days','Number of Repos (Log)','Distrbution of Repo Life',log=log, loc=saveData + '_histogram.png')
else:
print(plot_histogram(delta,'Length of Repo Life in Days','Number of Repos (Log)','Distrbution of Repo Life',log=log))
return delta
'''
This method returns the gini coefficient for the data frame.
Question #6,15
Input: df - Data frame containing data can be any subset of data
type - (Optional) This is the type of gini coefficient. Options: user or repo (case sensitive)
Output: g - gini coefficient
'''
def getGiniCoef(df, type='repo', plot=False, saveData=False):
df.columns = ['time', 'event' ,'user', 'repo']
df = df[['repo', 'user']].groupby([type]).count()
df.columns = ['counts']
df = df.reset_index()
values = df['counts'].values
values = np.sort(np.array(values))
cdf = np.cumsum(values) / float(np.sum(values))
percent_nodes = np.arange(len(values)) / float(len(values))
g = 1 - 2*np.trapz(x=percent_nodes,y=cdf)
if plot == False:
return g
x = cdf
y = percent_nodes
    data = pd.DataFrame({'cum_nodes': y, 'cum_value': x})
#!/usr/bin/env python3
import logging
import math
from asyncio.log import logger
from copy import copy
from datetime import datetime, timedelta
import arrow
import numpy as np
import pandas as pd
import requests
from electricitymap.contrib.config import ZONES_CONFIG
from parsers.lib.config import refetch_frequency
from . import DK, ENTSOE, statnett
ZONE_CONFIG = ZONES_CONFIG["NL"]
@refetch_frequency(timedelta(days=1))
def fetch_production(
zone_key="NL",
session=None,
target_datetime=None,
logger=logging.getLogger(__name__),
):
if target_datetime is None:
target_datetime = arrow.utcnow()
else:
target_datetime = arrow.get(target_datetime)
r = session or requests.session()
consumptions = ENTSOE.fetch_consumption(
zone_key=zone_key, session=r, target_datetime=target_datetime, logger=logger
)
if not consumptions:
return
for c in consumptions:
del c["source"]
df_consumptions = pd.DataFrame.from_dict(consumptions).set_index("datetime")
# NL has exchanges with BE, DE, NO, GB, DK-DK1
exchanges = []
for exchange_key in ["BE", "DE", "GB"]:
zone_1, zone_2 = sorted([exchange_key, zone_key])
exchange = ENTSOE.fetch_exchange(
zone_key1=zone_1,
zone_key2=zone_2,
session=r,
target_datetime=target_datetime,
logger=logger,
)
if not exchange:
return
exchanges.extend(exchange or [])
# add NO data, fetch once for every hour
# This introduces an error, because it doesn't use the average power flow
# during the hour, but rather only the value during the first minute of the
# hour!
zone_1, zone_2 = sorted(["NO", zone_key])
exchange_NO = [
statnett.fetch_exchange(
zone_key1=zone_1,
zone_key2=zone_2,
session=r,
target_datetime=dt.datetime,
logger=logger,
)
for dt in arrow.Arrow.range(
"hour",
arrow.get(min([e["datetime"] for e in exchanges])).replace(minute=0),
arrow.get(max([e["datetime"] for e in exchanges])).replace(minute=0),
)
]
exchanges.extend(exchange_NO)
# add DK1 data (only for dates after operation)
if target_datetime > arrow.get("2019-08-24", "YYYY-MM-DD"):
zone_1, zone_2 = sorted(["DK-DK1", zone_key])
df_dk = pd.DataFrame(
DK.fetch_exchange(
zone_key1=zone_1,
zone_key2=zone_2,
session=r,
target_datetime=target_datetime,
logger=logger,
)
)
# Because other exchanges and consumption data is only available per hour
# we floor the timestamp to the hour and group by hour, averaging netFlow
df_dk["datetime"] = df_dk["datetime"].dt.floor("H")
exchange_DK = (
df_dk.groupby(["datetime"])
.aggregate({"netFlow": "mean", "sortedZoneKeys": "max", "source": "max"})
.reset_index()
)
# because averaging with high precision numbers leads to rounding errors
exchange_DK = exchange_DK.round({"netFlow": 3})
exchanges.extend(exchange_DK.to_dict(orient="records"))
# We want to know the net-imports into NL, so if NL is in zone_1 we need
# to flip the direction of the flow. E.g. 100MW for NL->DE means 100MW
# export to DE and needs to become -100MW for import to NL.
for e in exchanges:
if e["sortedZoneKeys"].startswith("NL->"):
e["NL_import"] = -1 * e["netFlow"]
else:
e["NL_import"] = e["netFlow"]
del e["source"]
del e["netFlow"]
df_exchanges = pd.DataFrame.from_dict(exchanges).set_index("datetime")
# Sum all exchanges to NL imports
df_exchanges = df_exchanges.groupby("datetime").sum()
# Fill missing values by propagating the value forward
df_consumptions_with_exchanges = df_consumptions.join(df_exchanges).fillna(
method="ffill", limit=3
) # Limit to 3 x 15min
# Load = Generation + netImports
# => Generation = Load - netImports
df_total_generations = (
df_consumptions_with_exchanges["consumption"]
- df_consumptions_with_exchanges["NL_import"]
)
# Fetch all production
productions = ENTSOE.fetch_production(
zone_key=zone_key, session=r, target_datetime=target_datetime, logger=logger
)
if not productions:
return
# Flatten production dictionaries (we ignore storage)
for p in productions:
# if for some reason there's no unknown value
if "unknown" not in p["production"] or p["production"]["unknown"] is None:
p["production"]["unknown"] = 0
Z = sum([x or 0 for x in p["production"].values()])
# Only calculate the difference if the datetime exists
# If total ENTSOE reported production (Z) is less than total generation
# (calculated from consumption and imports), then there must be some
# unknown production missing, so we add the difference.
# The difference can actually be negative, because consumption is based
# on TSO network load, but locally generated electricity may never leave
# the DSO network and be substantial (e.g. Solar).
if (
p["datetime"] in df_total_generations
and Z < df_total_generations[p["datetime"]]
):
p["production"]["unknown"] = round(
(df_total_generations[p["datetime"]] - Z + p["production"]["unknown"]),
3,
)
# Add capacities
solar_capacity_df = get_solar_capacities()
wind_capacity_df = get_wind_capacities()
for p in productions:
p["capacity"] = {
"solar": round(get_solar_capacity_at(p["datetime"], solar_capacity_df), 3),
"wind": round(get_wind_capacity_at(p["datetime"], wind_capacity_df), 3),
}
# Filter invalid
# We should probably add logging to this
return [p for p in productions if p["production"]["unknown"] > 0]
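# Small sketch of the import sign convention above, on made-up exchange records: keys that
# start with "NL->" report exports from NL, so their flow is flipped to a negative import.
def _demo_import_sign():
    exchanges = [
        {"sortedZoneKeys": "NL->NO", "netFlow": 100.0},  # positive flow = NL exporting
        {"sortedZoneKeys": "DE->NL", "netFlow": 40.0},   # positive flow = NL importing
    ]
    for e in exchanges:
        if e["sortedZoneKeys"].startswith("NL->"):
            e["NL_import"] = -1 * e["netFlow"]
        else:
            e["NL_import"] = e["netFlow"]
    return exchanges  # NL->NO becomes NL_import = -100.0, DE->NL stays +40.0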
def fetch_production_energieopwek_nl(
session=None, target_datetime=None, logger=logging.getLogger(__name__)
) -> list:
if target_datetime is None:
target_datetime = arrow.utcnow()
# Get production values for target and target-1 day
df_current = get_production_data_energieopwek(target_datetime, session=session)
df_previous = get_production_data_energieopwek(
target_datetime.shift(days=-1), session=session
)
# Concat them, oldest first to keep chronological order intact
df = pd.concat([df_previous, df_current])
output = []
base_time = (
arrow.get(target_datetime.date(), "Europe/Paris").shift(days=-1).to("utc")
)
for i, prod in enumerate(df.to_dict(orient="records")):
output.append(
{
"zoneKey": "NL",
"datetime": base_time.shift(minutes=i * 15).datetime,
"production": prod,
"source": "energieopwek.nl, entsoe.eu",
}
)
return output
def get_production_data_energieopwek(date, session=None):
r = session or requests.session()
# The API returns values per day from local time midnight until the last
# round 10 minutes if the requested date is today or for the entire day if
# it's in the past. 'sid' can be anything.
url = "http://energieopwek.nl/jsonData.php?sid=2ecde3&Day=%s" % date.format(
"YYYY-MM-DD"
)
response = r.get(url)
obj = response.json()
production_input = obj["TenMin"]["Country"]
# extract the power values in kW from the different production types
# we only need column 0, 1 and 3 contain energy sum values
df_solar = (
pd.DataFrame(production_input["Solar"])
.drop(["1", "3"], axis=1)
.astype(int)
.rename(columns={"0": "solar"})
)
df_offshore = (
pd.DataFrame(production_input["WindOffshore"])
.drop(["1", "3"], axis=1)
.astype(int)
)
df_onshore = (
pd.DataFrame(production_input["Wind"]).drop(["1", "3"], axis=1).astype(int)
)
# We don't differentiate between onshore and offshore wind so we sum them
# together and build a single data frame with named columns
df_wind = df_onshore.add(df_offshore).rename(columns={"0": "wind"})
df = pd.concat([df_solar, df_wind], axis=1)
# resample from 10min resolution to 15min resolution to align with ENTSOE data
# we duplicate every row and then group them per 3 and take the mean
df = (
pd.concat([df] * 2)
.sort_index(axis=0)
.reset_index(drop=True)
.groupby(by=lambda x: math.floor(x / 3))
.mean()
)
# Convert kW to MW with kW resolution
df = df.apply(lambda x: round(x / 1000, 3))
return df
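# Toy illustration of the 10-minute -> 15-minute resampling above (values are invented):
# duplicating every row and averaging groups of three turns 6 ten-minute samples into
# 4 fifteen-minute samples covering the same hour.
def _demo_resample_10min_to_15min():
    demo = pd.DataFrame({"solar": [0, 10, 20, 30, 40, 50]})
    return (
        pd.concat([demo] * 2)
        .sort_index(axis=0)
        .reset_index(drop=True)
        .groupby(by=lambda x: math.floor(x / 3))
        .mean()
    )  # -> solar approx [3.33, 16.67, 33.33, 46.67]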
def get_wind_capacities() -> pd.DataFrame:
url_wind_capacities = "https://api.windstats.nl/stats"
capacities_df = pd.DataFrame(columns=["datetime", "capacity (MW)"])
try:
r = requests.get(url_wind_capacities)
per_year_split_capacity = r.json()["combinedPowerPerYearSplitByLandAndSea"]
except Exception as e:
logger.error(f"Error fetching wind capacities: {e}")
return capacities_df
per_year_capacity = {
f"{year}-01-01 00:00:00+00:00": sum(split.values())
for (year, split) in per_year_split_capacity.items()
}
capacities_df["datetime"] = pd.to_datetime(list(per_year_capacity.keys()))
capacities_df["capacity (MW)"] = list(per_year_capacity.values())
capacities_df = capacities_df.set_index("datetime")
return capacities_df
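# Sketch of how the per-year land/sea split is collapsed into one capacity row per year
# (the numbers here are invented; the real values come from the windstats API above).
def _demo_wind_capacity_frame():
    per_year_split = {"2020": {"land": 4000, "sea": 2500}, "2021": {"land": 4500, "sea": 3000}}
    per_year = {
        f"{year}-01-01 00:00:00+00:00": sum(split.values())
        for year, split in per_year_split.items()
    }
    demo_df = pd.DataFrame(
        {"datetime": pd.to_datetime(list(per_year.keys())), "capacity (MW)": list(per_year.values())}
    )
    return demo_df.set_index("datetime")  # 2020 -> 6500 MW, 2021 -> 7500 MW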
def get_solar_capacities() -> pd.DataFrame:
solar_capacity_base_url = "https://opendata.cbs.nl/ODataApi/odata/82610ENG/UntypedDataSet?$filter=((EnergySourcesTechniques+eq+%27E006590+%27))+and+("
START_YEAR = 2010
end_year = arrow.now().year
years = list(range(START_YEAR, end_year + 1))
url_solar_capacity = copy(solar_capacity_base_url)
for i, year in enumerate(years):
if i == len(years) - 1:
url_solar_capacity += f"(Periods+eq+%27{year}JJ00%27))"
else:
url_solar_capacity += f"(Periods+eq+%27{year}JJ00%27)+or+"
solar_capacity_df = | pd.DataFrame(columns=["datetime", "capacity (MW)"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
########## file path ##########
##### input file
# training set keys uic-label with k_means clusters' label
path_df_part_1_uic_label_cluster = "df_part_1_uic_label_cluster.csv"
path_df_part_2_uic_label_cluster = "df_part_2_uic_label_cluster.csv"
path_df_part_3_uic = "df_part_3_uic.csv"
# data_set features
path_df_part_1_U = "df_part_1_U.csv"
path_df_part_1_I = "df_part_1_I.csv"
path_df_part_1_C = "df_part_1_C.csv"
path_df_part_1_IC = "df_part_1_IC.csv"
path_df_part_1_UI = "df_part_1_UI.csv"
path_df_part_1_UC = "df_part_1_UC.csv"
path_df_part_2_U = "df_part_2_U.csv"
path_df_part_2_I = "df_part_2_I.csv"
path_df_part_2_C = "df_part_2_C.csv"
path_df_part_2_IC = "df_part_2_IC.csv"
path_df_part_2_UI = "df_part_2_UI.csv"
path_df_part_2_UC = "df_part_2_UC.csv"
path_df_part_3_U = "df_part_3_U.csv"
path_df_part_3_I = "df_part_3_I.csv"
path_df_part_3_C = "df_part_3_C.csv"
path_df_part_3_IC = "df_part_3_IC.csv"
path_df_part_3_UI = "df_part_3_UI.csv"
path_df_part_3_UC = "df_part_3_UC.csv"
# item_sub_set P
path_df_P = "tianchi_fresh_comp_train_item.csv"
##### output file
path_df_result = "res_gbdt_k_means_subsample.csv"
path_df_result_tmp = "df_result_tmp.csv"
# depending package
import pandas as pd
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import metrics
import matplotlib.pyplot as plt
import seaborn as sns
import time
# some functions
def df_read(path, mode='r'):
'''the definition of dataframe loading function'''
data_file = open(path, mode)
try:
df = pd.read_csv(data_file, index_col=False)
finally:
data_file.close()
return df
def subsample(df, sub_size):
'''the definition of sub-sample function
@param df: dataframe
@param sub_size: sub_sample set size
@return sub-dataframe with the same formation of df'''
if sub_size >= len(df):
return df
else:
return df.sample(n=sub_size)
##### loading data of part 1 & 2
df_part_1_uic_label_cluster = df_read(path_df_part_1_uic_label_cluster)
df_part_2_uic_label_cluster = df_read(path_df_part_2_uic_label_cluster)
df_part_1_U = df_read(path_df_part_1_U)
df_part_1_I = df_read(path_df_part_1_I)
df_part_1_C = df_read(path_df_part_1_C)
df_part_1_IC = df_read(path_df_part_1_IC)
df_part_1_UI = df_read(path_df_part_1_UI)
df_part_1_UC = df_read(path_df_part_1_UC)
df_part_2_U = df_read(path_df_part_2_U)
df_part_2_I = df_read(path_df_part_2_I)
df_part_2_C = df_read(path_df_part_2_C)
df_part_2_IC = df_read(path_df_part_2_IC)
df_part_2_UI = df_read(path_df_part_2_UI)
df_part_2_UC = df_read(path_df_part_2_UC)
##### generation of training set & valid set
def train_set_construct(np_ratio=1, sub_ratio=1):
'''
# generation of train set
@param np_ratio: int, the sub-sample rate of the training set used for N/P balancing.
@param sub_ratio: float ~ (0~1], the further sub-sample rate of the training set after N/P balancing.
'''
train_part_1_uic_label = df_part_1_uic_label_cluster[df_part_1_uic_label_cluster['class'] == 0].sample(
frac=sub_ratio)
train_part_2_uic_label = df_part_2_uic_label_cluster[df_part_2_uic_label_cluster['class'] == 0].sample(
frac=sub_ratio)
frac_ratio = sub_ratio * np_ratio / 1200
for i in range(1, 1001, 1):
train_part_1_uic_label_0_i = df_part_1_uic_label_cluster[df_part_1_uic_label_cluster['class'] == i]
train_part_1_uic_label_0_i = train_part_1_uic_label_0_i.sample(frac=frac_ratio)
train_part_1_uic_label = pd.concat([train_part_1_uic_label, train_part_1_uic_label_0_i])
train_part_2_uic_label_0_i = df_part_2_uic_label_cluster[df_part_2_uic_label_cluster['class'] == i]
train_part_2_uic_label_0_i = train_part_2_uic_label_0_i.sample(frac=frac_ratio)
train_part_2_uic_label = pd.concat([train_part_2_uic_label, train_part_2_uic_label_0_i])
print("training subset uic_label keys is selected.")
# constructing training set
train_part_1_df = pd.merge(train_part_1_uic_label, df_part_1_U, how='left', on=['user_id'])
train_part_1_df = pd.merge(train_part_1_df, df_part_1_I, how='left', on=['item_id'])
train_part_1_df = pd.merge(train_part_1_df, df_part_1_C, how='left', on=['item_category'])
train_part_1_df = pd.merge(train_part_1_df, df_part_1_IC, how='left', on=['item_id', 'item_category'])
train_part_1_df = pd.merge(train_part_1_df, df_part_1_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_part_1_df = pd.merge(train_part_1_df, df_part_1_UC, how='left', on=['user_id', 'item_category'])
train_part_2_df = pd.merge(train_part_2_uic_label, df_part_2_U, how='left', on=['user_id'])
train_part_2_df = pd.merge(train_part_2_df, df_part_2_I, how='left', on=['item_id'])
train_part_2_df = pd.merge(train_part_2_df, df_part_2_C, how='left', on=['item_category'])
train_part_2_df = pd.merge(train_part_2_df, df_part_2_IC, how='left', on=['item_id', 'item_category'])
train_part_2_df = pd.merge(train_part_2_df, df_part_2_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_part_2_df = pd.merge(train_part_2_df, df_part_2_UC, how='left', on=['user_id', 'item_category'])
train_df = pd.concat([train_part_1_df, train_part_2_df])
# fill the missing values with -1 (the missing values are time features)
train_df.fillna(-1, inplace=True)
# using all the features for training gbdt model
train_X = train_df.as_matrix(
['u_b1_count_in_6', 'u_b2_count_in_6', 'u_b3_count_in_6', 'u_b4_count_in_6', 'u_b_count_in_6',
'u_b1_count_in_3', 'u_b2_count_in_3', 'u_b3_count_in_3', 'u_b4_count_in_3', 'u_b_count_in_3',
'u_b1_count_in_1', 'u_b2_count_in_1', 'u_b3_count_in_1', 'u_b4_count_in_1', 'u_b_count_in_1',
'u_b4_rate', 'u_b4_diff_hours',
'i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1',
'i_b1_count_in_6', 'i_b2_count_in_6', 'i_b3_count_in_6', 'i_b4_count_in_6', 'i_b_count_in_6',
'i_b1_count_in_3', 'i_b2_count_in_3', 'i_b3_count_in_3', 'i_b4_count_in_3', 'i_b_count_in_3',
'i_b1_count_in_1', 'i_b2_count_in_1', 'i_b3_count_in_1', 'i_b4_count_in_1', 'i_b_count_in_1',
'i_b4_rate', 'i_b4_diff_hours',
'c_u_count_in_6', 'c_u_count_in_3', 'c_u_count_in_1',
'c_b1_count_in_6', 'c_b2_count_in_6', 'c_b3_count_in_6', 'c_b4_count_in_6', 'c_b_count_in_6',
'c_b1_count_in_3', 'c_b2_count_in_3', 'c_b3_count_in_3', 'c_b4_count_in_3', 'c_b_count_in_3',
'c_b1_count_in_1', 'c_b2_count_in_1', 'c_b3_count_in_1', 'c_b4_count_in_1', 'c_b_count_in_1',
'c_b4_rate', 'c_b4_diff_hours',
'ic_u_rank_in_c', 'ic_b_rank_in_c', 'ic_b4_rank_in_c',
'ui_b1_count_in_6', 'ui_b2_count_in_6', 'ui_b3_count_in_6', 'ui_b4_count_in_6', 'ui_b_count_in_6',
'ui_b1_count_in_3', 'ui_b2_count_in_3', 'ui_b3_count_in_3', 'ui_b4_count_in_3', 'ui_b_count_in_3',
'ui_b1_count_in_1', 'ui_b2_count_in_1', 'ui_b3_count_in_1', 'ui_b4_count_in_1', 'ui_b_count_in_1',
'ui_b_count_rank_in_u', 'ui_b_count_rank_in_uc',
'ui_b1_last_hours', 'ui_b2_last_hours', 'ui_b3_last_hours', 'ui_b4_last_hours',
'uc_b1_count_in_6', 'uc_b2_count_in_6', 'uc_b3_count_in_6', 'uc_b4_count_in_6', 'uc_b_count_in_6',
'uc_b1_count_in_3', 'uc_b2_count_in_3', 'uc_b3_count_in_3', 'uc_b4_count_in_3', 'uc_b_count_in_3',
'uc_b1_count_in_1', 'uc_b2_count_in_1', 'uc_b3_count_in_1', 'uc_b4_count_in_1', 'uc_b_count_in_1',
'uc_b_count_rank_in_u',
'uc_b1_last_hours', 'uc_b2_last_hours', 'uc_b3_last_hours', 'uc_b4_last_hours'])
train_y = train_df['label'].values
print("train subset is generated.")
return train_X, train_y
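# Worked example of the N/P balancing arithmetic above (parameter values are illustrative):
# negatives are spread over 1000 clusters at roughly 1200 negatives per positive, so each
# negative cluster is sampled with fraction sub_ratio * np_ratio / 1200.
def _demo_np_balance_fraction(np_ratio=1, sub_ratio=0.5):
    frac_ratio = sub_ratio * np_ratio / 1200
    return frac_ratio  # 0.5 * 1 / 1200 ~ 0.000417 of each negative cluster is kept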
def valid_set_construct(sub_ratio=0.1):
'''
# generation of valid set
@param sub_ratio: float ~ (0~1], the sub-sample rate of original valid set
'''
valid_part_1_uic_label = df_part_1_uic_label_cluster[df_part_1_uic_label_cluster['class'] == 0].sample(
frac=sub_ratio)
valid_part_2_uic_label = df_part_2_uic_label_cluster[df_part_2_uic_label_cluster['class'] == 0].sample(
frac=sub_ratio)
for i in range(1, 1001, 1):
valid_part_1_uic_label_0_i = df_part_1_uic_label_cluster[df_part_1_uic_label_cluster['class'] == i]
valid_part_1_uic_label_0_i = valid_part_1_uic_label_0_i.sample(frac=sub_ratio)
valid_part_1_uic_label = pd.concat([valid_part_1_uic_label, valid_part_1_uic_label_0_i])
valid_part_2_uic_label_0_i = df_part_2_uic_label_cluster[df_part_2_uic_label_cluster['class'] == i]
valid_part_2_uic_label_0_i = valid_part_2_uic_label_0_i.sample(frac=sub_ratio)
valid_part_2_uic_label = pd.concat([valid_part_2_uic_label, valid_part_2_uic_label_0_i])
# constructing valid set
valid_part_1_df = pd.merge(valid_part_1_uic_label, df_part_1_U, how='left', on=['user_id'])
valid_part_1_df = pd.merge(valid_part_1_df, df_part_1_I, how='left', on=['item_id'])
valid_part_1_df = pd.merge(valid_part_1_df, df_part_1_C, how='left', on=['item_category'])
valid_part_1_df = pd.merge(valid_part_1_df, df_part_1_IC, how='left', on=['item_id', 'item_category'])
valid_part_1_df = pd.merge(valid_part_1_df, df_part_1_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
valid_part_1_df = pd.merge(valid_part_1_df, df_part_1_UC, how='left', on=['user_id', 'item_category'])
valid_part_2_df = pd.merge(valid_part_2_uic_label, df_part_2_U, how='left', on=['user_id'])
valid_part_2_df = pd.merge(valid_part_2_df, df_part_2_I, how='left', on=['item_id'])
valid_part_2_df = pd.merge(valid_part_2_df, df_part_2_C, how='left', on=['item_category'])
valid_part_2_df = pd.merge(valid_part_2_df, df_part_2_IC, how='left', on=['item_id', 'item_category'])
valid_part_2_df = pd.merge(valid_part_2_df, df_part_2_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
valid_part_2_df = pd.merge(valid_part_2_df, df_part_2_UC, how='left', on=['user_id', 'item_category'])
valid_df = pd.concat([valid_part_1_df, valid_part_2_df])
# fill the missing values with -1 (the missing values are time features)
valid_df.fillna(-1, inplace=True)
# using all the features for valid gbdt model
valid_X = valid_df.as_matrix(
['u_b1_count_in_6', 'u_b2_count_in_6', 'u_b3_count_in_6', 'u_b4_count_in_6', 'u_b_count_in_6',
'u_b1_count_in_3', 'u_b2_count_in_3', 'u_b3_count_in_3', 'u_b4_count_in_3', 'u_b_count_in_3',
'u_b1_count_in_1', 'u_b2_count_in_1', 'u_b3_count_in_1', 'u_b4_count_in_1', 'u_b_count_in_1',
'u_b4_rate', 'u_b4_diff_hours',
'i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1',
'i_b1_count_in_6', 'i_b2_count_in_6', 'i_b3_count_in_6', 'i_b4_count_in_6', 'i_b_count_in_6',
'i_b1_count_in_3', 'i_b2_count_in_3', 'i_b3_count_in_3', 'i_b4_count_in_3', 'i_b_count_in_3',
'i_b1_count_in_1', 'i_b2_count_in_1', 'i_b3_count_in_1', 'i_b4_count_in_1', 'i_b_count_in_1',
'i_b4_rate', 'i_b4_diff_hours',
'c_u_count_in_6', 'c_u_count_in_3', 'c_u_count_in_1',
'c_b1_count_in_6', 'c_b2_count_in_6', 'c_b3_count_in_6', 'c_b4_count_in_6', 'c_b_count_in_6',
'c_b1_count_in_3', 'c_b2_count_in_3', 'c_b3_count_in_3', 'c_b4_count_in_3', 'c_b_count_in_3',
'c_b1_count_in_1', 'c_b2_count_in_1', 'c_b3_count_in_1', 'c_b4_count_in_1', 'c_b_count_in_1',
'c_b4_rate', 'c_b4_diff_hours',
'ic_u_rank_in_c', 'ic_b_rank_in_c', 'ic_b4_rank_in_c',
'ui_b1_count_in_6', 'ui_b2_count_in_6', 'ui_b3_count_in_6', 'ui_b4_count_in_6', 'ui_b_count_in_6',
'ui_b1_count_in_3', 'ui_b2_count_in_3', 'ui_b3_count_in_3', 'ui_b4_count_in_3', 'ui_b_count_in_3',
'ui_b1_count_in_1', 'ui_b2_count_in_1', 'ui_b3_count_in_1', 'ui_b4_count_in_1', 'ui_b_count_in_1',
'ui_b_count_rank_in_u', 'ui_b_count_rank_in_uc',
'ui_b1_last_hours', 'ui_b2_last_hours', 'ui_b3_last_hours', 'ui_b4_last_hours',
'uc_b1_count_in_6', 'uc_b2_count_in_6', 'uc_b3_count_in_6', 'uc_b4_count_in_6', 'uc_b_count_in_6',
'uc_b1_count_in_3', 'uc_b2_count_in_3', 'uc_b3_count_in_3', 'uc_b4_count_in_3', 'uc_b_count_in_3',
'uc_b1_count_in_1', 'uc_b2_count_in_1', 'uc_b3_count_in_1', 'uc_b4_count_in_1', 'uc_b_count_in_1',
'uc_b_count_rank_in_u',
'uc_b1_last_hours', 'uc_b2_last_hours', 'uc_b3_last_hours', 'uc_b4_last_hours'])
valid_y = valid_df['label'].values
print("valid subset is generated.")
return valid_X, valid_y
##### generation and splitting to training set & valid set
def valid_train_set_construct(valid_ratio=0.5, valid_sub_ratio=0.5, train_np_ratio=1, train_sub_ratio=0.5):
'''
# generation of train set
@param valid_ratio: float ~ [0~1], the valid set ratio in total set and the rest is train set
@param valid_sub_ratio: float ~ (0~1), random sample ratio of valid set
@param train_np_ratio: (1~1200), the sub-sample ratio of the training set used for N/P balancing.
@param train_sub_ratio: float ~ (0~1), random sample ratio of train set after N/P subsample
@return valid_X, valid_y, train_X, train_y
'''
msk_1 = np.random.rand(len(df_part_1_uic_label_cluster)) < valid_ratio
msk_2 = np.random.rand(len(df_part_2_uic_label_cluster)) < valid_ratio
valid_df_part_1_uic_label_cluster = df_part_1_uic_label_cluster.loc[msk_1]
valid_df_part_2_uic_label_cluster = df_part_2_uic_label_cluster.loc[msk_2]
valid_part_1_uic_label = valid_df_part_1_uic_label_cluster[valid_df_part_1_uic_label_cluster['class'] == 0].sample(
frac=valid_sub_ratio)
valid_part_2_uic_label = valid_df_part_2_uic_label_cluster[valid_df_part_2_uic_label_cluster['class'] == 0].sample(
frac=valid_sub_ratio)
### constructing valid set
for i in range(1, 1001, 1):
valid_part_1_uic_label_0_i = valid_df_part_1_uic_label_cluster[valid_df_part_1_uic_label_cluster['class'] == i]
if len(valid_part_1_uic_label_0_i) != 0:
valid_part_1_uic_label_0_i = valid_part_1_uic_label_0_i.sample(frac=valid_sub_ratio)
valid_part_1_uic_label = pd.concat([valid_part_1_uic_label, valid_part_1_uic_label_0_i])
valid_part_2_uic_label_0_i = valid_df_part_2_uic_label_cluster[valid_df_part_2_uic_label_cluster['class'] == i]
if len(valid_part_2_uic_label_0_i) != 0:
valid_part_2_uic_label_0_i = valid_part_2_uic_label_0_i.sample(frac=valid_sub_ratio)
valid_part_2_uic_label = pd.concat([valid_part_2_uic_label, valid_part_2_uic_label_0_i])
valid_part_1_df = pd.merge(valid_part_1_uic_label, df_part_1_U, how='left', on=['user_id'])
valid_part_1_df = pd.merge(valid_part_1_df, df_part_1_I, how='left', on=['item_id'])
valid_part_1_df = pd.merge(valid_part_1_df, df_part_1_C, how='left', on=['item_category'])
valid_part_1_df = pd.merge(valid_part_1_df, df_part_1_IC, how='left', on=['item_id', 'item_category'])
valid_part_1_df = pd.merge(valid_part_1_df, df_part_1_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
valid_part_1_df = pd.merge(valid_part_1_df, df_part_1_UC, how='left', on=['user_id', 'item_category'])
valid_part_2_df = pd.merge(valid_part_2_uic_label, df_part_2_U, how='left', on=['user_id'])
valid_part_2_df = pd.merge(valid_part_2_df, df_part_2_I, how='left', on=['item_id'])
valid_part_2_df = | pd.merge(valid_part_2_df, df_part_2_C, how='left', on=['item_category']) | pandas.merge |
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import datetime as dt
import math
import requests
import io
import zipfile
from kungfu.series import FinancialSeries
from kungfu.frame import FinancialDataFrame
def download_factor_data(freq='D'):
'''
Downloads factor data from Kenneth French's website and returns dataframe.
freq can be either 'D' (daily) or 'M' (monthly).
'''
if freq == 'D':
# Download Carhartt 4 Factors
factors_daily = web.DataReader("F-F_Research_Data_Factors_daily", "famafrench", start='1/1/1900')[0]
mom = web.DataReader('F-F_Momentum_Factor_daily', 'famafrench', start='1/1/1900')[0]
factors_daily = factors_daily.join(mom)
factors_daily = factors_daily[['Mkt-RF','SMB','HML','Mom ','RF']]
factors_daily.columns = ['Mkt-RF','SMB','HML','Mom','RF']
return FinancialDataFrame(factors_daily)
elif freq == 'M':
# Download Carhartt 4 Factors
factors_monthly = web.DataReader("F-F_Research_Data_Factors", "famafrench", start='1/1/1900')[0]
# mom = web.DataReader('F-F_Momentum_Factor', 'famafrench', start='1/1/1900')[0] #There seems to be a problem with the data file, fix if mom is needed
# factors_monthly = factors_monthly.join(mom)
# factors_monthly = factors_monthly[['Mkt-RF','SMB','HML','Mom ','RF']]
factors_monthly.index = factors_monthly.index.to_timestamp()
# factors_monthly.columns = ['Mkt-RF','SMB','HML','Mom','RF']
factors_monthly.columns = ['Mkt-RF','SMB','HML','RF']
factors_monthly.index = factors_monthly.index+pd.tseries.offsets.MonthEnd(0)
return FinancialDataFrame(factors_monthly)
def download_industry_data(freq='D', excessreturns = True):
'''
Downloads industry data from <NAME>'s website and returns dataframe.
freq can be either 'D' (daily) or 'M' (monthly).
excessreturns is a boolean to define if the the function should return excess returns.
'''
if freq == 'D':
# Download Fama/French 49 Industries
industries_daily = web.DataReader("49_Industry_Portfolios_Daily", "famafrench", start='1/1/1900')[0]
industries_daily[(industries_daily <= -99.99) | (industries_daily == -999)] = np.nan #set missing data to NaN
industries_daily = industries_daily.rename_axis('Industry', axis='columns')
if excessreturns is True:
factors_daily = web.DataReader("F-F_Research_Data_Factors_daily", "famafrench", start='1/1/1900')[0]
industries_daily = industries_daily.subtract(factors_daily['RF'], axis=0) #transform into excess returns
return industries_daily
elif freq == 'M':
# Download Fama/French 49 Industries
industries_monthly = web.DataReader("49_Industry_Portfolios", "famafrench", start='1/1/1900')[0]
industries_monthly[(industries_monthly <= -99.99) | (industries_monthly == -999)] = np.nan #set missing data to NaN
industries_monthly = industries_monthly.rename_axis('Industry', axis='columns')
industries_monthly.index = industries_monthly.index.to_timestamp()
if excessreturns is True:
factors_monthly = web.DataReader("F-F_Research_Data_Factors", "famafrench", start='1/1/1900')[0]
factors_monthly.index = factors_monthly.index.to_timestamp()
industries_monthly = industries_monthly.subtract(factors_monthly['RF'], axis=0) #transform into excess returns
industries_monthly.index = industries_monthly.index+pd.tseries.offsets.MonthEnd(0)
return industries_monthly
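# Toy illustration of the excess-return adjustment used above (numbers are made up): the
# risk-free series is subtracted row-wise from every portfolio column.
def _demo_excess_returns():
    idx = pd.to_datetime(["2020-01-31", "2020-02-29"])
    industries = pd.DataFrame({"Agric": [1.0, -0.5], "Food": [0.8, 0.2]}, index=idx)
    rf = pd.Series([0.1, 0.1], index=idx, name="RF")
    return industries.subtract(rf, axis=0)  # Agric -> [0.9, -0.6], Food -> [0.7, 0.1]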
def download_25portfolios_data(freq='D', excessreturns = True):
'''
Downloads 25 portfolios data from Kenneth French's website and returns dataframe.
freq can be either 'D' (daily) or 'M' (monthly).
excessreturns is a boolean to define if the the function should return excess returns.
'''
if freq == 'D':
# Download Fama/French 25 portfolios
portfolios_daily = web.DataReader("25_Portfolios_5x5_Daily_CSV", "famafrench", start='1/1/1900')[0]
portfolios_daily[(portfolios_daily <= -99.99) | (portfolios_daily == -999)] = np.nan #set missing data to NaN
if excessreturns is True:
factors_daily = web.DataReader("F-F_Research_Data_Factors_daily", "famafrench", start='1/1/1900')[0]
portfolios_daily = portfolios_daily.subtract(factors_daily['RF'], axis=0) #transform into excess returns
return portfolios_daily
elif freq == 'M':
# Download Fama/French 25 portfolios
portfolios_monthly = web.DataReader("25_Portfolios_5x5_CSV", "famafrench", start='1/1/1900')[0]
portfolios_monthly[(portfolios_monthly <= -99.99) | (portfolios_monthly == -999)] = np.nan #set missing data to NaN
portfolios_monthly.index = portfolios_monthly.index.to_timestamp()
if excessreturns is True:
factors_monthly = web.DataReader("F-F_Research_Data_Factors", "famafrench", start='1/1/1900')[0]
factors_monthly.index = factors_monthly.index.to_timestamp()
portfolios_monthly = portfolios_monthly.subtract(factors_monthly['RF'], axis=0) #transform into excess returns
return portfolios_monthly
def download_recessions_data(freq='M', startdate='1/1/1900', enddate=dt.datetime.today()):
'''
Downloads NBER recessions from FRED and returns series.
freq can be either 'D' (daily) or 'M' (monthly).
startdate and enddate define the length of the timeseries.
'''
USREC_monthly = web.DataReader('USREC', 'fred',start = startdate, end=enddate)
if freq == 'M':
return USREC_monthly
if freq == 'D':
first_day = USREC_monthly.index.min() - pd.DateOffset(day=1)
last_day = USREC_monthly.index.max() + pd.DateOffset(day=31)
dayindex = pd.date_range(first_day, last_day, freq='D')
dayindex.name = 'DATE'
USREC_daily = USREC_monthly.reindex(dayindex, method='ffill')
return USREC_daily
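# Sketch of the monthly -> daily expansion above on a made-up recession flag series:
# forward-filling over a daily index repeats each month's 0/1 value for every day.
def _demo_monthly_to_daily_ffill():
    monthly = pd.DataFrame({"USREC": [0, 1]}, index=pd.to_datetime(["2020-01-01", "2020-02-01"]))
    first_day = monthly.index.min() - pd.DateOffset(day=1)
    last_day = monthly.index.max() + pd.DateOffset(day=31)
    dayindex = pd.date_range(first_day, last_day, freq="D")
    return monthly.reindex(dayindex, method="ffill")  # 0 for all of January, 1 for February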
def download_jpy_usd_data():
'''
Downloads USD/JPY exchange rate data from FRED and returns series.
'''
jpy = web.DataReader('DEXJPUS', 'fred', start = '1900-01-01')
return jpy
def download_cad_usd_data():
'''
Downloads USD/CAD exchange rate data from FRED and returns series.
'''
cad = web.DataReader('DEXCAUS', 'fred', start = '1900-01-01')
return cad
def download_vix_data():
'''
Downloads VIX index data from FRED and returns series.
'''
vix = web.DataReader('VIXCLS', 'fred', start = '1900-01-01')
return vix
def download_goyal_welch_svar():
'''
Downloads Goyal/Welch SVAR data from Amit Goyal's website and returns DataFrame.
'''
url = 'http://www.hec.unil.ch/agoyal/docs/PredictorData2017.xlsx'
sheet = pd.read_excel(url, sheet_name='Monthly')
dates = sheet['yyyymm']
SVAR = pd.DataFrame(sheet['svar'])
SVAR.index = [(dt.datetime(year = math.floor(date/100),month = date%100,day = 1)+dt.timedelta(days=32)).replace(day=1)-dt.timedelta(days=1) for date in dates]
return SVAR
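# Worked example of the yyyymm -> month-end conversion used above: 201701 becomes
# 2017-01-31 by jumping past the month boundary (+32 days), snapping back to day 1,
# and then stepping back one day.
def _demo_yyyymm_to_month_end(date=201701):
    return (dt.datetime(year=math.floor(date / 100), month=date % 100, day=1)
            + dt.timedelta(days=32)).replace(day=1) - dt.timedelta(days=1)
# _demo_yyyymm_to_month_end(201701) -> datetime.datetime(2017, 1, 31, 0, 0)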
def download_sadka_liquidity():
'''
Downloads Sadka liquidity factor data from <NAME>'s website and returns DataFrame.
'''
url = 'http://www2.bc.edu/ronnie-sadka/Sadka-LIQ-factors-1983-2012-WRDS.xlsx'
sheet = pd.read_excel(url, sheet_name='Sheet1')
dates = sheet['Date']
SadkaLIQ1 = pd.DataFrame(sheet['Fixed-Transitory'])
SadkaLIQ1.index = [(dt.datetime(year = math.floor(date/100),month = date%100,day = 1)+dt.timedelta(days=32)).replace(day=1)-dt.timedelta(days=1) for date in dates]
SadkaLIQ2 = | pd.DataFrame(sheet['Variable-Permanent']) | pandas.DataFrame |
"""Auto ARIMA transformer is a time series transformer that predicts target using ARIMA models"""
# For more information about the python ARIMA package
# please visit https://www.alkaline-ml.com/pmdarima/index.html
import importlib
from h2oaicore.transformer_utils import CustomTimeSeriesTransformer
import datatable as dt
import numpy as np
import pandas as pd
from h2oaicore.systemutils import make_experiment_logger, loggerinfo, loggerwarning
class MyAutoArimaTransformer(CustomTimeSeriesTransformer):
_binary = False
_multiclass = False
_modules_needed_by_name = ['pmdarima']
_included_model_classes = None
@staticmethod
def get_default_properties():
return dict(col_type="time_column", min_cols=1, max_cols=1, relative_importance=1)
def fit(self, X: dt.Frame, y: np.array = None):
"""
Fits ARIMA models (1 per time group) using historical target values contained in y
:param X: Datatable frame containing the features
:param y: numpy array containing the historical values of the target
:return: self
"""
# Import the ARIMA python module
pm = importlib.import_module('pmdarima')
# Init models
self.models = {}
# Convert to pandas
X = X.to_pandas()
XX = X[self.tgc].copy()
XX['y'] = np.array(y)
self.nan_value = np.mean(y)
self.ntrain = X.shape[0]
# Group the input by TGC (Time group column) excluding the time column itself
tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
if len(tgc_wo_time) > 0:
XX_grp = XX.groupby(tgc_wo_time)
else:
XX_grp = [([None], XX)]
# Get the logger if it exists
logger = None
if self.context and self.context.experiment_id:
logger = make_experiment_logger(
experiment_id=self.context.experiment_id,
tmp_dir=self.context.tmp_dir,
experiment_tmp_dir=self.context.experiment_tmp_dir
)
# Build 1 ARIMA model per time group columns
nb_groups = len(XX_grp)
for _i_g, (key, X) in enumerate(XX_grp):
# Just say where we are in the fitting process
if (_i_g + 1) % max(1, nb_groups // 20) == 0:
loggerinfo(logger, "Auto ARIMA : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups))
key = key if isinstance(key, list) else [key]
grp_hash = '_'.join(map(str, key))
# print("auto arima - fitting on data of shape: %s for group: %s" % (str(X.shape), grp_hash))
order = np.argsort(X[self.time_column])
try:
model = pm.auto_arima(X['y'].values[order], error_action='ignore')
except:
model = None
self.models[grp_hash] = model
return self
def transform(self, X: dt.Frame):
"""
Uses fitted models (1 per time group) to predict the target
If self.is_train exists, it means we are doing in-sample predictions
if it does not, then ARIMA is used to predict the future
:param X: Datatable Frame containing the features
:return: ARIMA predictions
"""
X = X.to_pandas()
XX = X[self.tgc].copy()
tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
if len(tgc_wo_time) > 0:
XX_grp = XX.groupby(tgc_wo_time)
else:
XX_grp = [([None], XX)]
# Get the logger if it exists
logger = None
if self.context and self.context.experiment_id:
logger = make_experiment_logger(
experiment_id=self.context.experiment_id,
tmp_dir=self.context.tmp_dir,
experiment_tmp_dir=self.context.experiment_tmp_dir
)
nb_groups = len(XX_grp)
preds = []
for _i_g, (key, X) in enumerate(XX_grp):
# Just say where we are in the fitting process
if (_i_g + 1) % max(1, nb_groups // 20) == 0:
loggerinfo(logger, "Auto ARIMA : %d%% of groups transformed" % (100 * (_i_g + 1) // nb_groups))
key = key if isinstance(key, list) else [key]
grp_hash = '_'.join(map(str, key))
# print("auto arima - transforming data of shape: %s for group: %s" % (str(X.shape), grp_hash))
order = np.argsort(X[self.time_column])
if grp_hash in self.models:
model = self.models[grp_hash]
if model is not None:
yhat = model.predict_in_sample() \
if hasattr(self, 'is_train') else model.predict(n_periods=X.shape[0])
yhat = yhat[order]
XX = | pd.DataFrame(yhat, columns=['yhat']) | pandas.DataFrame |
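# Small sketch of the per-group model keying used in fit()/transform() above, on a toy
# frame (group names are invented): each time-group key is flattened into a string hash
# that indexes the dictionary of fitted ARIMA models.
def _demo_group_hash():
    demo = pd.DataFrame({"store": [1, 1, 2], "dept": ["a", "a", "b"], "y": [10, 12, 7]})
    hashes = []
    for key, _grp in demo.groupby(["store", "dept"]):
        key = key if isinstance(key, list) else [key]
        hashes.append("_".join(map(str, key)))
    return hashes  # one hash string per (store, dept) group, used as the model lookup key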
from PriceIndices import Indices, MarketHistory
import pandas as pd
import numpy as np
history = MarketHistory()
def get_coin_data(crypto='bitcoin', start_date='20130428', end_date='20200501', save_data=None):
df = history.get_price(crypto, start_date, end_date)
df_bi = Indices.get_bvol_index(df) # Bitmax Volatility Index
df_bi.drop('price', axis=1, inplace=True)
df_rsi = Indices.get_rsi(df) # Relative Strength Index
df_rsi.drop(['price', 'RS_Smooth', 'RSI_1'], axis=1, inplace=True)
df_sma = Indices.get_simple_moving_average(df) # Simple Moving Average
df_sma.drop(['price'], axis=1, inplace=True)
df_bb = Indices.get_bollinger_bands(df) # Bollinger Bands
df_bb.drop(['price'], axis=1, inplace=True)
df_ema = Indices.get_exponential_moving_average(df, [20, 50]) # Exponential Moving Average
df_ema.drop(['price'], axis=1, inplace=True)
df_macd = Indices.get_moving_average_convergence_divergence(df) # Moving Average Convergence Divergence
df_macd.drop(['price',], axis=1, inplace=True)
df = pd.merge(df, df_macd, on='date', how='left')
df = pd.merge(df, df_rsi, on='date', how='left')
df = | pd.merge(df, df_bi, on='date', how='left') | pandas.merge |
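# Toy sketch of the indicator merging pattern above (values are invented): every indicator
# frame shares a 'date' column, so repeated left merges line the columns up on price dates.
def _demo_indicator_merge():
    price = pd.DataFrame({"date": ["2020-05-01", "2020-05-02"], "price": [8800.0, 8950.0]})
    rsi = pd.DataFrame({"date": ["2020-05-01", "2020-05-02"], "RSI": [55.0, 60.0]})
    return pd.merge(price, rsi, on="date", how="left")  # columns: date, price, RSI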
import sys
import os
import math
import copy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import rankdata
import multiprocessing as mp
import logging
import scanpy as sc
import anndata as ad
from scipy.io import mmread,mmwrite
from scipy.sparse import csr_matrix,issparse
import matplotlib as mpl
from functools import reduce
from sklearn.decomposition import PCA
import umap
from sctriangulate.colors import *
# for publication ready figure
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
mpl.rcParams['font.family'] = 'Arial'
def sctriangulate_preprocessing_setting(backend='Agg',png=False):
# change the backend
mpl.use(backend)
if png:
# for publication and super large dataset
mpl.rcParams['savefig.dpi'] = 600
mpl.rcParams['figure.dpi'] = 600
def small_txt_to_adata(int_file,gene_is_index=True):
'''
given a small dense expression (<2GB) txt file, load it into memory as an AnnData, and also make sure the X is a sparse matrix.
:param int_file: string, path to the input txt file, delimited by tab
:param gene_is_index: boolean, whether the gene/features are the index.
:return: AnnData
Examples::
from sctriangulate.preprocessing import small_txt_to_adata
adata = small_txt_to_adata('./input.txt',gene_is_index=True)
'''
df = pd.read_csv(int_file,sep='\t',index_col=0)
if gene_is_index:
adata = ad.AnnData(X=csr_matrix(df.values.T),var=pd.DataFrame(index=df.index.values),obs=pd.DataFrame(index=df.columns.values))
else:
adata = ad.AnnData(X=csr_matrix(df.values),var=pd.DataFrame(index=df.columns.values),obs=pd.DataFrame(index=df.index.values))
adata.var_names_make_unique()
adata.X = csr_matrix(adata.X)
return adata
def large_txt_to_mtx(int_file,out_folder,gene_is_index=True,type_convert_to='int16'): # whether the txt is gene * cell
'''
Given a large txt dense expression file, convert them to mtx file on cluster to facilitate future I/O
:param int_file: string, path to the input txt file, delimited by tab
:param out_folder: string, path to the output folder where the mtx file will be stored
:param gene_is_index: boolean, whether the gene/features is the index in the int_file.
:param type_convert_to: string, since it is a large dataframe, we need to read it in chunks; to accelerate this and reduce the memory footprint,
we convert it to either 'int16' if original data is count, or 'float32' if original data is normalized data.
Examples::
from sctriangulate.preprocessing import large_txt_to_mtx
large_txt_to_mtx(int_file='input.txt',out_folder='./data',gene_is_index=False,type_convert_to='float32')
'''
reader = pd.read_csv(int_file,sep='\t',index_col=0,chunksize=1000)
store = []
for chunk in reader:
tmp = chunk.astype(type_convert_to)
store.append(tmp)
data = pd.concat(store)
print(data.shape)
'''save as mtx, now!!!'''
if not os.path.exists(out_folder):
os.mkdir(out_folder)
if gene_is_index:
data.index.to_series().to_csv(os.path.join(out_folder,'genes.tsv'),sep='\t',header=None,index=None)
data.columns.to_series().to_csv(os.path.join(out_folder,'barcodes.tsv'),sep='\t',header=None,index=None)
mmwrite(os.path.join(out_folder,'matrix.mtx'),csr_matrix(data.values))
else:
data.columns.to_series().to_csv(os.path.join(out_folder,'genes.tsv'),sep='\t',header=None,index=None)
data.index.to_series().to_csv(os.path.join(out_folder,'barcodes.tsv'),sep='\t',header=None,index=None)
mmwrite(os.path.join(out_folder,'matrix.mtx'),csr_matrix(data.values.T))
def mtx_to_adata(int_folder,gene_is_index=True,feature='genes',feature_col='index',barcode_col='index'): # whether the mtx file is gene * cell
'''
convert mtx file to adata in RAM, make sure the X is sparse.
:param int_folder: string, folder where the mtx files are stored.
:param gene_is_index: boolean, whether the gene is index.
:param feature: string, the name of the feature tsv file, if rna, it will be genes.tsv.
:param feature_col: 'index' as index, or a int (which column, python is zero based) to use in your feature.tsv as feature
:param barcode_col: 'index' as index, or a int (which column, python is zero based) to use in your barcodes.tsv as barcode
:return: AnnData
Examples::
from sctriangulate.preprocessing import mtx_to_adata
mtx_to_adata(int_folder='./data',gene_is_index=False,feature='genes')
'''
if feature_col == 'index':
gene = pd.read_csv(os.path.join(int_folder,'{}.tsv'.format(feature)),sep='\t',index_col=0,header=None).index
else:
gene = pd.read_csv(os.path.join(int_folder,'{}.tsv'.format(feature)),sep='\t',index_col=0,header=None)[feature_col]
if barcode_col == 'index':
cell = pd.read_csv(os.path.join(int_folder,'barcodes.tsv'),sep='\t',index_col=0,header=None).index
else:
cell = pd.read_csv(os.path.join(int_folder,'barcodes.tsv'),sep='\t',index_col=0,header=None)[barcode_col]
value = csr_matrix(mmread(os.path.join(int_folder,'matrix.mtx')))
if gene_is_index:
value = value.T
adata = ad.AnnData(X=value,obs=pd.DataFrame(index=cell),var=pd.DataFrame(index=gene))
else:
adata = ad.AnnData(X=value,obs=pd.DataFrame(index=cell),var=pd.DataFrame(index=gene))
adata.var.index.name = None
adata.var_names_make_unique()
return adata
def mtx_to_large_txt(int_folder,out_file,gene_is_index=False):
'''
convert mtx back to large dense txt expression dataframe.
:param int_folder: string, path to the input mtx folder.
:param out_file: string, path to the output txt file.
:param gene_is_index: boolean, whether the gene is the index.
Examples::
from sctriangulate.preprocessing import mtx_to_large_txt
mtx_to_large_txt(int_folder='./data',out_file='input.txt',gene_is_index=False)
'''
gene = pd.read_csv(os.path.join(int_folder,'genes.tsv'),sep='\t',index_col=0,header=None).index
cell = pd.read_csv(os.path.join(int_folder,'barcodes.tsv'),sep='\t',index_col=0,header=None).index
value = mmread(os.path.join(int_folder,'matrix.mtx')).toarray()
if gene_is_index:
data = pd.DataFrame(data=value,index=gene,columns=cell)
else:
data = pd.DataFrame(data=value.T,index=cell,columns=gene)
data.to_csv(out_file,sep='\t',chunksize=1000)
def adata_to_mtx(adata,gene_is_index=True,var_column=None,obs_column=None,outdir='data'):
# create folder if not exist
if not os.path.exists(outdir):
os.mkdir(outdir)
# write genes.tsv
if var_column is None:
var = adata.var_names.to_series()
else:
var = adata.var[var_column]
var.to_csv(os.path.join(outdir,'genes.tsv'),sep='\t',header=None,index=None)
# write barcodes.tsv
if obs_column is None:
obs = adata.obs_names.to_series()
else:
obs = adata.obs[obs_column]
obs.to_csv(os.path.join(outdir,'barcodes.tsv'),sep='\t',header=None,index=None)
# write matrix.mtx
if not gene_is_index:
mmwrite(os.path.join(outdir,'matrix.mtx'),make_sure_mat_sparse(adata.X))
else:
mmwrite(os.path.join(outdir,'matrix.mtx'),make_sure_mat_sparse(adata.X).transpose())
def add_azimuth(adata,result,name='predicted.celltype.l2'):
'''
a convenient function if you have azimuth predicted labels in hand, and want to add the label to the adata.
:param adata: AnnData
:param result: string, the path to the 'azimuth_predict.tsv' file
:param name: string, the column name where the user want to transfer to the adata.
Examples::
from sctriangulate.preprocessing import add_azimuth
add_azimuth(adata,result='./azimuth_predict.tsv',name='predicted.celltype.l2')
'''
azimuth = pd.read_csv(result,sep='\t',index_col=0)
azimuth_map = azimuth[name].to_dict()
azimuth_prediction = azimuth['{}.score'.format(name)].to_dict()
azimuth_mapping = azimuth['mapping.score'].to_dict()
adata.obs['azimuth'] = adata.obs_names.map(azimuth_map).values
adata.obs['prediction_score'] = adata.obs_names.map(azimuth_prediction).values
adata.obs['mapping_score'] = adata.obs_names.map(azimuth_mapping).values
def add_annotations(adata,inputs,cols_input,index_col=0,cols_output=None,kind='disk'):
'''
Adding annotations from external sources to the adata
:param adata: Anndata
:param inputs: string, path to the txt file where the barcode to cluster label information is stored.
:param cols_input: list, what columns the users want to transfer to the adata.
:param index_col: int, for the input, which column will serve as the index column
:param cols_output: list, corresponding to the cols_input, how these columns will be named in the adata.obs columns
:param kind: a string, either 'disk', or 'memory', disk means the input is the path to the text file, 'memory' means the input is the
variable name in the RAM that represents the dataframe
Examples::
from sctriangulate.preprocessing import add_annotations
add_annotations(adata,inputs='./annotation.txt',cols_input=['col1','col2'],index_col=0,cols_output=['annotation1','annontation2'],kind='disk')
add_annotations(adata,inputs=df,cols_input=['col1','col2'],index_col=0,cols_output=['annotation1','annontation2'],kind='memory')
'''
# means a single file such that one column is barcodes, annotations are within other columns
if kind == 'disk':
annotations = pd.read_csv(inputs,sep='\t',index_col=index_col).loc[:,cols_input]
elif kind == 'memory': # index_col will be ignored
annotations = inputs.loc[:,cols_input]
mappings = []
for col in cols_input:
mapping = annotations[col].to_dict()
mappings.append(mapping)
if cols_output is None:
for i,col in enumerate(cols_input):
adata.obs[col] = adata.obs_names.map(mappings[i]).fillna('Unknown').values
adata.obs[col] = adata.obs[col].astype('str').astype('category')
else:
for i in range(len(cols_input)):
adata.obs[cols_output[i]] = adata.obs_names.map(mappings[i]).fillna('Unknown').values
adata.obs[cols_output[i]] = adata.obs[cols_output[i]].astype('str').astype('category')
def add_umap(adata,inputs,mode,cols=None,index_col=0):
'''
if umap embedding is pre-computed, add it back to adata object.
:param adata: Anndata
:param inputs: string, path to the the txt file where the umap embedding was stored.
:param mode: string, valid value 'pandas_disk', 'pandas_memory', 'numpy'
* **pandas_disk**: the `inputs` argument should be the path to the txt file
* **pandas_memory**: the `inputs` argument should be the name of the pandas dataframe in the program, inputs=df
* **numpy**, the `inputs` argument should be a 2D ndarray contains pre-sorted (same order as barcodes in adata) umap coordinates
:param cols: list, what columns contain umap embeddings
:param index_col: int, which column will serve as the index column.
Examples::
from sctriangulate.preprocessing import add_umap
add_umap(adata,inputs='umap.txt',mode='pandas_disk',cols=['umap1','umap2'],index_col=0)
'''
# make sure cols are [umap_x, umap_y]
if mode == 'pandas_disk':
df = pd.read_csv(inputs,sep='\t',index_col=index_col)
umap_x = df[cols[0]].to_dict()
umap_y = df[cols[1]].to_dict()
adata.obs['umap_x'] = adata.obs_names.map(umap_x).values
adata.obs['umap_y'] = adata.obs_names.map(umap_y).values
adata.obsm['X_umap'] = adata.obs.loc[:,['umap_x','umap_y']].values
adata.obs.drop(columns=['umap_x','umap_y'],inplace=True)
elif mode == 'pandas_memory':
df = inputs
umap_x = df[cols[0]].to_dict()
umap_y = df[cols[1]].to_dict()
adata.obs['umap_x'] = adata.obs_names.map(umap_x).values
adata.obs['umap_y'] = adata.obs_names.map(umap_y).values
adata.obsm['X_umap'] = adata.obs.loc[:,['umap_x','umap_y']].values
adata.obs.drop(columns=['umap_x','umap_y'],inplace=True)
elif mode == 'numpy': # assume the order is correct
adata.obsm['X_umap'] = inputs
def doublet_predict(adata): # given RNA count or log matrix
'''
wrapper function for running scrublet, a new column named 'doublet_scores' will be added to the adata
:param adata: Anndata
:return: dict
Examples::
from sctriangulate.preprocessing import doublet_predict
mapping = doublet_predict(old_adata)
'''
from scipy.sparse import issparse
import scrublet as scr
if issparse(adata.X):
adata.X = adata.X.toarray()
counts_matrix = adata.X
scrub = scr.Scrublet(counts_matrix)
doublet_scores, predicted_doublets = scrub.scrub_doublets(min_counts=1, min_cells=1)
adata.obs['doublet_scores'] = doublet_scores
return adata.obs['doublet_scores'].to_dict()
def make_sure_adata_writable(adata,delete=False):
'''
make sure the adata can be written to disk; h5 files are strictly typed, so no mixed dtypes are allowed.
this function detects the obs/var columns that are of mixed type, and deletes them.
:param adata: Anndata
:param delete: boolean, False will just print out what columns are mixed type, True will automatically delete those columns
:return: Anndata
Examples::
from sctriangulate.preprocessing import make_sure_adata_writable
make_sure_adata_writable(adata,delete=True)
'''
# check index, can not have name
var_names = adata.var_names
obs_names = adata.obs_names
var_names.name = None
obs_names.name = None
adata.var_names = var_names
adata.obs_names = obs_names
# make sure each column is of a pure type; if mixed type, delete the column and print out the deleted columns
# go to: https://github.com/theislab/scanpy/issues/1866
var = adata.var
obs = adata.obs
for col in var.columns:
if var[col].dtypes == 'O':
all_type = np.array([type(item) for item in var[col]])
first = all_type[0]
if (first==all_type).all() and first == str: # object, but every item is str
continue
else: # mixed type
print('column {} in var will be deleted, because mixed types'.format(col))
if delete:
adata.var.drop(columns=[col],inplace=True)
for col in obs.columns:
if obs[col].dtypes == 'O':
all_type = np.array([type(item) for item in obs[col]])
first = all_type[0]
if (first==all_type).all() and first == str: # object, but every item is str
continue
else: # mixed type
print('column {} in obs will be deleted, because mixed types'.format(col))
if delete:
adata.obs.drop(columns=[col],inplace=True)
return adata
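# Minimal illustration of the mixed-dtype check above on a toy obs-like frame (column names
# are invented): an object column whose items are all str passes, one mixing str and int is flagged.
def _demo_mixed_dtype_columns():
    demo = pd.DataFrame({"pure_str": ["a", "b"], "mixed": ["a", 1]})
    flagged = []
    for col in demo.columns:
        if demo[col].dtypes == 'O':
            all_type = np.array([type(item) for item in demo[col]])
            if not ((all_type[0] == all_type).all() and all_type[0] == str):
                flagged.append(col)
    return flagged  # ['mixed']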
def scanpy_recipe(adata,species='human',is_log=False,resolutions=[1,2,3],modality='rna',umap=True,save=True,pca_n_comps=None,n_top_genes=3000):
'''
Main preprocessing function. Run Scanpy normal pipeline to achieve Leiden clustering with various resolutions across multiple modalities.
:param adata: Anndata
:param species: string, 'human' or 'mouse'
:param is_log: boolean, whether the adata.X is count or normalized data.
:param resolutions: list, what leiden resolutions the users want to obtain.
:param modality: string, valid values: 'rna','adt','atac', 'binary'[mutation data, TCR data, etc]
:param umap: boolean, whether to compute umap embedding.
:param save: boolean, whether to save the obtained adata object with cluster label information in it.
:param pca_n_comps: int, how many PCs to keep when running PCA. Suggestion: RNA (30-50), ADT (15), ATAC (100)
:param n_top_genes: int, how many features to keep when selecting highly_variable_genes. Suggestion: RNA (3000), ADT (ignored), ATAC (50000-100000)
:return: Anndata
Examples::
from sctriangulate.preprocessing import scanpy_recipe
# rna
adata = scanpy_recipe(adata,is_log=False,resolutions=[1,2,3],modality='rna',pca_n_comps=50,n_top_genes=3000)
# adt
adata = scanpy_recipe(adata,is_log=False,resolutions=[1,2,3],modality='adt',pca_n_comps=15)
# atac
adata = scanpy_recipe(adata,is_log=False,resolutions=[1,2,3],modality='atac',pca_n_comps=100,n_top_genes=100000)
# binary
adata = scanpy_recipe(adata,resolutions=[1,2,3],modality='binary')
'''
adata.var_names_make_unique()
# normal analysis
if modality == 'rna':
if not is_log: # count data
if species == 'human':
adata.var['mt'] = adata.var_names.str.startswith('MT-')
elif species == 'mouse':
adata.var['mt'] = adata.var_names.str.startswith('mt-')
sc.pp.calculate_qc_metrics(adata,qc_vars=['mt'],percent_top=None,inplace=True,log1p=False)
sc.pp.normalize_total(adata,target_sum=1e4)
sc.pp.log1p(adata)
sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes)
adata.raw = adata
adata = adata[:,adata.var['highly_variable']]
sc.pp.regress_out(adata,['total_counts','pct_counts_mt'])
sc.pp.scale(adata,max_value=10)
sc.tl.pca(adata,n_comps=pca_n_comps)
sc.pp.neighbors(adata)
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if umap:
sc.tl.umap(adata)
# put raw back to X, and make sure it is sparse matrix
adata = adata.raw.to_adata()
if not issparse(adata.X):
adata.X = csr_matrix(adata.X)
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap))
else: # log(1+x) and depth normalized data
if species == 'human':
adata.var['mt'] = adata.var_names.str.startswith('MT-')
elif species == 'mouse':
adata.var['mt'] = adata.var_names.str.startswith('mt-')
sc.pp.calculate_qc_metrics(adata,qc_vars=['mt'],percent_top=None,inplace=True,log1p=False)
sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes)
adata.raw = adata
adata = adata[:,adata.var['highly_variable']]
sc.pp.regress_out(adata,['total_counts','pct_counts_mt'])
sc.pp.scale(adata,max_value=10)
sc.tl.pca(adata,n_comps=pca_n_comps)
sc.pp.neighbors(adata)
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if umap:
sc.tl.umap(adata)
# put raw back to X, and make sure it is sparse matrix
adata = adata.raw.to_adata()
if not issparse(adata.X):
adata.X = csr_matrix(adata.X)
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap))
elif modality == 'atac':
if not is_log:
sc.pp.calculate_qc_metrics(adata,percent_top=None,inplace=True,log1p=False)
sc.pp.normalize_total(adata,target_sum=1e4)
sc.pp.log1p(adata)
sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes)
adata.raw = adata
adata = adata[:,adata.var['highly_variable']]
#sc.pp.scale(adata,max_value=10) # because in the episcanpy tutorial, it seems to be ignored
sc.tl.pca(adata,n_comps=pca_n_comps)
sc.pp.neighbors(adata)
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if umap:
sc.tl.umap(adata)
adata = adata.raw.to_adata()
if not issparse(adata.X):
adata.X = csr_matrix(adata.X)
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap))
else:
sc.pp.calculate_qc_metrics(adata,percent_top=None,inplace=True,log1p=False)
sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes)
adata.raw = adata
adata = adata[:,adata.var['highly_variable']]
#sc.pp.scale(adata,max_value=10)
sc.tl.pca(adata,n_comps=pca_n_comps)
sc.pp.neighbors(adata)
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if umap:
sc.tl.umap(adata)
adata = adata.raw.to_adata()
if not issparse(adata.X):
adata.X = csr_matrix(adata.X)
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap))
elif modality == 'adt':
if not is_log:
sc.pp.calculate_qc_metrics(adata,percent_top=None,inplace=True,log1p=False)
adata.X = make_sure_mat_sparse(Normalization.CLR_normalization(make_sure_mat_dense(adata.X)))
sc.tl.pca(adata,n_comps=pca_n_comps)
sc.pp.neighbors(adata)
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if umap:
sc.tl.umap(adata)
if not issparse(adata.X):
adata.X = csr_matrix(adata.X)
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap))
else:
sc.tl.pca(adata,n_comps=pca_n_comps)
sc.pp.neighbors(adata)
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if umap:
sc.tl.umap(adata)
if not issparse(adata.X):
adata.X = csr_matrix(adata.X)
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap))
elif modality == 'binary': # mutation
#sc.tl.pca(adata,n_comps=pca_n_comps)
sc.pp.neighbors(adata,use_rep='X',metric='jaccard')
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if umap:
sc.tl.umap(adata)
if not issparse(adata.X):
adata.X = csr_matrix(adata.X)
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap))
elif modality == 'spatial':
sc.pp.scale(adata)
sc.pp.neighbors(adata)
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,False))
return adata
def concat_rna_and_other(adata_rna,adata_other,umap,name,prefix):
'''
concatenate rna adata and another modality's adata object
:param adata_rna: AnnData
:param adata_other: Anndata
:param umap: string, whose umap to use, either 'rna' or 'other'
:param name: string, the name of other modality, for example, 'adt' or 'atac'
:param prefix: string, the prefix added in front of features from the other modality; by scTriangulate convention, adt will be 'AB_', atac will be ''.
:return adata_combine: Anndata
Examples::
from sctriangulate.preprocessing import concat_rna_and_other
concat_rna_and_other(adata_rna,adata_adt,umap='rna',name='adt',prefix='AB_')
'''
adata_rna = adata_rna.copy()
adata_other = adata_other.copy()
# remove layers, [!obsm], varm, obsp, varp, raw
for adata in [adata_rna,adata_other]:
del adata.layers
del adata.varm
del adata.obsp
del adata.varp
del adata.raw
adata_other = adata_other[adata_rna.obs_names,:] # make sure the obs order is the same
adata_other.var_names = [prefix + item for item in adata_other.var_names]
adata_combine = ad.concat([adata_rna,adata_other],axis=1,join='outer',merge='first',label='modality',keys=['rna','{}'.format(name)])
if umap == 'rna':
adata_combine.obsm['X_umap'] = adata_rna.obsm['X_umap']
elif umap == 'other':
adata_combine.obsm['X_umap'] = adata_other.obsm['X_umap']
if not issparse(adata_combine.X):
adata_combine.X = csr_matrix(adata_combine.X)
return adata_combine
def nca_embedding(adata,nca_n_components,label,method,max_iter=50,plot=True,save=True,format='pdf',legend_loc='on data',n_top_genes=None,hv_features=None,add_features=None):
'''
Doing Neighborhood Components Analysis (NCA), so it is a supervised PCA that takes the label from the annotation, and tries to generate a UMAP
embedding that perfectly separates the labelled clusters.
:param adata: the Anndata
:param nca_n_components: recommend to be 10 based on `Ref <https://www.nature.com/articles/s41586-021-03969-3>`_
:param label: string, the column name which contains the label information
:param method: either 'umap' or 'tsne'
:param max_iter: for the NCA, default is 50, it is generally good enough
:param plot: whether to plot the umap/tsne or not
:param save: whether to save the plot or not
:param format: the saved format, default is 'pdf'
:param legend_loc: 'on data' or 'right margin'
:param n_top_genes: how many hypervariable genes to choose for NCA, recommended 3000 or 5000; default is None, which means other features will be added (multimodal setting)
:param hv_features: a list containing the user-supplied hypervariable genes/features, in a multimodal setting this can be [rna genes] + [ADT protein]
:param add_features: this should be another adata contains features from other modalities, or None means just for RNA
Example::
from sctriangulate.preprocessing import nca_embedding
# only RNA
nca_embedding(adata,nca_n_components=10,label='annotation1',method='umap',n_top_genes=3000)
# RNA + ADT
# list1 contains [gene features that are variable] and [ADT features that are variable]
nca_embedding(adata_rna,nca_n_components=10,label='annotation1',method='umap',n_top_genes=3000,hv_features=list1, add_features=adata_adt)
'''
from sklearn.neighbors import NeighborhoodComponentsAnalysis
adata = adata
if n_top_genes is not None:
sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes)
else:
if add_features is not None: # first add the features, input should be anndata
adata = concat_rna_and_other(adata,add_features,umap=None,name='add_features',prefix='add_features_')
if hv_features is not None: # custom hv
tmp = pd.Series(index=adata.var_names,data=np.full(len(adata.var_names),fill_value=False))
tmp.loc[hv_features] = True
adata.var['highly_variable'] = tmp.values
adata.raw = adata
adata = adata[:,adata.var['highly_variable']]
X = make_sure_mat_dense(adata.X)
y = adata.obs[label].values
nca = NeighborhoodComponentsAnalysis(n_components=nca_n_components,max_iter=max_iter)
embed = nca.fit_transform(X,y) # (n_cells,n_components)
adata.obsm['X_nca'] = embed
adata = adata.raw.to_adata()
if method == 'umap':
sc.pp.neighbors(adata,use_rep='X_nca')
sc.tl.umap(adata)
sc.pl.umap(adata,color=label,frameon=False,legend_loc=legend_loc)
if save:
plt.savefig(os.path.join('.','nca_embedding_{}_{}.{}'.format(label,method,format)),bbox_inches='tight')
plt.close()
elif method == 'tsne':
sc.tl.tsne(adata,use_rep='X_nca')
sc.pl.tsne(adata,color=label,frameon=False,legend_loc=legend_loc)
if save:
plt.savefig(os.path.join('.','nca_embedding_{}_{}.{}'.format(label,method,format)),bbox_inches='tight')
plt.close()
adata.X = make_sure_mat_sparse(adata.X)
return adata
def umap_dual_view_save(adata,cols):
'''
    generate a pdf file with two umaps stacked vertically: one with the legend at the side, the other with the legend on the data.
More importantly, this allows you to generate multiple columns iteratively.
:param adata: Anndata
:param cols: list, all columns from which we want to draw umap.
Examples::
from sctriangulate.preprocessing import umap_dual_view_save
umap_dual_view_save(adata,cols=['annotation1','annotation2','total_counts'])
'''
for col in cols:
fig,ax = plt.subplots(nrows=2,ncols=1,figsize=(8,20),gridspec_kw={'hspace':0.3}) # for final_annotation
sc.pl.umap(adata,color=col,frameon=False,ax=ax[0])
sc.pl.umap(adata,color=col,frameon=False,legend_loc='on data',legend_fontsize=5,ax=ax[1])
plt.savefig('./umap_dual_view_{}.pdf'.format(col),bbox_inches='tight')
plt.close()
def just_log_norm(adata):
sc.pp.normalize_total(adata,target_sum=1e4)
sc.pp.log1p(adata)
return adata
def format_find_concat(adata,canonical_chr_only=True,gtf_file='gencode.v38.annotation.gtf',key_added='gene_annotation',**kwargs):
'''
this is a wrapper function to add nearest genes to your ATAC peaks or bins. For instance, if the peak is chr1:55555-55566,
it will be annotated as chr1:55555-55566_gene1;gene2
:param adata: The anndata, the var_names is the peak/bin, please make sure the format is like chr1:55555-55566
:param canonical_chr_only: boolean, default to True, means only contain features on canonical chromosomes. for human, it is chr1-22 and X,Y
:param gtf_file: the path to the gtf files, we provide the hg38 on this `google drive link <https://drive.google.com/file/d/11gbJl2-wZr3LbpWaU9RiUAGPebqWYi1z/view?usp=sharing>`_ to download
:param key_added: string, the column name where the gene annotation will be inserted to adata.var, default is 'gene_annotation'
:return adata: Anndata, the gene annotation will be added to var, and the var_name will be suffixed with gene annotation, if canonical_chr_only is True, then only features on canonical
    chromosomes will be retained.
Example::
adata = format_find_concat(adata)
'''
adata= reformat_peak(adata,canonical_chr_only=canonical_chr_only)
find_genes(adata,gtf_file=gtf_file,key_added=key_added,**kwargs)
adata.var_names = [name + '_' + gene for name,gene in zip(adata.var_names,adata.var[key_added])]
return adata
class GeneConvert(object):
'''
A collection of gene symbol conversion functions.
Now support:
1. ensemblgene id to gene symbol.
'''
@staticmethod
def ensemblgene_to_symbol(query,species):
'''
Examples::
from sctriangulate.preprocessing import GeneConvert
converted_list = GeneConvert.ensemblgene_to_symbol(['ENSG00000010404','ENSG00000010505'],species='human')
'''
# assume query is a list, will also return a list
import mygene
mg = mygene.MyGeneInfo()
        out = mg.querymany(query,scopes='ensemblgene',fields='symbol',species=species,returnall=True,as_dataframe=True,df_index=True)
result = out['out']['symbol'].fillna('unknown_gene').tolist()
try:
assert len(query) == len(result)
except AssertionError: # have duplicate results
df = out['out']
df_unique = df.loc[~df.index.duplicated(),:]
result = df_unique['symbol'].fillna('unknown_gene').tolist()
return result
def dual_gene_plot(adata,gene1,gene2,s=8,save=True,format='pdf',dir='.',umap_lim=None):
from scipy.sparse import issparse
if issparse(adata.X):
adata.X = adata.X.toarray()
index1 = np.where(adata.var_names == gene1)[0][0]
index2 = np.where(adata.var_names == gene2)[0][0]
exp1 = adata.X[:,index1]
exp2 = adata.X[:,index2]
color = []
for i in range(len(exp1)):
if exp1[i] > 0 and exp2[i] > 0:
color.append('#F2DE77')
elif exp1[i] > 0 and exp2[i] == 0:
color.append('#5ABF9A')
elif exp1[i] == 0 and exp2[i] > 0:
color.append('#F25C69')
else:
color.append('lightgrey')
fig, ax = plt.subplots()
if umap_lim is not None:
ax.set_xlim(umap_lim[0])
ax.set_ylim(umap_lim[1])
ax.scatter(x=adata.obsm['X_umap'][:,0],y=adata.obsm['X_umap'][:,1],s=s,c=color)
import matplotlib.lines as mlines
ax.legend(handles=[mlines.Line2D([],[],marker='o',color=i,linestyle='') for i in ['#F2DE77','#5ABF9A','#F25C69','lightgrey']],
labels=['Both','{}'.format(gene1),'{}'.format(gene2),'None'],frameon=False,loc='upper left',bbox_to_anchor=[1,1])
if save:
plt.savefig(os.path.join(dir,'sctri_dual_gene_plot_{}_{}.{}'.format(gene1,gene2,format)),bbox_inches='tight')
plt.close()
return ax
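# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# dual_gene_plot has no docstring, so this shows the intended call: each cell on the existing UMAP is
# colored by whether gene1, gene2, both, or neither are expressed. The gene names below are placeholders
# and must exist in adata.var_names; adata.obsm['X_umap'] must already be computed.
def _demo_dual_gene_plot(adata):
    return dual_gene_plot(adata, gene1='CD3D', gene2='CD19', s=8, save=False)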
def multi_gene_plot(adata,genes,s=8,save=True,format='pdf',dir='.',umap_lim=None):
from scipy.sparse import issparse
if issparse(adata.X):
adata.X = adata.X.toarray()
exp_list = []
for gene in genes:
index_gene = np.where(adata.var_names == gene)[0][0]
exp_gene = adata.X[:,index_gene]
exp_list.append(exp_gene)
color = []
for i in range(len(exp_list[0])):
if len(genes) == 3:
c = ['#04BFBF','#83A603','#F7766D']
elif len(genes) == 4:
c = ['#04BFBF','#83A603','#F7766D','#E36DF2']
elif len(genes) == 5:
c = ['#04BFBF','#83A603','#F7766D','#E36DF2','#A69B03']
b = '#BABABA'
l_exp = np.array([exp[i] for exp in exp_list])
n_exp = np.count_nonzero(l_exp > 0)
if n_exp > 1:
color.append(c[np.where(l_exp==l_exp.max())[0][0]])
elif n_exp == 1:
color.append(c[np.where(l_exp>0)[0][0]])
elif n_exp == 0:
color.append(b)
fig, ax = plt.subplots()
if umap_lim is not None:
ax.set_xlim(umap_lim[0])
ax.set_ylim(umap_lim[1])
ax.scatter(x=adata.obsm['X_umap'][:,0],y=adata.obsm['X_umap'][:,1],s=s,c=color)
import matplotlib.lines as mlines
ax.legend(handles=[mlines.Line2D([],[],marker='o',color=i,linestyle='') for i in c+[b]],
labels=genes + ['None'],frameon=False,
loc='upper left',bbox_to_anchor=[1,1])
if save:
output = '_'.join(genes)
plt.savefig(os.path.join(dir,'sctri_multi_gene_plot_{}.{}'.format(output,format)),bbox_inches='tight')
plt.close()
return ax
def make_sure_mat_dense(mat):
'''
make sure a matrix is dense
    :param mat: ndarray or sparse matrix
:return mat: ndarray (dense)
Examples::
mat = make_sure_mat_dense(mat)
'''
if not issparse(mat):
pass
else:
mat = mat.toarray()
return mat
def make_sure_mat_sparse(mat): # will be csr if the input mat is a dense array
'''
make sure a matrix is sparse
    :param mat: ndarray or sparse matrix
    :return mat: csr_matrix (sparse)
    Examples::
        mat = make_sure_mat_sparse(mat)
'''
if not issparse(mat):
mat = csr_matrix(mat)
else:
pass
return mat
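# --- Hedged illustration (added for clarity, not part of the original module) ---
# A tiny round-trip between a dense ndarray and a CSR sparse matrix using the two helpers above;
# the 3x3 identity matrix is fabricated purely for demonstration.
def _demo_mat_conversion():
    dense = np.eye(3)                       # plain ndarray
    sparse = make_sure_mat_sparse(dense)    # converted to a scipy csr_matrix
    back = make_sure_mat_dense(sparse)      # converted back to a dense ndarray
    return sparse, back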
class Normalization(object):
'''
a series of Normalization functions
Now support:
1. CLR normalization
2. total count normalization (CPTT, CPM)
3. GMM normalization
'''
# matrix should be cell x feature, expecting a ndarray
@staticmethod
def CLR_normalization(mat):
'''
Examples::
from sctriangulate.preprocessing import Normalization
post_mat = Normalization.CLR_normalization(pre_mat)
'''
from scipy.stats import gmean
gmeans = gmean(mat+1,axis=1).reshape(-1,1)
post = np.log(mat/gmeans + 1)
return post
@staticmethod
def total_normalization(mat,target=1e4):
'''
Examples::
from sctriangulate.preprocessing import Normalization
post_mat = Normalization.total_normalization(pre_mat)
'''
total = np.sum(mat,axis=1).reshape(-1,1)
sf = total/target
post = np.log(mat/sf + 1)
return post
@staticmethod
def GMM_normalization(mat):
'''
Examples::
from sctriangulate.preprocessing import Normalization
post_mat = Normalization.GMM_normalization(pre_mat)
'''
mat = Normalization.total_normalization(mat)
from sklearn.mixture import GaussianMixture
model = GaussianMixture(n_components=2,random_state=0)
model.fit(mat)
means = model.means_ # (n_components,n_features)
bg_index = np.argmin(means.mean(axis=1))
bg_mean = means[bg_index,:].reshape(1,-1)
post = mat - bg_mean
return post
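# --- Hedged illustration (added for clarity, not part of the original module) ---
# Minimal sketch of the three Normalization methods on a fabricated 3-cell x 2-feature (cell x feature)
# ADT-like count matrix; the numbers are made up and only meant to show the expected input orientation.
def _demo_normalization():
    pre_mat = np.array([[10., 200.], [5., 50.], [0., 20.]])
    clr = Normalization.CLR_normalization(pre_mat)       # centered log-ratio per cell
    cptt = Normalization.total_normalization(pre_mat)    # log(counts-per-10k + 1)
    gmm = Normalization.GMM_normalization(pre_mat)       # subtract the inferred background component mean
    return clr, cptt, gmm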
def gene_activity_count_matrix_new_10x(fall_in_promoter,fall_in_gene,valid=None):
'''
Full explanation please refer to ``gene_activity_count_matrix_old_10x``
Examples::
from sctriangulate.preprocessing import gene_activity_count_matrix_new_10x
gene_activity_count_matrix_new_10x(fall_in_promoter,fall_in_gene,valid=None)
'''
gene_promoter = pd.read_csv(fall_in_promoter,sep='\t',header=None)
gene_body = pd.read_csv(fall_in_gene,sep='\t',header=None)
bucket = []
for i in range(gene_promoter.shape[0]):
row = gene_promoter.iloc[i]
in_gene = row[3]
in_barcode = row[6]
in_count = row[7]
try:
in_barcode = in_barcode.split(';')
in_count = [int(item) for item in in_count.split(';')]
except AttributeError: # means no fragments fall into the promoter
continue
# tmp will be three column, barcode, count, gene, no index
tmp = pd.DataFrame({'barcode':in_barcode,'count':in_count}).groupby(by='barcode')['count'].sum().to_frame()
tmp['gene'] = np.full(shape=tmp.shape[0],fill_value=in_gene)
tmp.reset_index(inplace=True)
bucket.append(tmp)
for i in range(gene_body.shape[0]):
row = gene_body.iloc[i]
in_gene = row[3]
in_barcode = row[6]
in_count = row[7]
try:
in_barcode = in_barcode.split(';')
in_count = [int(item) for item in in_count.split(';')]
        except AttributeError: # means no fragments fall into the gene body
continue
# tmp will be three column, barcode, count, gene, no index
tmp = pd.DataFrame({'barcode':in_barcode,'count':in_count}).groupby(by='barcode')['count'].sum().to_frame()
tmp['gene'] = np.full(shape=tmp.shape[0],fill_value=in_gene)
tmp.reset_index(inplace=True)
bucket.append(tmp)
df = pd.concat(bucket)
if valid is not None:
df = df.loc[df['barcode'].isin(valid),:]
final = df.groupby(by=['barcode','gene'])['count'].sum().unstack(fill_value=0)
return final
def gene_activity_count_matrix_old_10x(fall_in_promoter,fall_in_gene,valid=None):
'''
    this function is to generate the gene activity count matrix; please refer to ``gene_activity_count_matrix_new_10x`` for the latest
    version of the 10x fragments.tsv output.
how to get these two arguments? (LIGER team approach)
1. sort the fragment, gene and promoter bed, or use function in this module to sort the reference bed files::
sort -k1,1 -k2,2n -k3,3n pbmc_granulocyte_sorted_10k_atac_fragments.tsv > atac_fragments.sort.bed
sort -k 1,1 -k2,2n -k3,3n hg19_genes.bed > hg19_genes.sort.bed
sort -k 1,1 -k2,2n -k3,3n hg19_promoters.bed > hg19_promoters.sort.bed
2. bedmap::
module load bedops
bedmap --ec --delim "\t" --echo --echo-map-id hg19_promoters.sort.bed atac_fragments.sort.bed > atac_promoters_bc.bed
bedmap --ec --delim "\t" --echo --echo-map-id hg19_genes.sort.bed atac_fragments.sort.bed > atac_genes_bc.bed
the following was taken from http://htmlpreview.github.io/?https://github.com/welch-lab/liger/blob/master/vignettes/Integrating_scRNA_and_scATAC_data.html
* **delim**. This changes output delimiter from ‘|’ to indicated delimiter between columns, which in our case is “\t”.
* **ec**. Adding this will check all problematic input files.
* **echo**. Adding this will print each line from reference file in output. The reference file in our case is gene or promoter index.
* **echo-map-id**. Adding this will list IDs of all overlapping elements from mapping files, which in our case are cell barcodes from fragment files.
3. Finally::
from sctriangulate.preprocessing import gene_activity_count_matrix_old_10x
gene_activity_count_matrix_old_10x(fall_in_promoter,fall_in_gene,valid=None)
'''
gene_promoter = pd.read_csv(fall_in_promoter,sep='\t',header=None)
gene_body = pd.read_csv(fall_in_gene,sep='\t',header=None)
bucket = []
for i in range(gene_promoter.shape[0]):
row = gene_promoter.iloc[i]
in_gene = row[3]
in_barcode = row[6]
try:
in_barcode = in_barcode.split(';')
except AttributeError: # means no fragments fall into the promoter
continue
tmp = pd.Series(in_barcode).value_counts().to_frame(name='count')
tmp['gene'] = np.full(shape=tmp.shape[0],fill_value=in_gene)
tmp.reset_index(inplace=True) # three column: index, count, gene
bucket.append(tmp)
for i in range(gene_body.shape[0]):
row = gene_body.iloc[i]
in_gene = row[3]
in_barcode = row[6]
try:
in_barcode = in_barcode.split(';')
        except AttributeError: # means no fragments fall into the gene body
continue
tmp = pd.Series(in_barcode).value_counts().to_frame(name='count')
tmp['gene'] = np.full(shape=tmp.shape[0],fill_value=in_gene)
tmp.reset_index(inplace=True) # three column: index, count, gene
bucket.append(tmp)
df = pd.concat(bucket)
if valid is not None:
df = df.loc[df['index'].isin(valid),:]
final = df.groupby(by=['index','gene'])['count'].sum().unstack(fill_value=0)
return final
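# --- Hedged illustration (added for clarity, not part of the original module) ---
# Minimal sketch of the bedmap output format consumed by gene_activity_count_matrix_old_10x. The two
# in-memory rows below are fabricated; real input would be the atac_promoters_bc.bed and
# atac_genes_bc.bed files described in the docstring above.
def _demo_gene_activity_old_10x():
    from io import StringIO
    fall_in_promoter = StringIO('chr1\t100\t200\tGENE_A\t0\t+\tAAAC;AAAC;TTTG\n')
    fall_in_gene = StringIO('chr1\t200\t500\tGENE_A\t0\t+\tAAAC\n')
    # expected result: a barcode x gene matrix with AAAC -> 3 and TTTG -> 1 for GENE_A
    return gene_activity_count_matrix_old_10x(fall_in_promoter, fall_in_gene)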
def gene_bed_to_promoter_bed(gene_bed_path,promoter_bed_path,up_bp=3000):
gene_bed = pd.read_csv(gene_bed_path,header=None,sep='\t')
with open(promoter_bed_path,'w') as f:
for i in range(gene_bed.shape[0]):
row = gene_bed.iloc[i]
chro = row[0]
start = row[1]
end = row[2]
name = row[3]
score = row[4]
strand = row[5]
if strand == '+':
new_start = start - up_bp
new_end = start
else:
new_start = end
new_end = end + up_bp
f.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(chro,new_start,new_end,name,score,strand))
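# --- Hedged illustration (added for clarity, not part of the original module) ---
# Shows the strand-aware promoter arithmetic of gene_bed_to_promoter_bed on two fabricated BED6 records:
# for a '+' gene the promoter is the up_bp window ending at the gene start; for a '-' gene it is the
# window starting at the gene end. File names are temporary placeholders.
def _demo_gene_bed_to_promoter_bed():
    import tempfile
    tmpdir = tempfile.mkdtemp()
    gene_bed_path = os.path.join(tmpdir, 'genes.bed')
    promoter_bed_path = os.path.join(tmpdir, 'promoters.bed')
    with open(gene_bed_path, 'w') as f:
        f.write('chr1\t5000\t9000\tGENE_PLUS\t0\t+\n')   # promoter -> chr1:2000-5000
        f.write('chr1\t5000\t9000\tGENE_MINUS\t0\t-\n')  # promoter -> chr1:9000-12000
    gene_bed_to_promoter_bed(gene_bed_path, promoter_bed_path, up_bp=3000)
    return pd.read_csv(promoter_bed_path, sep='\t', header=None)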
def ensembl_gtf_to_gene_bed(gtf_path,bed_path,sort=True):
# only interested in gene feature
hg38_gtf = pd.read_csv(gtf_path,skiprows=5,header=None,sep='\t')
hg38_gtf_gene = hg38_gtf.loc[hg38_gtf[2]=='gene',:]
# gotta have gene symbol
col = []
for i in range(hg38_gtf_gene.shape[0]):
metadata = hg38_gtf_gene.iloc[i,8]
if 'gene_name' in metadata:
col.append(True)
else:
col.append(False)
hg38_gtf_gene_have_symbol = hg38_gtf_gene.loc[col,:]
# add biotype and gene name
col1 = []
col2 = []
for i in range(hg38_gtf_gene_have_symbol.shape[0]):
metadata = hg38_gtf_gene_have_symbol.iloc[i, 8]
biotype = metadata.split('; ')[-1].split(' ')[-1].strip(';').strip('"')
name = metadata.split('; ')[2].split(' ')[1].strip('"')
col1.append(biotype)
col2.append(name)
hg38_gtf_gene_have_symbol['biotype'] = col1
hg38_gtf_gene_have_symbol['name'] = col2
# biotype has to be either protein_coding, IG or TR gene
col = (hg38_gtf_gene_have_symbol['biotype']=='protein_coding') |\
(hg38_gtf_gene_have_symbol['biotype']=='IG_C_gene') |\
(hg38_gtf_gene_have_symbol['biotype']=='IG_D_gene') |\
(hg38_gtf_gene_have_symbol['biotype']=='IG_J_gene') |\
(hg38_gtf_gene_have_symbol['biotype']=='IG_V_gene') |\
(hg38_gtf_gene_have_symbol['biotype']=='TR_C_gene') |\
(hg38_gtf_gene_have_symbol['biotype']=='TR_D_gene') |\
(hg38_gtf_gene_have_symbol['biotype']=='TR_J_gene') |\
(hg38_gtf_gene_have_symbol['biotype']=='TR_V_gene')
hg38_gtf_gene_have_symbol_biotype_correct = hg38_gtf_gene_have_symbol.loc[col,:]
    # chromosome needs to be correct
chr_need_chr = ['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22']
chr_need_int = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22]
    chr_need_other = ['X','Y'] # don't include MT because the fragment.tsv file doesn't output that
chr_need = chr_need_chr + chr_need_int + chr_need_other
hg38_gtf_gene_have_symbol_biotype_correct_chr = hg38_gtf_gene_have_symbol_biotype_correct.loc[hg38_gtf_gene_have_symbol_biotype_correct[0].isin(chr_need),:]
prefixed_chr = ['chr' + str(item) for item in hg38_gtf_gene_have_symbol_biotype_correct_chr[0]]
hg38_gtf_gene_have_symbol_biotype_correct_chr[0] = prefixed_chr
# get final result, BED6 format
final = hg38_gtf_gene_have_symbol_biotype_correct_chr.loc[:,[0,3,4,'name',5,6]]
if sort:
'''
equivalent to:
sort -k1,1 -k2,2n -k3,3n gene.bed
'''
final.sort_values(by=[0,3,4],inplace=True)
final.to_csv(bed_path,sep='\t',index=None,header=None)
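# --- Hedged usage sketch (added for clarity, not part of the original module) ---
# The typical reference-building pipeline with the two helpers above; the GTF must be downloaded first
# (see the GENCODE links below) and the file names here are placeholders.
def _demo_build_reference_beds():
    ensembl_gtf_to_gene_bed('gencode.v38.annotation.gtf', 'hg38_genes.bed', sort=True)
    gene_bed_to_promoter_bed('hg38_genes.bed', 'hg38_promoters.bed', up_bp=3000)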
# this function is taken from episcanpy, all the credits to the original developer:
# https://github.com/colomemaria/epiScanpy/blob/master/episcanpy/tools/_find_genes.py
# to download the gtf file
'''
https://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_19/gencode.v19.annotation.gtf.gz
https://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_38/gencode.v38.annotation.gtf.gz
'''
def find_genes(adata,
gtf_file,
key_added='gene_annotation',
upstream=2000,
downstream=0,
feature_type='gene',
annotation='HAVANA',
raw=False):
"""
This function is taken from `episcanpy <https://github.com/colomemaria/epiScanpy/blob/master/episcanpy/tools/_find_genes.py>`_.
all the credits to the original developer
    merge values of peaks/windows/features overlapping gene bodies + 2kb upstream. It is possible to extend the search for the closest gene to a given
    number of bases downstream as well. There are commonly 2 sets of annotations in a gtf file (HAVANA, ENSEMBL). By default, the function will search for
    annotations from HAVANA, but another annotation label/source can be specified. It is possible to use other types of features than genes present in a
    gtf file, such as transcripts or CDS.
Examples::
from sctriangulate.preprocessing import find_genes
        find_genes(adata,gtf_file='gencode.v38.annotation.gtf')
"""
### extracting the genes
gtf = {}
with open(gtf_file) as f:
for line in f:
if line[0:2] != '##' and '\t'+feature_type+'\t' in line and '\t'+annotation+'\t' in line:
line = line.rstrip('\n').split('\t')
if line[6] == '-':
if line[0] not in gtf.keys():
gtf[line[0]] = [[int(line[3])-downstream, int(line[4])+upstream,line[-1].split(';')[:-1]]]
else:
gtf[line[0]].append([int(line[3])-downstream, int(line[4])+upstream,line[-1].split(';')[:-1]])
else:
if line[0] not in gtf.keys():
gtf[line[0]] = [[int(line[3])-upstream, int(line[4])+downstream,line[-1].split(';')[:-1]]]
else:
gtf[line[0]].append([int(line[3])-upstream, int(line[4])+downstream,line[-1].split(';')[:-1]])
# extracting the feature coordinates
raw_adata_features = {}
feature_index = 0
for line in adata.var_names.tolist():
line = line.split('_')
if line[0] not in raw_adata_features.keys():
raw_adata_features[line[0]] = [[int(line[1]),int(line[2]), feature_index]]
else:
raw_adata_features[line[0]].append([int(line[1]),int(line[2]), feature_index])
feature_index += 1
## find the genes overlaping the features.
gene_index = []
for chrom in raw_adata_features.keys():
if chrom in gtf.keys():
chrom_index = 0
previous_features_index = 0
for feature in raw_adata_features[chrom]:
gene_name = []
feature_start = feature[0]
feature_end = feature[1]
for gene in gtf[chrom]:
if (gene[1]<= feature_start): # the gene is before the feature. we need to test the next gene.
continue
elif (feature_end <= gene[0]): # the gene is after the feature. we need to test the next feature.
break
else: # the window is overlapping the gene.
for n in gene[-1]:
if 'gene_name' in n:
gene_name.append(n.lstrip('gene_name "').rstrip('""'))
if gene_name == []:
gene_index.append('intergenic')
elif len(gene_name)==1:
gene_index.append(gene_name[0])
else:
gene_index.append(";".join(list(set(gene_name))))
adata.var[key_added] = gene_index
def reformat_peak(adata,canonical_chr_only=True):
'''
To use ``find_genes`` function, please first reformat the peak from 10X format "chr1:10109-10357" to
find_gene format "chr1_10109_10357"
:param adata: AnnData
    :param canonical_chr_only: boolean, only keep features on the canonical chromosomes
:return: AnnData
Examples::
from sctriangulate.preprocessing import reformat_peak
adata = reformat_peak(adata,canonical_chr_only=True)
'''
var_names = adata.var_names
valid = set(['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16',
'chr17','chr18','chr19','chr20','chr21','chr22','chrX','chrY'])
col = []
for item in var_names:
chr_ = item.split(':')[0]
if chr_ in valid:
start = item.split(':')[1].split('-')[0]
end = item.split(':')[1].split('-')[1]
now = '_'.join([chr_,start,end])
col.append(now)
else:
col.append('non_canonical_chr')
adata.var_names = col
if canonical_chr_only:
adata = adata[:,adata.var_names!='non_canonical_chr'].copy()
return adata
def plot_coexpression(adata,gene1,gene2,kind,hist2d_bins=50,hist2d_cmap=bg_greyed_cmap('viridis'),hist2d_vmin=1e-5,hist2d_vmax=None,
scatter_dot_color='blue',contour_cmap='viridis',contour_levels=None,contour_scatter=True,contour_scatter_dot_size=5,
contour_train_kde='valid',surface3d_cmap='coolwarm',save=True,outdir='.',name=None):
x = np.squeeze(make_sure_mat_dense(adata[:,gene1].X))
y = np.squeeze(make_sure_mat_dense(adata[:,gene2].X))
if kind == 'scatter':
fig,ax = plt.subplots()
ax.scatter(x,y,color=scatter_dot_color)
ax.set_xlabel('{}'.format(gene1))
ax.set_ylabel('{}'.format(gene2))
elif kind == 'hist2d':
fig,ax = plt.subplots()
hist2d = ax.hist2d(x,y,bins=hist2d_bins,cmap=hist2d_cmap,vmin=hist2d_vmin,vmax=hist2d_vmax)
fig.colorbar(mappable=hist2d[3],ax=ax)
ax.set_xlabel('{}'.format(gene1))
ax.set_ylabel('{}'.format(gene2))
elif kind == 'contour':
from scipy.stats import gaussian_kde
fig,ax = plt.subplots()
X,Y = np.meshgrid(np.linspace(x.min(),x.max(),100),np.linspace(y.min(),y.max(),100))
positions = np.vstack([X.ravel(),Y.ravel()]) # (2, 10000)
values = np.vstack([x,y]) # (2, 2700)
        if contour_train_kde == 'valid': # data points that are non-zero for both gene1 and gene2
            values_to_kde = values[:,np.logical_not(np.any(values==0,axis=0))]
        elif contour_train_kde == 'semi_valid': # data points that are non-zero for at least one of the genes
            values_to_kde = values[:,np.logical_not(np.all(values==0,axis=0))]
        elif contour_train_kde == 'full': # all data points will be used for kde estimation
            values_to_kde = values
kernel = gaussian_kde(values_to_kde)
density = kernel(positions) # (10000,)
density = density.reshape(X.shape) # (100,100)
cset = ax.contour(X,Y,density,levels=contour_levels,cmap=contour_cmap)
if contour_scatter:
dot_density = kernel(values)
dot_density_color = [cm.viridis(round(np.interp(x=item,xp=[dot_density.min(),dot_density.max()],fp=[0,255]))) for item in dot_density]
ax.scatter(x,y,c=dot_density_color,s=contour_scatter_dot_size)
from matplotlib.colors import Normalize
fig.colorbar(mappable=cm.ScalarMappable(norm=Normalize(),cmap=contour_cmap),ax=ax)
ax.set_xlabel('{}'.format(gene1))
ax.set_ylabel('{}'.format(gene2))
elif kind == 'contourf':
from scipy.stats import gaussian_kde
fig,ax = plt.subplots()
X,Y = np.meshgrid(np.linspace(x.min(),x.max(),100),np.linspace(y.min(),y.max(),100))
positions = np.vstack([X.ravel(),Y.ravel()]) # (2, 10000)
values = np.vstack([x,y]) # (2, 2700)
        if contour_train_kde == 'valid': # data points that are non-zero for both gene1 and gene2
            values_to_kde = values[:,np.logical_not(np.any(values==0,axis=0))]
        elif contour_train_kde == 'semi_valid': # data points that are non-zero for at least one of the genes
            values_to_kde = values[:,np.logical_not(np.all(values==0,axis=0))]
        elif contour_train_kde == 'full': # all data points will be used for kde estimation
            values_to_kde = values
kernel = gaussian_kde(values_to_kde)
density = kernel(positions) # (10000,)
density = density.reshape(X.shape) # (100,100)
cfset = ax.contourf(X,Y,density,levels=contour_levels,cmap=contour_cmap)
cset = ax.contour(X,Y,density,levels=contour_levels,colors='k')
clable = ax.clabel(cset,inline=True,fontsize=5)
from matplotlib.colors import Normalize
fig.colorbar(mappable=cm.ScalarMappable(norm=Normalize(),cmap=contour_cmap),ax=ax)
ax.set_xlabel('{}'.format(gene1))
ax.set_ylabel('{}'.format(gene2))
elif kind == 'surface3d':
fig = plt.figure()
from scipy.stats import gaussian_kde
X,Y = np.meshgrid(np.linspace(x.min(),x.max(),100),np.linspace(y.min(),y.max(),100))
positions = np.vstack([X.ravel(),Y.ravel()]) # (2, 10000)
values = np.vstack([x,y]) # (2, 2700)
        if contour_train_kde == 'valid': # data points that are non-zero for both gene1 and gene2
            values_to_kde = values[:,np.logical_not(np.any(values==0,axis=0))]
        elif contour_train_kde == 'semi_valid': # data points that are non-zero for at least one of the genes
            values_to_kde = values[:,np.logical_not(np.all(values==0,axis=0))]
        elif contour_train_kde == 'full': # all data points will be used for kde estimation
            values_to_kde = values
kernel = gaussian_kde(values_to_kde)
density = kernel(positions) # (10000,)
density = density.reshape(X.shape) # (100,100)
ax = plt.axes(projection='3d')
surf = ax.plot_surface(X,Y,density,cmap=surface3d_cmap)
ax.set_xlabel('{}'.format(gene1))
ax.set_ylabel('{}'.format(gene2))
ax.set_zlabel('PDF for KDE')
fig.colorbar(mappable=surf,ax=ax)
if save:
if name is None:
plt.savefig(os.path.join(outdir,'coexpression_{}_{}_{}_plot.pdf'.format(kind,gene1,gene2)),bbox_inches='tight')
plt.close()
else:
plt.savefig(os.path.join(outdir,name),bbox_inches='tight')
plt.close()
return ax
def umap_color_exceed_102(adata,key,dot_size=None,legend_fontsize=6,outdir='.',name=None):
'''
    draw a umap that bypasses the scanpy 102-color upper bound; this can plot as many as 433 clusters.
:param adata: Anndata
:param key: the categorical column in adata.obs, which will be plotted
:param dot_size: None or number
    :param legend_fontsize: default is 6
:param outdir: output directory, default is '.'
:param name: name of the plot, default is None
    Example::
from sctriangulate.preprocessing import umap_color_exceed_102
umap_color_exceed_102(adata,key='leiden6') # more than 130 clusters
.. image:: ./_static/more_than102.png
:height: 550px
:width: 550px
:align: center
:target: target
'''
fig,ax = plt.subplots()
mapping = colors_for_set(adata.obs[key].unique().tolist())
color = adata.obs[key].map(mapping).values
if dot_size is None:
dot_size = 120000/adata.shape[0]
ax.scatter(adata.obsm['X_umap'][:,0],adata.obsm['X_umap'][:,1],c=color,s=dot_size)
import matplotlib.lines as mlines
ax.legend(handles=[mlines.Line2D([],[],marker='o',linestyle='',color=i) for i in mapping.values()],
labels=[i for i in mapping.keys()],loc='upper left',bbox_to_anchor=(1,1),ncol=3,frameon=False,prop={'size':6})
if name is None:
name = 'umap_{}_exceed_102.pdf'.format(key)
plt.savefig(os.path.join(outdir,name),bbox_inches='tight')
plt.close()
def custom_two_column_sankey(adata,left_annotation,right_annotation,opacity=0.6,pad=3,thickness=10,margin=300,text=True,save=True,as_html=True,outdir='.'):
import plotly.graph_objects as go
import kaleido
df = adata.obs.loc[:,[left_annotation,right_annotation]]
node_label = df[left_annotation].unique().tolist() + df[right_annotation].unique().tolist()
node_color = pick_n_colors(len(node_label))
link = []
for source,sub in df.groupby(by=left_annotation):
for target,subsub in sub.groupby(by=right_annotation):
if subsub.shape[0] > 0:
link.append((source,target,subsub.shape[0],subsub.shape[0]/sub.shape[0]))
link_info = list(zip(*link))
link_source = [node_label.index(item) for item in link_info[0]]
link_target = [node_label.index(item) for item in link_info[1]]
link_value = link_info[2]
link_pert = [round(item,2) for item in link_info[3]]
link_color = ['rgba{}'.format(tuple([infer_to_256(item) for item in to_rgb(node_color[i])] + [opacity])) for i in link_source]
node_plotly = dict(pad = pad, thickness = thickness,line = dict(color = "grey", width = 0.1),label = node_label,color = node_color)
link_plotly = dict(source=link_source,target=link_target,value=link_value,color=link_color,customdata=link_pert,
hovertemplate='%{source.label} -> %{target.label} <br /> number of cells: %{value} <br /> percentage: %{customdata}')
if not text:
fig = go.Figure(data=[go.Sankey(node = node_plotly,link = link_plotly, textfont=dict(color='rgba(0,0,0,0)',size=1))])
else:
fig = go.Figure(data=[go.Sankey(node = node_plotly,link = link_plotly)])
fig.update_layout(title_text='sankey_{}_{}'.format(left_annotation,right_annotation), font_size=6, margin=dict(l=margin,r=margin))
if save:
if not as_html:
fig.write_image(os.path.join(outdir,'two_column_sankey_{}_{}_text_{}.pdf'.format(left_annotation,right_annotation,text)))
else:
fig.write_html(os.path.join(outdir,'two_column_sankey_{}_{}_text_{}.html'.format(left_annotation,right_annotation,text)),include_plotlyjs='cdn')
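# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# custom_two_column_sankey has no docstring, so this shows the intended call: the two obs column
# names below are placeholders and must exist as categorical annotations in adata.obs.
def _demo_custom_two_column_sankey(adata):
    custom_two_column_sankey(adata, left_annotation='annotation1', right_annotation='annotation2',
                             opacity=0.6, text=True, save=True, as_html=True, outdir='.')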
def rna_umap_transform(outdir,ref_exp,ref_group,q_exp_list,q_group_list,q_identifier_list,pca_n_components,umap_min_dist=0.5):
'''
    Take a reference expression matrix (pandas dataframe) and a list of query expression matrices (pandas dataframes),
    along with a list of query expression identifiers. This function will generate the umap transformation of your reference exp,
    and then project each query exp onto the same umap transformation.
:param outdir: the path in which all the results will go
:param ref_exp: the pandas dataframe object,index is the features, column is the cell barcodes
    :param ref_group: the pandas series object, index is the cell barcodes, the value is the cluster label information
:param q_exp_list: the list of pandas dataframe object, requirement is the same as ref_exp
:param q_group_list: the list of pandas series object, requirement is the same as ref_group
:param q_identifier_list: the list of string, to denote the name of each query dataset
    :param pca_n_components: int, the number of PCs to use for PCA
    :param umap_min_dist: float, the min_dist parameter passed to the umap program
Examples::
rna_umap_transform(outdir='.',ref_exp=ref_df,ref_group=ref_group_df,q_exp_list=[q1_df],q_group_list=[q1_group_df],q_identifier_list=['q1'],pca_n_components=50)
'''
# create outdir if not exist
if not os.path.exists(outdir):
os.mkdir(outdir)
ref_variable = ref_exp.index.values
store_df = []
store_variable = []
for q_exp in q_exp_list:
q_variable = q_exp.index.values
store_df.append(q_exp)
store_variable.append(q_variable)
# find common features between ref and all qs.
all_variable = copy.deepcopy(store_variable)
all_variable.insert(0,ref_variable)
common_variable = list(reduce(lambda a,b: set(a).intersection(set(b)),all_variable))
# subset the exps, also transpose
ref_exp_common = ref_exp.loc[common_variable,:].T
store_df_common = []
for q_exp in store_df:
q_exp_common = q_exp.loc[common_variable,:].T
store_df_common.append(q_exp_common)
# train pca and umap on ref
ref_pca_model = PCA(n_components = pca_n_components).fit(ref_exp_common.values)
ref_pca_score = ref_pca_model.transform(ref_exp_common.values)
ref_umap_model = umap.UMAP(min_dist=umap_min_dist).fit(ref_pca_score)
ref_umap_embed = ref_umap_model.embedding_
# transform all the querys
store_q_umap = []
for q_exp_common in store_df_common:
q_pca_score = ref_pca_model.transform(q_exp_common.values)
q_umap_embed = ref_umap_model.transform(q_pca_score)
store_q_umap.append(q_umap_embed)
ref_umap_embed_df = pd.DataFrame(data=ref_umap_embed,index=ref_exp_common.index,columns=['umap_x','umap_y'])
ref_umap_embed_df.to_csv(os.path.join(outdir,'ref_umap.txt'),sep='\t')
for i,item in enumerate(store_q_umap):
q_exp_common = store_df_common[i]
item = pd.DataFrame(data=item,index=q_exp_common.index,columns=['umap_x','umap_y'])
item.to_csv(os.path.join(outdir,'query_{}_umap.txt'.format(q_identifier_list[i])),sep='\t')
# visualization
all_identifier = ['ref'] + q_identifier_list
all_exp = [ref_exp_common] + store_df_common
all_label_mapping = [group_df.to_dict() for group_df in [ref_group] + q_group_list]
all_umap = [ref_umap_embed] + store_q_umap
for i,exp,label_map,embed in zip(all_identifier,all_exp,all_label_mapping,all_umap):
        adata = ad.AnnData(X=exp.values,obs=pd.DataFrame(index=exp.index),var=pd.DataFrame(index=exp.columns))
import numpy as np
import pandas as pd
import string
from datetime import datetime
from sklearn.preprocessing import MinMaxScaler
from sklearn.datasets import make_regression
def simulate_seasonal_term(periodicity, total_cycles, noise_std=1.,
harmonics=None):
"""Generates a seasonality term"""
# https://www.statsmodels.org/dev/examples/notebooks/generated/statespace_seasonal.html
duration = periodicity * total_cycles
assert duration == int(duration)
duration = int(duration)
harmonics = harmonics if harmonics else int(np.floor(periodicity / 2))
lambda_p = 2 * np.pi / float(periodicity)
gamma_jt = noise_std * np.random.randn((harmonics))
gamma_star_jt = noise_std * np.random.randn((harmonics))
total_timesteps = 100 * duration # Pad for burn in
series = np.zeros(total_timesteps)
for t in range(total_timesteps):
gamma_jtp1 = np.zeros_like(gamma_jt)
gamma_star_jtp1 = np.zeros_like(gamma_star_jt)
for j in range(1, harmonics + 1):
cos_j = np.cos(lambda_p * j)
sin_j = np.sin(lambda_p * j)
gamma_jtp1[j - 1] = (gamma_jt[j - 1] * cos_j
+ gamma_star_jt[j - 1] * sin_j
+ noise_std * np.random.randn())
gamma_star_jtp1[j - 1] = (- gamma_jt[j - 1] * sin_j
+ gamma_star_jt[j - 1] * cos_j
+ noise_std * np.random.randn())
series[t] = np.sum(gamma_jtp1)
gamma_jt = gamma_jtp1
gamma_star_jt = gamma_star_jtp1
wanted_series = series[-duration:] # Discard burn in
return wanted_series
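# --- Hedged illustration (added for clarity, not part of the original module) ---
# Minimal sketch of simulate_seasonal_term: four cycles of a weekly (period-52) seasonal pattern with
# three harmonics, returning an ndarray of length 52 * 4. The parameter values are illustrative only.
def _demo_simulate_seasonal_term():
    series = simulate_seasonal_term(periodicity=52, total_cycles=4, noise_std=1., harmonics=3)
    return series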
def make_synthetic_series(seed=0):
"""Generate synthetic data with regressors"""
np.random.seed(seed)
# simulate seasonality
seasonality_term = simulate_seasonal_term(periodicity=52, total_cycles=4, harmonics=3)
# scale data
scaler = MinMaxScaler(feature_range=(np.max(seasonality_term), np.max(seasonality_term) * 2))
seasonality_term = scaler.fit_transform(seasonality_term[:, None])
# datetime index
dt = pd.date_range(start='2016-01-04', periods=len(seasonality_term), freq='7D')
# create df
    df = pd.DataFrame(seasonality_term, columns=['response'], index=dt)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
from sklearn.neighbors import DistanceMetric
import networkx as nx
from itertools import combinations
def undo_PCA(x, pca, pca_comp):
mu = np.mean(x, axis=0)
xhat = np.dot(pca.transform(x), pca_comp)
xhat += mu
print(xhat.shape)
return xhat.T
def emb2exp(semb, gemb, semb_bias, gemb_bias):
x = np.dot(semb, gemb.T)
x += semb_bias
x += gemb_bias.T
print(x.shape)
return x
def plot_samp_3dPCA(samp_pca, cats, cat2col,
subset_idxs=[], showcat=False, showlegend=True,
alpha=0.1, s=25, fs=(20,20)):
if len(subset_idxs)==0:
X = samp_pca[0]
Y = samp_pca[1]
Z = samp_pca[2]
else:
X = samp_pca[0][subset_idxs]
Y = samp_pca[1][subset_idxs]
Z = samp_pca[2][subset_idxs]
colors = [cat2col[c] for c in cats]
fig = plt.figure(figsize=fs)
ax = fig.gca(projection='3d')
ax.scatter(X, Y, Z, c=colors, s=s, alpha=alpha)
if showcat:
for x, y, z, c in zip(X, Y, Z, cats): ax.text(x, y, z, c, fontsize=8)
if showlegend:
proxies = []
for c in cat2col: proxies.append(plt.Rectangle((0, 0), 1, 1, fc=cat2col[c]))
ax.legend(proxies, list(set(list(cat2col))), numpoints = 1)
ax.set_xlabel('PC1')
ax.set_ylabel('PC2')
ax.set_zlabel('PC3')
plt.show()
def plot_gene_3dPCA(gene_pca, genelist,
hl_idxs=[], hl_cols=['r', 'g', 'b'],
showhlgene=True, showbg=True,
bgcol=(0.5,0.5,0.5), bgs=30, bgalpha=0.1,
hlfs=10, hls=30, hlalpha=0.8, fs=(20,20)):
X = gene_pca[0]
Y = gene_pca[1]
Z = gene_pca[2]
fig = plt.figure(figsize=fs)
ax = fig.gca(projection='3d')
if showbg: ax.scatter(X, Y, Z, c=(0.5,0.5,0.5), s=bgs, alpha=bgalpha)
for i, hl in enumerate(hl_idxs):
for idx in hl:
if showhlgene: ax.text(X[idx],Y[idx],Z[idx],genelist[idx], color=hl_cols[i], fontsize=hlfs)
ax.scatter(X[idx],Y[idx],Z[idx], c=hl_cols[i], s=hls, alpha=hlalpha)
ax.set_xlabel('PC1')
ax.set_ylabel('PC2')
ax.set_zlabel('PC3')
plt.show()
def find_centroid(emb, sid2ca, sids, cancer):
idxs = [i for i,s in enumerate(sids) if sid2ca[s]==cancer]
arr = np.array([emb[i] for i in idxs])
return np.mean(arr, axis=0)
def print_gdist(g1, g2, dist, gene2idx):
print(dist[gene2idx[g1]][gene2idx[g2]])
def get_emb_dist(emb, return_pd=True, index=[], distmetric='euclidean', masked=False, maskval=10):
dist = DistanceMetric.get_metric(distmetric)
emb_dist = np.absolute(dist.pairwise(emb))
if masked:
utri = np.triu_indices(len(emb_dist))
emb_dist_masked = emb_dist
emb_dist_masked[utri] = maskval
res = emb_dist_masked
else:
res = emb_dist
if return_pd: res = pd.DataFrame(res, index=index, columns=index)
print('shape: %s; mean: %.3f; std: %.3f' % (str(emb_dist.shape), emb_dist.mean(), emb_dist.std()))
mean = np.mean(emb_dist, axis=0)
std = np.std(emb_dist, axis=0)
return res, mean, std
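# --- Hedged illustration (added for clarity, not part of the original module) ---
# Minimal sketch of get_emb_dist on a fabricated 4-gene x 3-dimension embedding; the gene names are
# placeholders, and the returned frame is the symmetric pairwise euclidean distance matrix.
def _demo_get_emb_dist():
    emb = np.random.rand(4, 3)
    dist, mean, std = get_emb_dist(emb, return_pd=True, index=['g1', 'g2', 'g3', 'g4'])
    return dist, mean, std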
def n_closest_nbs(dist, gene, n=10):
n += 1
arr = np.array(dist[gene])
nb_idx = np.argpartition(arr, n)[:n]
tdf = pd.DataFrame(dist[gene][nb_idx]).T.assign(parent=gene)
mdf = pd.melt(tdf, id_vars='parent', var_name='child', value_name='l2dist')
mdf = mdf[mdf.child != gene]
return mdf
def pull_close_nbs(dist, gene, th, distmetric='l2dist'):
arr = np.array(dist[gene])
nb_idx = np.where(arr < th)[0]
tdf = pd.DataFrame(dist[gene][nb_idx]).T.assign(parent=gene)
mdf = pd.melt(tdf, id_vars='parent', var_name='child', value_name=distmetric)
mdf = mdf[mdf.child != gene]
return mdf
def get_close_nbs_mdf(dist, genes, th, verbose=True):
dist_nbs_mdfs = [pull_close_nbs(dist, g, th=th) for g in genes]
dist_nbs_mdf = pd.concat(dist_nbs_mdfs)
if verbose: print('len: %d' % len(dist_nbs_mdf))
return dist_nbs_mdf
def get_connectivity_score(mdf):
arr = list(mdf['parent']) + list(mdf['child'])
genes = list(set(arr))
print('counting gene connectivity...')
counts = [arr.count(g) for g in genes]
total_count = np.sum(counts)
print('calculating score...')
percent = [c/total_count for c in counts]
pds = pd.Series(percent, index=genes)
print('# of genes: %d' % len(genes))
return pds, genes
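# --- Hedged illustration (added for clarity, not part of the original module) ---
# Minimal sketch of get_connectivity_score on a fabricated parent/child edge list (the melted format
# produced by get_close_nbs_mdf); gene names are placeholders.
def _demo_get_connectivity_score():
    mdf = pd.DataFrame({'parent': ['g1', 'g1', 'g2'],
                        'child': ['g2', 'g3', 'g3'],
                        'l2dist': [0.1, 0.2, 0.3]})
    score, genes = get_connectivity_score(mdf)
    return score, genes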
def get_cs_df(mdf1, mdf2, colname=['cancer', 'normal'], get_diff=False, show_zscore=False, show_outliers=False, z_cutoff=3):
print('getting score of set 1...')
cs1, g1 = get_connectivity_score(mdf1)
print('getting score of set 2...')
cs2, g2 = get_connectivity_score(mdf2)
print('# of common genes: %d' % len(list(set(g1) & set(g2))))
cs1_df = cs1.to_frame(); cs1_df.columns = [colname[0]]
cs2_df = cs2.to_frame(); cs2_df.columns = [colname[1]]
    cs_df = pd.concat([cs1_df, cs2_df], axis=1)
from abc import abstractmethod
import logging
import os
import re
import tempfile
from typing import List, Union
import arff
import numpy as np
import pandas as pd
import pandas.api.types as pat
from ..data import Dataset, DatasetType, Datasplit, Feature
from ..datautils import read_csv, to_data_frame
from ..resources import config as rconfig
from ..utils import Namespace as ns, as_list, lazy_property, list_all_files, memoize, path_from_split, profile, split_path
from .fileutils import download_file, is_archive, is_valid_url, unarchive_file, url_exists
log = logging.getLogger(__name__)
train_search_pat = re.compile(r"(?:(.*)[_-])train(?:[_-](\d+))?\.\w+")
test_search_pat = re.compile(r"(?:(.*)[_-])test(?:[_-](\d+))?\.\w+")
class FileLoader:
def __init__(self, cache_dir=None):
self._cache_dir = cache_dir if cache_dir else tempfile.mkdtemp(prefix='amlb_cache')
@profile(logger=log)
def load(self, dataset, fold=0):
dataset = dataset if isinstance(dataset, ns) else ns(path=dataset)
log.debug("Loading dataset %s", dataset)
paths = self._extract_train_test_paths(dataset.path if 'path' in dataset else dataset, fold=fold)
assert fold < len(paths['train']), f"No training dataset available for fold {fold} among dataset files {paths['train']}"
# seed = rget().seed(fold)
# if len(paths['test']) == 0:
# log.warning("No test file in the dataset, the train set will automatically be split 90%/10% using the given seed.")
# else:
assert fold < len(paths['test']), f"No test dataset available for fold {fold} among dataset files {paths['test']}"
target = dataset['target']
type_ = dataset['type']
features = dataset['features']
ext = os.path.splitext(paths['train'][fold])[1].lower()
train_path = paths['train'][fold]
test_path = paths['test'][fold] if len(paths['test']) > 0 else None
log.info(f"Using training set {train_path} with test set {test_path}.")
if ext == '.arff':
return ArffDataset(train_path, test_path, target=target, features=features, type=type_)
elif ext == '.csv':
return CsvDataset(train_path, test_path, target=target, features=features, type=type_)
else:
raise ValueError(f"Unsupported file type: {ext}")
def _extract_train_test_paths(self, dataset, fold=None):
if isinstance(dataset, (tuple, list)):
assert len(dataset) % 2 == 0, "dataset list must contain an even number of paths: [train_0, test_0, train_1, test_1, ...]."
return self._extract_train_test_paths(ns(train=[p for i, p in enumerate(dataset) if i % 2 == 0],
test=[p for i, p in enumerate(dataset) if i % 2 == 1]),
fold=fold)
elif isinstance(dataset, ns):
return dict(train=[self._extract_train_test_paths(p)['train'][0]
if i == fold else None
for i, p in enumerate(as_list(dataset.train))],
test=[self._extract_train_test_paths(p)['train'][0]
if i == fold else None
for i, p in enumerate(as_list(dataset.test))])
else:
assert isinstance(dataset, str)
dataset = os.path.expanduser(dataset)
dataset = dataset.format(**rconfig().common_dirs)
if os.path.exists(dataset):
if os.path.isfile(dataset):
if is_archive(dataset):
arch_name, _ = os.path.splitext(os.path.basename(dataset))
dest_folder = os.path.join(self._cache_dir, arch_name)
if not os.path.exists(dest_folder): # don't uncompress if previously done
dest_folder = unarchive_file(dataset, dest_folder)
return self._extract_train_test_paths(dest_folder)
else:
return dict(train=[dataset], test=[])
elif os.path.isdir(dataset):
files = list_all_files(dataset)
log.debug("Files found in dataset folder %s: %s", dataset, files)
assert len(files) > 0, f"Empty folder: {dataset}"
if len(files) == 1:
return dict(train=files, test=[])
train_matches = [m for m in [train_search_pat.search(f) for f in files] if m]
test_matches = [m for m in [test_search_pat.search(f) for f in files] if m]
# verify they're for the same dataset (just based on name)
assert train_matches and test_matches, f"Folder {dataset} must contain at least one training and one test dataset."
root_names = {m[1] for m in (train_matches+test_matches)}
assert len(root_names) == 1, f"All dataset files in {dataset} should follow the same naming: xxxxx_train_N.ext or xxxxx_test_N.ext with N starting from 0."
train_no_fold = next((m[0] for m in train_matches if m[2] is None), None)
test_no_fold = next((m[0] for m in test_matches if m[2] is None), None)
if train_no_fold and test_no_fold:
return dict(train=[train_no_fold], test=[test_no_fold])
paths = dict(train=[], test=[])
fold = 0
while fold >= 0:
train = next((m[0] for m in train_matches if m[2] == str(fold)), None)
test = next((m[0] for m in test_matches if m[2] == str(fold)), None)
if train and test:
paths['train'].append(train)
paths['test'].append(test)
fold += 1
else:
fold = -1
assert len(paths) > 0, f"No dataset file found in {dataset}: they should follow the naming xxxx_train.ext, xxxx_test.ext or xxxx_train_0.ext, xxxx_test_0.ext, xxxx_train_1.ext, ..."
return paths
elif is_valid_url(dataset):
cached_file = os.path.join(self._cache_dir, os.path.basename(dataset))
if not os.path.exists(cached_file): # don't download if previously done
assert url_exists(dataset), f"Invalid path/url: {dataset}"
download_file(dataset, cached_file)
return self._extract_train_test_paths(cached_file)
else:
raise ValueError(f"Invalid dataset description: {dataset}")
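# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Shows how FileLoader is typically driven: the folder path, target column and task type below are
# placeholders, and the folder is expected to contain files following the xxx_train_N/xxx_test_N
# naming convention described above. Namespace (ns) item access is assumed to behave as in amlb.utils.
def _demo_file_loader():
    loader = FileLoader(cache_dir=None)
    dataset = loader.load(ns(path='~/data/my_dataset_folder', target='class', type='multiclass', features=None), fold=0)
    return dataset.train.data, dataset.test.data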
class FileDataset(Dataset):
def __init__(self, train: Datasplit, test: Datasplit,
target: Union[int, str] = None, features: List[Union[ns, str]] = None, type: str = None):
super().__init__()
self._train = train
self._test = test
self._target = target
self._features = features
self._type = type
@property
def type(self) -> DatasetType:
assert self.target is not None
return (DatasetType[self._type] if self._type is not None
else DatasetType.regression if self.target.values is None
else DatasetType.binary if len(self.target.values) == 2
else DatasetType.multiclass)
@property
def train(self) -> Datasplit:
return self._train
@property
def test(self) -> Datasplit:
return self._test
@property
def features(self) -> List[Feature]:
return self._get_metadata('features')
@property
def target(self) -> Feature:
return self._get_metadata('target')
@memoize
def _get_metadata(self, prop):
meta = self._train.load_metadata()
return meta[prop]
class FileDatasplit(Datasplit):
def __init__(self, dataset: FileDataset, format: str, path: str):
super().__init__(dataset, format)
self._path = path
self._data = {format: path}
def data_path(self, format):
supported_formats = [cls.format for cls in __file_converters__]
if format not in supported_formats:
name = split_path(self._path).basename
raise ValueError(f"Dataset {name} is only available in one of {supported_formats} formats.")
return self._get_data(format)
@lazy_property
def data(self):
# use codecs for unicode support: path = codecs.load(self._path, 'rb', 'utf-8')
log.debug("Loading datasplit %s.", self.path)
return self.load_data()
@abstractmethod
def load_data(self):
pass
@abstractmethod
def load_metadata(self):
pass
def _get_data(self, fmt):
if fmt not in self._data:
converter = _get_file_convert_cls(fmt)()
self._data[fmt] = converter.convert(self)
return self._data[fmt]
def _find_target_feature(self, features: List[Feature]):
target = self.dataset._target
return (features[target] if isinstance(target, int)
else next(f for f in features if f.name == target) if isinstance(target, str)
else next((f for f in features if f.name.lower() in ['target', 'class']), None) or features[-1])
def _set_feature_as_target(self, target: Feature):
# for classification problems, ensure that the target appears as categorical
ds_type = self.dataset._type
if ds_type and DatasetType[ds_type] in [DatasetType.binary, DatasetType.multiclass]:
if not target.is_categorical():
log.warning("Forcing target column %s as 'category' for classification problems: was originally detected as '%s'.",
target.name, target.data_type)
# target.data_type = 'category'
target.is_target = True
class ArffDataset(FileDataset):
def __init__(self, train_path, test_path,
target=None, features=None, type=None):
# todo: handle auto-split (if test_path is None): requires loading the training set, split, save
super().__init__(ArffDatasplit(self, train_path), ArffDatasplit(self, test_path),
target=target, features=features, type=type)
class ArffDatasplit(FileDatasplit):
def __init__(self, dataset, path):
super().__init__(dataset, format='arff', path=path)
self._ds = None
def _ensure_loaded(self):
if self._ds is None:
with open(self.path) as f:
self._ds = arff.load(f)
@profile(logger=log)
def load_metadata(self):
self._ensure_loaded()
attrs = self._ds['attributes']
# arff loader types = ['NUMERIC', 'REAL', 'INTEGER', 'STRING']
to_feature_type = lambda arff_type: ('category' if isinstance(arff_type, (list, set))
else 'string' if arff_type.lower() == 'string'
else 'int' if arff_type.lower() == 'integer'
else 'float' if arff_type.lower() == 'real'
else 'number' if arff_type.lower() == 'numeric'
else 'object')
features = [Feature(i, attr[0], to_feature_type(attr[1]))
for i, attr in enumerate(attrs)]
target = self._find_target_feature(features)
self._set_feature_as_target(target)
df = to_data_frame(self._ds['data'])
for f in features:
col = df.iloc[:, f.index]
f.has_missing_values = col.hasnans
if f.is_categorical():
arff_type = attrs[f.index][1]
assert isinstance(arff_type, (list, set))
f.values = sorted(arff_type)
meta = dict(
features=features,
target=target
)
log.debug("Metadata for dataset %s: %s", self.path, meta)
return meta
@profile(logger=log)
def load_data(self):
self._ensure_loaded()
columns = [f.name for f in self.dataset.features]
df = pd.DataFrame(self._ds['data'], columns=columns)
dt_conversions = {f.name: f.data_type
for f in self.dataset.features
if f.data_type == 'category'}
if dt_conversions:
df = df.astype(dt_conversions, copy=False)
return df
def release(self, properties=None):
super().release(properties)
self._ds = None
class CsvDataset(FileDataset):
def __init__(self, train_path, test_path,
target=None, features=None, type=None):
# todo: handle auto-split (if test_path is None): requires loading the training set, split, save
super().__init__(CsvDatasplit(self, train_path), CsvDatasplit(self, test_path),
target=target, features=features, type=type)
self._dtypes = None
class CsvDatasplit(FileDatasplit):
def __init__(self, dataset, path):
super().__init__(dataset, format='csv', path=path)
self._ds = None
def _ensure_loaded(self):
if self._ds is None:
if self.dataset._dtypes is None:
df = read_csv(self.path)
# df = df.convert_dtypes()
dt_conversions = {name: 'category'
for name, dtype in zip(df.dtypes.index, df.dtypes.values)
if pat.is_string_dtype(dtype) or pat.is_object_dtype(dtype)}
# we could be a bit more clever in the future and convert 'string' to category iff len(distinct values) << nrows
if dt_conversions:
df = df.astype(dt_conversions, copy=False)
self._ds = df
self.dataset._dtypes = self._ds.dtypes
else:
self._ds = read_csv(self.path, dtype=self.dataset._dtypes.to_dict())
@profile(logger=log)
def load_metadata(self):
self._ensure_loaded()
dtypes = self.dataset._dtypes
to_feature_type = lambda dt: ('int' if pat.is_integer_dtype(dt)
else 'float' if pat.is_float_dtype(dt)
else 'number' if pat.is_numeric_dtype(dt)
else 'category' if pat.is_categorical_dtype(dt)
else 'string' if pat.is_string_dtype(dt)
# else 'datetime' if pat.is_datetime64_dtype(dt)
else 'object')
features = [Feature(i, col, to_feature_type(dtypes[i]))
for i, col in enumerate(self._ds.columns)]
for f in features:
col = self._ds.iloc[:, f.index]
f.has_missing_values = col.hasnans
if f.is_categorical():
f.values = sorted(self._ds.dtypes[f.name].categories.values)
target = self._find_target_feature(features)
self._set_feature_as_target(target)
meta = dict(
features=features,
target=target
)
log.debug("Metadata for dataset %s: %s", self.path, meta)
return meta
@profile(logger=log)
def load_data(self):
self._ensure_loaded()
return self._ds
def release(self, properties=None):
super().release(properties)
self._ds = None
class FileConverter:
format = None
def __init__(self) -> None:
super().__init__()
def convert(self, split: FileDatasplit) -> str:
sp = split_path(split._path)
sp.extension = self.format
target_path = path_from_split(sp)
if not os.path.isfile(target_path):
self._write_file(split.data, target_path)
return target_path
@abstractmethod
def _write_file(self, df, path):
pass
class ArffConverter(FileConverter):
format = 'arff'
def _write_file(self, df, path):
name = split_path(path).basename
with open(path, 'w') as file:
description = f"Arff dataset file generated by automlbenchmark from {name}."
attributes = [(c,
('INTEGER' if pat.is_integer_dtype(dt)
else 'REAL' if pat.is_float_dtype(dt)
else 'NUMERIC' if pat.is_numeric_dtype(dt)
                        else sorted(dt.categories.values) if pat.is_categorical_dtype(dt)
import pytest
import pandas as pd
import numpy as np
from pandas.util.testing import assert_frame_equal, assert_index_equal
from pdblp import pdblp
import blpapi
import os
@pytest.fixture(scope="module")
def port(request):
return request.config.getoption("--port")
@pytest.fixture(scope="module")
def host(request):
return request.config.getoption("--host")
@pytest.fixture(scope="module")
def timeout(request):
return request.config.getoption("--timeout")
@pytest.fixture(scope="module")
def con(host, port, timeout):
return pdblp.BCon(host=host, port=port, timeout=timeout).start()
@pytest.fixture(scope="module")
def data_path():
return os.path.join(os.path.dirname(__file__), "data/")
def pivot_and_assert(df, df_exp, with_date=False):
# as shown below, since the raw data returned from bbg is an array
    # with unknown ordering, there is no guarantee that the `position` will
# always be the same so pivoting prior to comparison is necessary
#
# fieldData = {
# INDX_MWEIGHT[] = {
# INDX_MWEIGHT = {
# Member Ticker and Exchange Code = "BON8"
# Percentage Weight = 2.410000
# }
# INDX_MWEIGHT = {
# Member Ticker and Exchange Code = "C N8"
# Percentage Weight = 6.560000
# }
# INDX_MWEIGHT = {
# Member Ticker and Exchange Code = "CLN8"
# Percentage Weight = 7.620000
# }
# }
# }
name_cols = list(df_exp.name.unique())
sort_cols = list(df_exp.name.unique())
index_cols = ["name", "position", "field", "ticker"]
if with_date:
sort_cols.append("date")
index_cols.append("date")
df = (df.set_index(index_cols).loc[:, "value"]
.unstack(level=0).reset_index().drop(columns="position")
.sort_values(by=sort_cols, axis=0))
df_exp = (df_exp.set_index(index_cols).loc[:, "value"]
.unstack(level=0).reset_index().drop(columns="position")
.sort_values(by=sort_cols, axis=0))
# deal with mixed types resulting in str from csv read
for name in name_cols:
try:
df_exp.loc[:, name] = df_exp.loc[:, name].astype(float)
except ValueError:
pass
for name in name_cols:
try:
df.loc[:, name] = df.loc[:, name].astype(float)
except ValueError:
pass
if with_date:
df.loc[:, "date"] = pd.to_datetime(df.loc[:, "date"],
format="%Y%m%d")
df_exp.loc[:, "date"] = pd.to_datetime(df_exp.loc[:, "date"],
format="%Y%m%d")
assert_frame_equal(df, df_exp)
ifbbg = pytest.mark.skipif(pytest.config.cache.get('offline', False),
reason="No BBG connection, skipping tests")
@ifbbg
def test_bdh_empty_data_only(con):
df = con.bdh(
tickers=['1437355D US Equity'],
flds=['PX_LAST', 'VOLUME'],
start_date='20180510',
end_date='20180511',
longdata=False
)
df_exp = pd.DataFrame(
[], index=pd.DatetimeIndex([], name='date'),
columns=pd.MultiIndex.from_product([[], []],
names=('ticker', 'field'))
)
assert_frame_equal(df, df_exp)
@ifbbg
def test_bdh_empty_data_with_non_empty_data(con):
df = con.bdh(
tickers=['AAPL US Equity', '1437355D US Equity'],
flds=['PX_LAST', 'VOLUME'],
start_date='20180510',
end_date='20180511',
longdata=False
)
df_exp = pd.DataFrame(
[[190.04, 27989289.0], [188.59, 26212221.0]],
index=pd.DatetimeIndex(["20180510", "20180511"], name="date"),
columns=pd.MultiIndex.from_product([["AAPL US Equity"],
["PX_LAST", "VOLUME"]],
names=["ticker", "field"])
)
assert_frame_equal(df, df_exp)
@ifbbg
def test_bdh_partially_empty_data(con):
df = con.bdh(
tickers=['XIV US Equity', 'AAPL US Equity'],
flds=['PX_LAST'],
start_date='20180215',
end_date='20180216',
longdata=False
)
df_exp = pd.DataFrame(
[[6.04, 172.99], [np.NaN, 172.43]],
index=pd.DatetimeIndex(["20180215", "20180216"], name="date"),
columns=pd.MultiIndex.from_product(
[["XIV US Equity", "AAPL US Equity"], ["PX_LAST"]],
names=["ticker", "field"]
)
)
    assert_frame_equal(df, df_exp)
import glob
from evaluation_metrics.inception_score import calculate_inception_score_given_tensor
from evaluation_metrics.fid_score import calculate_fid_given_tensor
from models.sync_batchnorm import DataParallelWithCallback
import pandas as pd
import torch
import os
def inceptions_score_fid_all(base_dir, generator_func, z_sampling_func, y_sampling_func, use_data_parallel,
n_minibatch_sampling, refrence_fid_statistics_path):
model_paths = sorted(glob.glob(base_dir + "/models/gen_ema*.pytorch"))
epochs = []
inception_scores = []
fids = []
print(f"Calculating All Inception Scores / FIDs... (# {len(model_paths)})")
for i, path in enumerate(model_paths):
model = generator_func()
model.load_state_dict(torch.load(path))
if use_data_parallel:
model = DataParallelWithCallback(model)
# generate images
with torch.no_grad():
imgs = []
for _ in range(n_minibatch_sampling):
z = z_sampling_func()
y = y_sampling_func()
x = model(z, y)
imgs.append(x)
imgs = torch.cat(imgs, dim=0).cpu()
# eval_is
iscore, _ = calculate_inception_score_given_tensor(imgs)
# fid
fid_score = calculate_fid_given_tensor(imgs, refrence_fid_statistics_path)
# epoch
epoch = int(os.path.basename(path).replace("gen_ema_epoch_", "").replace(".pytorch", ""))
epochs.append(epoch)
inception_scores.append(iscore)
fids.append(fid_score)
print(f"epoch = {epoch}, inception_score = {iscore}, fid = {fid_score} [{i+1}/{len(model_paths)}]")
    df = pd.DataFrame({"epoch": epochs, "inception_score": inception_scores, "fid": fids})
import investpy
import pandas as pd
from lxml import html
import requests
import os
from finetwork.utils._utils import isnotebook
if isnotebook():
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
import json
import numpy as np
import datetime
import trading_calendars as tc
class FinData:
def __init__(self,
from_date,
to_date,
tickers,
country,
market=None,
index=False):
self.from_date = from_date
self.to_date = to_date
self.country = country
self.tickers = tickers
self.market = market
self.index = index
def get_data(self,
dividends_correction = True,
save = True,
save_format = 'csv',
save_path = 'data/',
trading_day_thresh = 0.55):
index = self.index
from_date = self.from_date
to_date = self.to_date
market = self.market
tickers = self.tickers
country = self.country
if index:
tickers=[tickers]
data_dict = {}
sectors_dict = {}
if market == 'world_indices':
cal = tc.get_calendar('NYSE')
else:
cal = tc.get_calendar(market)
cal_trading = cal.sessions_in_range(pd.to_datetime(from_date),
pd.to_datetime(to_date))
cal_trading = [
datetime.date.strftime(i, "%Y-%m-%d") for i in cal_trading
]
from_date = datetime.datetime.strptime(
min(cal_trading),
'%Y-%m-%d'
).strftime('%d/%m/%Y')
to_date = datetime.datetime.strptime(
max(cal_trading),
'%Y-%m-%d'
).strftime('%d/%m/%Y')
n_trading = len(cal_trading)
for i, code in (enumerate(tqdm(tickers))):
try:
if index:
data = investpy.indices.get_index_historical_data(
index=code,
country=country,
from_date=from_date,
to_date=to_date
)
else:
data = investpy.get_stock_historical_data(
stock=code,
country=country,
from_date=from_date,
to_date=to_date
)
if datetime.datetime.strptime(
str(data.index.min().date()), '%Y-%m-%d'
).strftime('%d/%m/%Y')==(from_date)\
and datetime.datetime.strptime(
str(data.index.max().date()), '%Y-%m-%d'
).strftime('%d/%m/%Y')==(to_date)\
and | pd.to_datetime(data.index) | pandas.to_datetime |
import nose
import os
import string
from distutils.version import LooseVersion
from datetime import datetime, date, timedelta
from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range
from pandas.compat import range, lrange, StringIO, lmap, lzip, u, zip
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import randn
from numpy.testing import assert_array_equal
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
def _skip_if_no_scipy():
try:
import scipy
except ImportError:
raise nose.SkipTest("no scipy")
@tm.mplskip
class TestSeriesPlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
def tearDown(self):
tm.close()
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
_check_plot_works(self.ts.plot, rot=0)
_check_plot_works(self.ts.plot, style='.', logy=True)
_check_plot_works(self.ts.plot, style='.', logx=True)
_check_plot_works(self.ts.plot, style='.', loglog=True)
_check_plot_works(self.ts[:10].plot, kind='bar')
_check_plot_works(self.iseries.plot)
_check_plot_works(self.series[:5].plot, kind='bar')
_check_plot_works(self.series[:5].plot, kind='line')
_check_plot_works(self.series[:5].plot, kind='barh')
_check_plot_works(self.series[:10].plot, kind='barh')
_check_plot_works(Series(randn(10)).plot, kind='bar', color='black')
@slow
def test_plot_figsize_and_title(self):
# figsize and title
import matplotlib.pyplot as plt
ax = self.series.plot(title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
import matplotlib.colors as colors
default_colors = plt.rcParams.get('axes.color_cycle')
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(kind='bar')
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(default_colors[i % len(default_colors)])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
ax = df.plot(kind='bar', color=custom_colors)
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(custom_colors[i])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot(kind='bar', colormap='jet')
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
# Test colormap functionality
ax = df.plot(kind='bar', colormap=cm.jet)
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
df.ix[:, [0]].plot(kind='bar', color='DodgerBlue')
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot(kind='bar', linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# stacked
ax = df.plot(kind='bar', stacked=True, linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# subplots
axes = df.plot(kind='bar', linewidth=2, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot(log=True, kind='bar')
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
def test_rotation(self):
df = DataFrame(randn(5, 5))
ax = df.plot(rot=30)
for l in ax.get_xticklabels():
self.assertEqual(l.get_rotation(), 30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_hist(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, by=self.ts.index.month)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_layout(self):
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n)})
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
import matplotlib.pyplot as plt
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
_check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
tm.close()
_check_plot_works(df.height.hist, by=df.gender, layout=(1, 2))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(1, 4))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(4, 1))
tm.close()
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf, close
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with tm.assertRaises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_by_no_extra_plots(self):
import matplotlib.pyplot as plt
n = 10
df = DataFrame({'gender': tm.choice(['Male', 'Female'], size=n),
'height': random.normal(66, 4, size=n)})
axes = df.height.hist(by=df.gender)
self.assertEqual(len(plt.get_fignums()), 1)
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure, close
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with tm.assertRaises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
@slow
def test_kde(self):
_skip_if_no_scipy()
_check_plot_works(self.ts.plot, kind='kde')
_check_plot_works(self.ts.plot, kind='density')
ax = self.ts.plot(kind='kde', logy=True)
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_kwargs(self):
_skip_if_no_scipy()
from numpy import linspace
_check_plot_works(self.ts.plot, kind='kde', bw_method=.5, ind=linspace(-100,100,20))
_check_plot_works(self.ts.plot, kind='density', bw_method=.5, ind=linspace(-100,100,20))
ax = self.ts.plot(kind='kde', logy=True, bw_method=.5, ind=linspace(-100,100,20))
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_color(self):
_skip_if_no_scipy()
ax = self.ts.plot(kind='kde', logy=True, color='r')
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0].get_color(), 'r')
@slow
def test_autocorrelation_plot(self):
from pandas.tools.plotting import autocorrelation_plot
_check_plot_works(autocorrelation_plot, self.ts)
_check_plot_works(autocorrelation_plot, self.ts.values)
@slow
def test_lag_plot(self):
from pandas.tools.plotting import lag_plot
_check_plot_works(lag_plot, self.ts)
_check_plot_works(lag_plot, self.ts, lag=5)
@slow
def test_bootstrap_plot(self):
from pandas.tools.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, self.ts, size=10)
def test_invalid_plot_data(self):
s = Series(list('abcd'))
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with tm.assertRaises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@tm.mplskip
class TestDataFramePlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
def tearDown(self):
tm.close()
@slow
def test_plot(self):
df = tm.makeTimeDataFrame()
_check_plot_works(df.plot, grid=False)
_check_plot_works(df.plot, subplots=True)
_check_plot_works(df.plot, subplots=True, use_index=False)
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
self._check_plot_fails(df.plot, kind='line', blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
_check_plot_works(df.plot, subplots=True, title='blah')
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
@slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self.assertEqual(ax.xaxis.get_label().get_text(), 'a')
@slow
def test_explicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b', label='LABEL')
self.assertEqual(ax.xaxis.get_label().get_text(), 'LABEL')
@slow
def test_plot_xy(self):
import matplotlib.pyplot as plt
# columns.inferred_type == 'string'
df = tm.makeTimeDataFrame()
self._check_data(df.plot(x=0, y=1),
df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'),
df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2),
df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@slow
def test_xcompat(self):
import pandas as pd
import matplotlib.pyplot as plt
df = tm.makeTimeDataFrame()
ax = df.plot(x_compat=True)
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
tm.assert_isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with pd.plot_params.use('x_compat', True):
ax = df.plot()
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
ax = df.plot()
lines = ax.get_lines()
tm.assert_isinstance(lines[0].get_xdata(), PeriodIndex)
def test_unsorted_index(self):
df = DataFrame({'y': np.arange(100)},
index=np.arange(99, -1, -1), dtype=np.int64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64)
tm.assert_series_equal(rs, df.y)
def _check_data(self, xp, rs):
xp_lines = xp.get_lines()
rs_lines = rs.get_lines()
def check_line(xpl, rsl):
xpdata = xpl.get_xydata()
rsdata = rsl.get_xydata()
assert_array_equal(xpdata, rsdata)
[check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]
tm.close()
@slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, sharex=True, legend=True)
for ax in axes:
self.assert_(ax.get_legend() is not None)
axes = df.plot(subplots=True, sharex=True)
for ax in axes[:-2]:
[self.assert_(not label.get_visible())
for label in ax.get_xticklabels()]
[self.assert_(label.get_visible())
for label in ax.get_yticklabels()]
[self.assert_(label.get_visible())
for label in axes[-1].get_xticklabels()]
[self.assert_(label.get_visible())
for label in axes[-1].get_yticklabels()]
axes = df.plot(subplots=True, sharex=False)
for ax in axes:
[self.assert_(label.get_visible())
for label in ax.get_xticklabels()]
[self.assert_(label.get_visible())
for label in ax.get_yticklabels()]
@slow
def test_plot_scatter(self):
from matplotlib.pylab import close
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
_check_plot_works(df.plot, x='x', y='y', kind='scatter')
_check_plot_works(df.plot, x=1, y=2, kind='scatter')
with tm.assertRaises(ValueError):
df.plot(x='x', kind='scatter')
with tm.assertRaises(ValueError):
df.plot(y='y', kind='scatter')
@slow
def test_plot_bar(self):
from matplotlib.pylab import close
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
_check_plot_works(df.plot, kind='bar')
_check_plot_works(df.plot, kind='bar', legend=False)
_check_plot_works(df.plot, kind='bar', subplots=True)
_check_plot_works(df.plot, kind='bar', stacked=True)
df = DataFrame(randn(10, 15),
index=list(string.ascii_letters[:10]),
columns=lrange(15))
_check_plot_works(df.plot, kind='bar')
df = DataFrame({'a': [0, 1], 'b': [1, 0]})
_check_plot_works(df.plot, kind='bar')
def test_bar_stacked_center(self):
# GH2157
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
ax = df.plot(kind='bar', stacked='True', grid=True)
self.assertEqual(ax.xaxis.get_ticklocs()[0],
ax.patches[0].get_x() + ax.patches[0].get_width() / 2)
def test_bar_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
ax = df.plot(kind='bar', grid=True)
self.assertEqual(ax.xaxis.get_ticklocs()[0],
ax.patches[0].get_x() + ax.patches[0].get_width())
@slow
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
expected = np.array([1., 10.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 100))
# no subplots
df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
ax = df.plot(kind='bar', grid=True, log=True)
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
@slow
def test_bar_log_subplots(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = DataFrame([Series([200, 300]),
Series([300, 500])]).plot(log=True, kind='bar',
subplots=True)
assert_array_equal(ax[0].yaxis.get_ticklocs(), expected)
assert_array_equal(ax[1].yaxis.get_ticklocs(), expected)
@slow
def test_boxplot(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
df['indic2'] = ['foo', 'bar', 'foo'] * 2
_check_plot_works(df.boxplot)
_check_plot_works(df.boxplot, column=['one', 'two'])
_check_plot_works(df.boxplot, column=['one', 'two'], by='indic')
_check_plot_works(df.boxplot, column='one', by=['indic', 'indic2'])
_check_plot_works(df.boxplot, by='indic')
_check_plot_works(df.boxplot, by=['indic', 'indic2'])
_check_plot_works(plotting.boxplot, df['one'])
_check_plot_works(df.boxplot, notch=1)
_check_plot_works(df.boxplot, by='indic', notch=1)
df = DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])
df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
_check_plot_works(df.boxplot, by='X')
@slow
def test_kde(self):
_skip_if_no_scipy()
df = DataFrame(randn(100, 4))
_check_plot_works(df.plot, kind='kde')
_check_plot_works(df.plot, kind='kde', subplots=True)
ax = df.plot(kind='kde')
self.assert_(ax.get_legend() is not None)
axes = df.plot(kind='kde', logy=True, subplots=True)
for ax in axes:
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_hist(self):
import matplotlib.pyplot as plt
df = DataFrame(randn(100, 4))
_check_plot_works(df.hist)
_check_plot_works(df.hist, grid=False)
# make sure layout is handled
df = DataFrame(randn(100, 3))
_check_plot_works(df.hist)
axes = df.hist(grid=False)
self.assert_(not axes[1, 1].get_visible())
df = DataFrame(randn(100, 1))
_check_plot_works(df.hist)
# make sure layout is handled
df = DataFrame(randn(100, 6))
_check_plot_works(df.hist)
# make sure sharex, sharey is handled
_check_plot_works(df.hist, sharex=True, sharey=True)
# handle figsize arg
_check_plot_works(df.hist, figsize=(8, 10))
# make sure xlabelsize and xrot are handled
ser = df[0]
xf, yf = 20, 20
xrot, yrot = 30, 30
ax = ser.hist(xlabelsize=xf, xrot=30, ylabelsize=yf, yrot=30)
ytick = ax.get_yticklabels()[0]
xtick = ax.get_xticklabels()[0]
self.assertAlmostEqual(ytick.get_fontsize(), yf)
self.assertAlmostEqual(ytick.get_rotation(), yrot)
self.assertAlmostEqual(xtick.get_fontsize(), xf)
self.assertAlmostEqual(xtick.get_rotation(), xrot)
xf, yf = 20, 20
xrot, yrot = 30, 30
axes = df.hist(xlabelsize=xf, xrot=30, ylabelsize=yf, yrot=30)
for i, ax in enumerate(axes.ravel()):
if i < len(df.columns):
ytick = ax.get_yticklabels()[0]
xtick = ax.get_xticklabels()[0]
self.assertAlmostEqual(ytick.get_fontsize(), yf)
self.assertAlmostEqual(ytick.get_rotation(), yrot)
self.assertAlmostEqual(xtick.get_fontsize(), xf)
self.assertAlmostEqual(xtick.get_rotation(), xrot)
tm.close()
# make sure kwargs to hist are handled
ax = ser.hist(normed=True, cumulative=True, bins=4)
# height of last bin (index 5) must be 1.0
self.assertAlmostEqual(ax.get_children()[5].get_height(), 1.0)
tm.close()
ax = ser.hist(log=True)
# scale of y must be 'log'
self.assertEqual(ax.get_yscale(), 'log')
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with | tm.assertRaises(AttributeError) | pandas.util.testing.assertRaises |
"""Create symlinks to images grouped by label.
"""
import argparse
import json
import logging
import os
import psutil
import time
from pathlib import Path
from typing import Dict, List, Set
import pandas as pd
from tqdm import tqdm
from autofocus.util import discard_duplicate_rows
def main(detections_path: str,
outdir: str,
labelmap_path: str=None,
label_priority_path: str=None,
keep_unresolved: bool=False,
):
detections = | pd.read_csv(detections_path) | pandas.read_csv |
import numpy as np
import pandas as pd
from fbprophet import Prophet
Charlottenburg_Wilmersdorf = pd.read_csv('DFs/TG_Charlottenburg-Wilmersdorf.txt', sep=",", header=0)
Friedrichshain_Kreuzberg = pd.read_csv('DFs/TG_Friedrichshain-Kreuzberg.txt', sep=",", header=0)
Lichtenberg = pd.read_csv('DFs/TG_Lichtenberg.txt', sep=",", header=0)
Marzahn_Hellersdorf = pd.read_csv('DFs/TG_Marzahn-Hellersdorf.txt', sep=",", header=0)
Mitte = pd.read_csv('DFs/TG_Mitte.txt', sep=",", header=0)
NeuKoln = pd.read_csv('DFs/TG_NeuKoln.txt', sep=",", header=0)
Pankow = | pd.read_csv('DFs/TG_Pankow.txt', sep=",", header=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Tools for correcting energy-balance components to improve energy balance
closure, along with other data management, validation, and scientific analysis utilities.
"""
from pathlib import Path
import xarray
import numpy as np
import pandas as pd
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from refet.calcs import _ra_daily, _rso_simple
import refet
from .data import Data
from .plot import Plot
from .util import monthly_resample, Convert
class QaQc(Plot, Convert):
"""
Numerical routines for correcting daily energy balance closure
for eddy covariance data and other data analysis tools.
Two routines are provided for improving energy balance closure by adjusting
turbulent fluxes, latent energy and sensible heat, the Energy Balance Ratio
method (modified from `FLUXNET
<https://fluxnet.fluxdata.org/data/fluxnet2015-dataset/data-processing/>`__)
and the Bowen Ratio method.
The :obj:`QaQc` object also has multiple tools for temporal frequency
aggregation and resampling, estimation of climatic and statistical
variables (e.g. ET and potential shortwave radiation), downloading gridMET
reference ET, managing data and metadata, interactive validation plots, and
managing a structure for input and output data files. Input data is
expected to be a :obj:`.Data` instance or a
:obj:`pandas.DataFrame`.
Keyword Arguments:
data (:obj:`.Data`): :obj:`.Data` instance to create :obj:`.QaQc`
instance.
drop_gaps (bool): default :obj:`True`. If :obj:`True` automatically
filter variables on days with sub-daily measurement gaps less than
``daily_frac``.
daily_frac (float): default 1.00. Fraction of sub-daily data required
otherwise the daily value will be filtered out if ``drop_gaps`` is
:obj:`True`. E.g. if ``daily_frac = 0.5`` and the input data is
hourly, then data on days with less than 12 hours of data will be
forced to null within :attr:`QaQc.df`. This is important because
systematic diurnal gaps will affect the automatic resampling that
occurs when creating a :obj:`QaQc` instance and the daily data is
used in closure corrections, other calculations, and plots. If
sub-daily linear interpolation is applied to energy balance
variables the gaps are counted *after* the interpolation.
max_interp_hours (None or float): default 2. Length of largest gap to
fill with linear interpolation in energy balance variables if
input data's temporal frequency is less than daily. This value will
be used to fill gaps when :math:`Rn > 0` or :math:`Rn` is missing
during each day.
max_interp_hours_night (None or float): default 4. Length of largest gap
to fill with linear interpolation in energy balance variables if
input data's temporal frequency is less than daily when
:math:`Rn < 0` within 12:00PM-12:00PM daily intervals.
Attributes:
agg_dict (dict): Dictionary with internal variable names as keys and
method of temporal resampling (e.g. "mean" or "sum") as values.
config (:obj:`configparser.ConfigParser`): Config parser instance
created from the data within the config.ini file.
config_file (:obj:`pathlib.Path`): Absolute path to config.ini file
used for initialization of the :obj:`fluxdataqaqc.Data` instance
used to create the :obj:`QaQc` instance.
corrected (bool): False until an energy balance closure correction has
been run by calling :meth:`QaQc.correct_data`.
corr_methods (tuple): List of Energy Balance Closure correction routines
usable by :meth:`QaQc.correct_data`.
corr_meth (str or None): Name of most recently applied energy balance
closure correction.
elevation (float): Site elevation in meters.
gridMET_exists (bool): True if path to matching gridMET time series
file exists on disk and has time series for reference ET and
precipitation and the dates for these fully overlap with the energy
balance variables, i.e. the date index of :attr:`QaQc.df`.
gridMET_meta (dict): Dictionary with information for gridMET variables
that may be downloaded using :meth:`QaQc.download_gridMET`.
inv_map (dict): Dictionary with input climate file names as keys and
internal names as values. May only include pairs when they differ.
latitude (float): Site latitude in decimal degrees.
longitude (float): Site longitude in decimal degrees.
out_dir (pathlib.Path): Default directory to save output of
:meth:`QaQc.write` or :meth:`QaQc.plot` methods.
n_samples_per_day (int): If initial time series temporal frequency is
less than daily then this value will be updated to the number of samples
detected per day, useful for post-processing based on the count of
sub-daily gaps in energy balance variables, e.g. "LE_subday_gaps".
plot_file (pathlib.Path or None): path to plot file once it is
created/saved by :meth:`QaQc.plot`.
site_id (str): Site ID.
temporal_freq (str): Temporal frequency of initial (as found in input
climate file) data as determined by :func:`pandas.infer_freq`.
units (dict): Dictionary with internal variable names as keys and
units as found in config as values.
variables (dict): Dictionary with internal variable names as keys and
names as found in the input data as values.
Note:
Upon initialization of a :obj:`QaQc` instance the temporal frequency of
the input data checked using :func:`pandas.infer_freq` which does not
always correctly parse datetime indices, if it is not able to correctly
determine the temporal frequency the time series will be resampled to
daily frequency but if it is in fact already at daily frequency the data
will be unchanged. In this case the :attr:`QaQc.temporal_freq` will be
set to "na".
"""
# dictionary used for temporally aggregating variables
agg_dict = {
'ASCE_ETo': 'sum',
'ASCE_ETr': 'sum',
'energy': 'mean',
'flux': 'mean',
'flux_corr': 'mean',
'br': 'mean',
'ET': 'sum',
'ET_corr': 'sum',
'ET_gap': 'sum',
'ET_fill': 'sum',
'ET_fill_val': 'sum',
'ET_user_corr': 'sum',
'ebr': 'mean',
'ebr_corr': 'mean',
'ebr_user_corr': 'mean',
'ebr_5day_clim': 'mean',
'gridMET_ETr': 'sum',
'gridMET_ETo': 'sum',
'gridMET_prcp': 'sum',
'lw_in': 'mean',
't_avg': 'mean',
't_max': 'mean',
't_min': 'mean',
't_dew': 'mean',
'rso': 'mean',
'sw_pot': 'mean',
'sw_in': 'mean',
'vp': 'mean',
'vpd': 'mean',
'ppt': 'sum',
'ppt_corr': 'sum',
'ws': 'mean',
'Rn': 'mean',
'Rn_subday_gaps': 'sum',
'rh' : 'mean',
'sw_out': 'mean',
'lw_out': 'mean',
'G': 'mean',
'G_subday_gaps': 'sum',
'LE': 'mean',
'LE_corr': 'mean',
'LE_subday_gaps': 'sum',
'LE_user_corr': 'mean',
'H': 'mean',
'H_corr': 'mean',
'H_subday_gaps': 'sum',
'H_user_corr': 'mean',
}
# EBR correction methods available
corr_methods = (
'ebr',
'br',
'lin_regress'
)
# gridMET dict, keys are names which can be passed to download_gridMET
gridMET_meta = {
'ETr': {
'nc_suffix': 'agg_met_etr_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'daily_mean_reference_evapotranspiration_alfalfa',
'rename': 'gridMET_ETr',
'units': 'mm'
},
'pr': {
'nc_suffix': 'agg_met_pr_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'precipitation_amount',
'rename': 'gridMET_prcp',
'units': 'mm'
},
'pet': {
'nc_suffix': 'agg_met_pet_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'daily_mean_reference_evapotranspiration_grass',
'rename': 'gridMET_ETo',
'units': 'mm'
},
'sph': {
'nc_suffix': 'agg_met_sph_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'daily_mean_specific_humidity',
'rename': 'gridMET_q',
'units': 'kg/kg'
},
'srad': {
'nc_suffix': 'agg_met_srad_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'daily_mean_shortwave_radiation_at_surface',
'rename': 'gridMET_srad',
'units': 'w/m2'
},
'vs': {
'nc_suffix': 'agg_met_vs_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'daily_mean_wind_speed',
'rename': 'gridMET_u10',
'units': 'm/s'
},
'tmmx': {
'nc_suffix': 'agg_met_tmmx_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'daily_maximum_temperature',
'rename': 'gridMET_tmax',
'units': 'K'
},
'tmmn': {
'nc_suffix': 'agg_met_tmmn_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'daily_minimum_temperature',
'rename': 'gridMET_tmin',
'units': 'K'
},
}
# all potentially calculated variables for energy balance corrections
_eb_calc_vars = (
'br',
'br_user_corr',
'energy',
'energy_corr',
'ebr',
'ebr_corr',
'ebr_user_corr',
'ebc_cf',
'ebr_5day_clim',
'flux',
'flux_corr',
'flux_user_corr',
'G_corr',
'H_corr',
'LE_corr',
'Rn_corr'
)
# potentially calculated variables for ET
_et_calc_vars = (
'ET',
'ET_corr',
'ET_user_corr'
)
# potentially calculated ET gap fill variables
_et_gap_fill_vars = (
'ET_gap',
'ET_fill',
'ET_fill_val',
'ETrF',
'ETrF_filtered',
'EToF',
'EToF_filtered'
)
def __init__(self, data=None, drop_gaps=True, daily_frac=1.00,
max_interp_hours=2, max_interp_hours_night=4):
if isinstance(data, Data):
self.config_file = data.config_file
self.config = data.config
data.df.head()  # access df so vp/vpd are calculated if possible
self._df = data.df
self.variables = data.variables
self.units = data.units
self.elevation = data.elevation
self.latitude = data.latitude
self.longitude = data.longitude
self.out_dir = data.out_dir
self.site_id = data.site_id
# flip variable naming dict for internal use
self.inv_map = {
v: k for k, v in self.variables.items() if (
not k in self._df.columns
)
}
# using 'G' in multiple g plot may overwrite G name internally
if not 'G' in self.inv_map.values():
user_G_name = self.variables.get('G')
if user_G_name:
self.inv_map[user_G_name] = 'G'
# data will be loaded via Data.df if it has not been already
self.temporal_freq = self._check_daily_freq(
drop_gaps, daily_frac, max_interp_hours, max_interp_hours_night
)
# check units, convert if possible for energy balance, ppt, Rs, vp,
self._check_convert_units()
self._check_gridMET()
# assume energy balance vars exist, will be validated upon corr
self._has_eb_vars = True
elif data is not None:
print('{} is not a valid input type'.format(type(data)))
raise TypeError("Must assign a fluxdataqaqc.data.Data object")
else:
self._df = None
self.corrected = False
self.corr_meth = None
def daily_ASCE_refET(self, reference='short', anemometer_height=None):
"""
Calculate daily ASCE standardized short (ETo) or tall (ETr) reference
ET from input data and wind measurement height.
The resulting time series will automatically be merged into the
:attr:`QaQc.df` dataframe named "ASCE_ETo" or "ASCE_ETr" respectively.
Keyword Arguments:
reference (str): default "short", calculate tall or short ASCE
reference ET.
anemometer_height (float or None): wind measurement height in meters
, default :obj:`None`. If :obj:`None` then look for the
"anemometer_height" entry in the **METADATA** section of the
config.ini, if not there then print a warning and use 2 meters.
Returns:
:obj:`None`
Note:
If the hourly ASCE variables were previously calculated from a
:obj:`.Data` instance they will be overwritten as they are saved
with the same names.
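Example:
A short sketch of intended usage (assumes 'vp', 'ws', 'sw_in',
't_min' and 't_max' are available in :attr:`QaQc.df`; the anemometer
height value is illustrative):
>>> q.daily_ASCE_refET(reference='short', anemometer_height=3)
>>> 'ASCE_ETo' in q.df.columns
True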
"""
df = self.df.rename(columns=self.inv_map)
req_vars = ['vp', 'ws', 'sw_in', 't_min', 't_max']
if not set(req_vars).issubset(df.columns):
print('Missing one or more required variables, cannot compute')
return
if anemometer_height is None:
anemometer_height = self.config.get(
'METADATA', 'anemometer_height', fallback=None
)
if anemometer_height is None:
print(
'WARNING: anemometer height was not given and not found in '
"the config file's metadata, proceeding with height of 2 m"
)
anemometer_height = 2
# RefET will convert to MJ-m2-hr
input_units = {
'rs': 'w/m2'
}
length = len(df.t_min)
tmin = df.t_min
tmax = df.t_max
rs = df.sw_in
ea = df.vp
uz = df.ws
zw = np.full(length, anemometer_height)
lat = np.full(length, self.latitude)
doy = df.index.dayofyear
elev = np.full(length, self.elevation)
REF = refet.Daily(
tmin,
tmax,
ea,
rs,
uz,
zw,
elev,
lat,
doy,
method='asce',
input_units=input_units,
)
if reference == 'short':
ret = REF.eto()
name = 'ASCE_ETo'
elif reference == 'tall':
ret = REF.etr()
name = 'ASCE_ETr'
# can add directly into QaQc.df
df[name] = ret
self._df = df.rename(columns=self.variables)
self.variables[name] = name
self.units[name] = 'mm'
def _check_convert_units(self):
"""
Verify if units are recognized for variables in QaQc.allowable_units,
then verify that they have the required units as in QaQc.required_units
and, if not, convert them.
Conversions are handled by util.Convert.convert class method.
"""
# force all input units to lower case
for k, v in self.units.items():
self.units[k] = v.lower()
# can add check/rename unit aliases, e.g. C or c or celcius, etc...
df = self._df.rename(columns=self.inv_map)
for v, u in self.units.items():
if not v in QaQc.required_units.keys():
# variable is not required to have any particular unit, skip
continue
elif not u in QaQc.allowable_units[v]:
print('ERROR: {} units are not recognizable for var: {}\n'
'allowable input units are: {}\nNot converting.'.format(
u, v, ','.join(QaQc.allowable_units[v])
)
)
elif not u == QaQc.required_units[v]:
# do conversion, update units
# pass variable, initial unit, unit to be converted to, df
df = Convert.convert(v, u, QaQc.required_units[v], df)
self.units[v] = QaQc.required_units[v]
self._df = df
def _check_gridMET(self):
"""
Check if gridMET has been downloaded (file path in config), if so
also check if dates fully intersect those of station data. If both
conditions are met then set :attr:`gridMET_exists` to True,
otherwise set it to False.
Arguments:
None
Returns:
None
"""
gridfile = self.config.get('METADATA','gridMET_file_path',fallback=None)
if gridfile is None:
self.gridMET_exists = False
else:
try:
grid_df = pd.read_csv(
gridfile, parse_dates=True, index_col='date'
)
gridMET_dates = grid_df.index
station_dates = self.df.index
# add var names and units to attributes
for val in grid_df.columns:
meta = [
v for k,v in QaQc.gridMET_meta.items() if \
v['rename'] == val
][0]
self.variables[meta['rename']] = meta['rename']
self.units[meta['rename']] = meta['units']
# flag False if ETr was not downloaded for our purposes
if not {'gridMET_ETr','gridMET_ETo'}.issubset(grid_df.columns):
self.gridMET_exists = False
elif station_dates.isin(gridMET_dates).all():
self.gridMET_exists = True
# some gridMET exists but needs to be updated for coverage
else:
self.gridMET_exists = False
except:
print('WARNING: unable to find/read gridMET file\n {}'.format(
gridfile)
)
self.gridMET_exists = False
def download_gridMET(self, variables=None):
"""
Download reference ET (alfalfa and grass) and precipitation from
gridMET for all days in flux station time series by default.
Also has ability to download other specific gridMET variables by
passing a list of gridMET variable names. Possible variables and their
long form can be found in :attr:`QaQc.gridMET_meta`.
Upon download, the gridMET time series for the nearest gridMET cell will
be merged into the instance's dataframe attribute :attr:`QaQc.df` and all
gridMET variable names will have the prefix "gridMET\_" for
identification.
The gridMET time series file will be saved to a subdirectory called
"gridMET_data" within the directory that contains the config file
for the current :obj:`QaQc` instance and named with the site ID and
gridMET cell centroid lat and long coordinates in decimal degrees.
Arguments:
variables (None, str, list, or tuple): default None. List of gridMET
variable names to download, if None download ETr and
precipitation. See the keys of the :attr:`QaQc.gridMET_meta`
dictionary for a list of all variables that can be downloaded
by this method.
Returns:
:obj:`None`
Note:
Any previously downloaded gridMET time series will be overwritten
when calling the method; however, if using the gap filling
method of the "ebr" correction routine the download will not
overwrite currently existing data so long as gridMET reference ET
and precipitation is on disk and its path is properly set in the
config file.
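Example:
A hedged sketch; by default reference ET (ETr and ETo) and
precipitation are downloaded, and additional variable keys are listed
in :attr:`QaQc.gridMET_meta`:
>>> q.download_gridMET()
>>> q.download_gridMET(['srad', 'vs'])
>>> q.gridMET_exists
True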
"""
# opendap thredds server
server_prefix =\
'http://thredds.northwestknowledge.net:8080/thredds/dodsC/'
if variables is None:
variables = ['ETr', 'pet', 'pr']
elif not isinstance(variables, (str,list,tuple)):
print(
'ERROR: {} is not a valid gridMET variable '
'or list of variable names, valid options:'
'\n{}'.format(
variables, ', '.join([v for v in QaQc.gridMET_meta])
)
)
return
if isinstance(variables, str):
variables = list(variables)
station_dates = self.df.index
grid_dfs = []
for i,v in enumerate(variables):
if not v in QaQc.gridMET_meta:
print(
'ERROR: {} is not a valid gridMET variable, '
'valid options: {}'.format(
v, ', '.join([v for v in QaQc.gridMET_meta])
)
)
continue
meta = QaQc.gridMET_meta[v]
self.variables[meta['rename']] = meta['rename']
self.units[meta['rename']] = meta['units']
print('Downloading gridMET var: {}\n'.format(meta['name']))
netcdf = '{}{}'.format(server_prefix, meta['nc_suffix'])
ds = xarray.open_dataset(netcdf).sel(
lon=self.longitude, lat=self.latitude, method='nearest'
).drop('crs')
df = ds.to_dataframe().loc[station_dates].rename(
columns={meta['name']:meta['rename']}
)
df.index.name = 'date' # ensure date col name is 'date'
# on first variable (if multiple) grab gridcell centroid coords
if i == 0:
lat_centroid = df.lat[0]
lon_centroid = df.lon[0]
df.drop(['lat', 'lon'], axis=1, inplace=True)
grid_dfs.append(df)
# combine data
df = pd.concat(grid_dfs, axis=1)
# save gridMET time series to CSV in subdirectory where config file is
gridMET_file = self.config_file.parent.joinpath(
'gridMET_data'
).joinpath('{}_{:.4f}N_{:.4f}W.csv'.format(
self.site_id, lat_centroid, lon_centroid
)
)
gridMET_file.parent.mkdir(parents=True, exist_ok=True)
self.config.set(
'METADATA', 'gridMET_file_path', value=str(gridMET_file)
)
df.to_csv(gridMET_file)
# rewrite config with updated gridMET file path
with open(str(self.config_file), 'w') as outf:
self.config.write(outf)
# drop previously calced vars for replacement, no join duplicates
self._df = _drop_cols(self._df, variables)
self._df = self._df.join(df)
self.gridMET_exists = True
def _check_daily_freq(self, drop_gaps, daily_frac, max_interp_hours,
max_interp_hours_night):
"""
Check temporal frequency of input Data, resample to daily if not already
Note:
If one or more sub-daily values are missing for a day the entire
day will be replaced with a null (:obj:`numpy.nan`).
If user QC values for filtering data are present they will also be
resampled to daily means, however this should not be an issue as
the filtering step occurs in a :obj:`fluxdataqaqc.Data` object.
"""
# rename columns to internal names
df = self._df.rename(columns=self.inv_map)
if not isinstance(df, pd.DataFrame):
return
freq = pd.infer_freq(df.index)
second_day = df.index.date[2]
third_day = second_day + pd.Timedelta(1, unit='D')
# pd.infer_freq does not always work and may return None
if freq and freq > 'D':
pass
elif freq and freq < 'D':
print('\nThe input data temporal frequency appears to be less than',
'daily.')
# slice is transposed if only one date entry
elif len(df.loc[str(third_day)].index) == len(df.columns) and \
(df.loc[str(third_day)].index == df.columns).all():
print('\nInput temporal frequency is already daily.')
freq = 'D'
# add missing dates (if exist) for full time series records/plots
idx = pd.date_range(df.index.min(), df.index.max())
df = df.reindex(idx)
df.index.name = 'date'
elif freq is None:
freq = 'na'
if not freq == 'D':
# find frequency manually, optionally drop days with subdaily gaps
# see if two adj. dates exist, skip first day in case it is not full
max_times_in_day = len(df.loc[str(third_day)].index)
self.n_samples_per_day = max_times_in_day
downsample = False
if daily_frac > 1:
print('ERROR: daily_frac must be between 0 and 1, using 1')
daily_frac = 1
elif daily_frac < 0:
print('ERROR: daily_frac must be between 0 and 1, using 0')
daily_frac = 0
if not str(third_day) in df.index and drop_gaps:
print('WARNING: it looks like the input temporal frequency',
'is greater than daily, downsampling, proceed with',
'caution!\n')
downsample = True
energy_bal_vars = ['LE', 'H', 'Rn', 'G']
asce_std_interp_vars = ['t_avg','sw_in','ws','vp']
# for interpolation of energy balance variables if they exist
energy_bal_vars = list(
set(energy_bal_vars).intersection(df.columns)
)
asce_std_interp_vars = list(
set(asce_std_interp_vars).intersection(df.columns)
)
interp_vars = asce_std_interp_vars + energy_bal_vars
# add subday gap col for energy balance comps to sum daily gap count
for v in energy_bal_vars:
df['{}_subday_gaps'.format(v)] = False
df.loc[df[v].isna(), '{}_subday_gaps'.format(v)] = True
print('Data is being resampled to daily temporal frequency.')
sum_cols = [k for k,v in self.agg_dict.items() if v == 'sum']
sum_cols = list(set(sum_cols).intersection(df.columns))
mean_cols = set(df.columns) - set(sum_cols)
means = df.loc[:,mean_cols].apply(
pd.to_numeric, errors='coerce').resample('D').mean().copy()
# issue with resample sum of nans, need to drop first else 0
sums = df.loc[:,sum_cols].dropna().apply(
pd.to_numeric, errors='coerce').resample('D').sum()
if max_interp_hours is not None:
# linearly interpolate energy balance components only
max_gap = int(round(max_interp_hours/24 * max_times_in_day))
max_night_gap = int(
round(max_interp_hours_night/24 * max_times_in_day)
)
print(
'Linearly interpolating gaps in energy balance components '
'up to {} hours when Rn < 0 and up to {} hours when '
'Rn >= 0.'.format(max_interp_hours_night, max_interp_hours)
)
tmp = df[interp_vars].apply(
pd.to_numeric, errors='coerce'
)
if 'Rn' in energy_bal_vars:
grped_night = tmp.loc[
(tmp.Rn < 0) & (tmp.Rn.notna())
].copy()
grped_night.drop_duplicates(inplace=True)
grped_night = grped_night.groupby(
pd.Grouper(freq='24H', offset='12H')).apply(
lambda x: x.interpolate(
method='linear', limit=max_night_gap,
limit_direction='both', limit_area='inside'
)
)
grped_day = tmp.loc[(tmp.Rn >= 0) | (tmp.Rn.isna())].copy()
grped_day.drop_duplicates(inplace=True)
grped_day = grped_day.groupby(
pd.Grouper(freq='24H')).apply(
lambda x: x.interpolate(
method='linear', limit=max_gap,
limit_direction='both', limit_area='inside'
)
)
else:
grped_night = tmp.copy()
grped_night.drop_duplicates(inplace=True)
grped_night = grped_night.groupby(
pd.Grouper(freq='24H', offset='12H')).apply(
lambda x: x.interpolate(
method='linear', limit=max_night_gap,
limit_direction='both', limit_area='inside'
)
)
grped_day = tmp.copy()
grped_day.drop_duplicates(inplace=True)
grped_day = grped_day.groupby(
pd.Grouper(freq='24H')).apply(
lambda x: x.interpolate(
method='linear', limit=max_gap,
limit_direction='both', limit_area='inside'
)
)
# get full datetime index from grouper operation
if type(grped_night.index) is pd.MultiIndex:
grped_night.index = grped_night.index.get_level_values(1)
if type(grped_day.index) is pd.MultiIndex:
grped_day.index = grped_day.index.get_level_values(1)
interped = pd.concat([grped_day, grped_night])
if interped.index.duplicated().any():
interped = interped.loc[
~interped.index.duplicated(keep='first')
]
# overwrite non-interpolated daily means
means[interp_vars] =\
interped[interp_vars].resample('D').mean().copy()
if 't_avg' in interp_vars:
means['t_min'] = interped.t_avg.resample('D').min()
means['t_max'] = interped.t_avg.resample('D').max()
self.variables['t_min'] = 't_min'
self.units['t_min'] = self.units['t_avg']
self.variables['t_max'] = 't_max'
self.units['t_max'] = self.units['t_avg']
interp_vars = interp_vars + ['t_min','t_max']
interped[['t_min','t_max']] = means[['t_min','t_max']]
if drop_gaps:
# make sure round errors do not affect this value
n_vals_needed = int(round(max_times_in_day * daily_frac))
# don't overwrite QC flag columns
data_cols = [
c for c in df.columns if not c.endswith('_qc_flag')
]
# interpolate first before counting gaps
if max_interp_hours:
df[interp_vars] = interped[interp_vars].copy()
days_with_gaps = df[data_cols].groupby(
df.index.date).count() < n_vals_needed
df = means.join(sums)
# drop days based on fraction of missing subday samples
if not downsample and drop_gaps:
print(
'Filtering days with less than {}% or {}/{} sub-daily '
'measurements'.format(
daily_frac * 100, n_vals_needed, max_times_in_day
)
)
df[days_with_gaps] = np.nan
else:
self.n_samples_per_day = 1
self._df = df.rename(columns=self.variables)
return freq
@property
def df(self):
"""
See :attr:`fluxdataqaqc.Data.df`; the only difference is that the
:attr:`QaQc.df` is first resampled to daily frequency.
"""
# avoid overwriting pre-assigned data
if isinstance(self._df, pd.DataFrame):
return self._df.rename(columns=self.variables)
@df.setter
def df(self, data_frame):
if not isinstance(data_frame, pd.DataFrame):
raise TypeError("Must assign a Pandas.DataFrame object")
self._df = data_frame
@property
def monthly_df(self):
"""
Temporally resample time series data to monthly frequency using
monthly means or sums as specified in :attr:`QaQc.agg_dict`; provides
data as a :obj:`pandas.DataFrame`.
Note that monthly means or sums are forced to null values if more than
20 percent of a month's days are missing in the daily data
(:attr:`QaQc.df`). Also, for variables that are summed (e.g. ET or
precipitation) missing days (if less than 20 percent of the month) will
be filled with the month's daily mean value before summation.
If a :obj:`QaQc` instance has not yet run an energy balance correction
i.e. :attr:`QaQc.corrected` = :obj:`False` before accessing
:attr:`monthly_df` then the default routine of data correction (energy
balance ratio method) will be conducted.
Utilize the :attr:`QaQc.monthly_df` property the same way as the
:attr:`fluxdataqaqc.Data.df`, see its API documentation for examples.
Tip:
If you have additional variables in :attr:`QaQc.df` or would like
to change the aggregation method for the monthly time series,
adjust the instance attribute :attr:`QaQc.agg_dict` before
accessing the :attr:`QaQc.monthly_df`.
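Example:
A minimal sketch; note that if no closure correction has been run
yet the default ('ebr') correction is applied on first access:
>>> q.corrected
False
>>> monthly = q.monthly_df
>>> q.corrected
True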
"""
if not self.corrected and self._has_eb_vars:
self.correct_data()
# rename columns to internal names
df = self._df.rename(columns=self.inv_map).copy()
# avoid including string QC flags because of float forcing on resample
numeric_cols = [c for c in df.columns if not '_qc_flag' in c]
sum_cols = [k for k,v in self.agg_dict.items() if v == 'sum']
# to avoid warning/error of missing columns
sum_cols = list(set(sum_cols).intersection(df.columns))
mean_cols = list(set(numeric_cols) - set(sum_cols))
# if data type has changed to 'obj' resample skips...
# make sure data exists
if len(mean_cols) >= 1:
means = monthly_resample(df[mean_cols], mean_cols, 'mean', 0.8)
else:
means = None
if len(sum_cols) >= 1:
sums = monthly_resample(df[sum_cols], sum_cols, 'sum', 0.8)
else:
sums = None
if isinstance(means, pd.DataFrame) and isinstance(sums, pd.DataFrame):
df = means.join(sums)
elif isinstance(means, pd.DataFrame):
df = means
elif isinstance(sums, pd.DataFrame):
df = sums
# use monthly sums for ebr columns not means of ratio
if set(['LE','H','Rn','G']).issubset(df.columns):
df.ebr = (df.H + df.LE) / (df.Rn - df.G)
if set(['LE_corr','H_corr','Rn','G']).issubset(df.columns):
df.ebr_corr = (df.H_corr + df.LE_corr) / (df.Rn - df.G)
if set(['LE_user_corr','H_user_corr','Rn','G']).issubset(df.columns):
df['ebr_user_corr']=(df.H_user_corr+df.LE_user_corr) / (df.Rn-df.G)
df.replace([np.inf, -np.inf], np.nan, inplace=True)
return df.rename(columns=self.variables)
def write(self, out_dir=None, use_input_names=False):
"""
Save daily and monthly time series of initial and "corrected" data in
CSV format.
Note, if the energy balance closure correction
(:attr:`QaQc.correct_data`) has not been run, this method will run it
with default options before saving time series files to disk.
The default location for saving output time series files is within an
"output" subdifrectory of the parent directory containing the
config.ini file that was used to create the :obj:`fluxdataqaqc.Data`
and :obj:`QaQc` objects, the names of the files will start with the
site_id and have either the "daily_data" or "monthly_data" suffix.
Keyword Arguments:
out_dir (str or :obj:`None`): default :obj:`None`. Directory to
save CSVs, if :obj:`None` save to :attr:`out_dir` instance
variable (typically "output" directory where config.ini file
exists).
use_input_names (bool): default :obj:`False`. If :obj:`False` use
``flux-data-qaqc`` variable names as in output file header,
or if :obj:`True` use the user's input variable names where
possible (for variables that were read in and not modified or
calculated by ``flux-data-qaqc``).
Returns:
:obj:`None`
Example:
Starting from a config.ini file,
>>> from fluxdataqaqc import Data, QaQc
>>> d = Data('path/to/config.ini')
>>> q = QaQc(d)
>>> # note no energy balance closure correction has been run
>>> q.corrected
False
>>> q.write()
>>> q.corrected
True
Note:
To save data created by multiple correction routines, be sure to
run the correction and then save to different output directories
otherwise output files will be overwritten with the most recently
used correction option.
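For example (a sketch of the pattern described above, output
directory names are hypothetical):
>>> q.correct_data(meth='ebr')
>>> q.write(out_dir='output_ebr')
>>> q.correct_data(meth='br')
>>> q.write(out_dir='output_br')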
"""
if out_dir is None:
out_dir = self.out_dir
else:
out_dir = Path(out_dir)
self.out_dir = out_dir.absolute()
if not out_dir.is_dir():
print(
'{} does not exist, creating directory'.format(
out_dir.absolute()
)
)
out_dir.mkdir(parents=True, exist_ok=True)
if not self.corrected and self._has_eb_vars:
print(
'WARNING: energy balance closure corrections have not yet been '
'run. Using default options before writing output time series.'
)
self.correct_data()
daily_outf = out_dir / '{}_daily_data.csv'.format(self.site_id)
monthly_outf = out_dir / '{}_monthly_data.csv'.format(self.site_id)
if use_input_names:
self.df.to_csv(daily_outf)
self.monthly_df.to_csv(monthly_outf)
else:
self.df.rename(columns=self.inv_map).to_csv(daily_outf)
self.monthly_df.rename(columns=self.inv_map).to_csv(monthly_outf)
@classmethod
def from_dataframe(cls, df, site_id, elev_m, lat_dec_deg, var_dict,
drop_gaps=True, daily_frac=1.00, max_interp_hours=2,
max_interp_hours_night=4):
"""
Create a :obj:`QaQc` object from a :obj:`pandas.DataFrame` object.
Arguments:
df (:obj:`pandas.DataFrame`): DataFrame of climate variables with
datetime index named 'date'
site_id (str): site identifier such as station name
elev_m (int or float): elevation of site in meters
lat_dec_deg (float): latitude of site in decimal degrees
var_dict (dict): dictionary that maps `flux-data-qaqc` variable
names to user's columns in `df` e.g. {'Rn': 'netrad', ...}
see :attr:`fluxdataqaqc.Data.variable_names_dict` for list of
`flux-data-qaqc` variable names
Returns:
None
Note:
When using this method, any output files (CSVs, plots) will be
saved to a directory named "output" in the current working
directory.
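Example:
A hedged sketch, assuming ``df`` is a daily :obj:`pandas.DataFrame`
with a datetime index named 'date' and columns 'netrad', 'g', 'le'
and 'h' (the column names are hypothetical):
>>> var_dict = {'Rn': 'netrad', 'G': 'g', 'LE': 'le', 'H': 'h'}
>>> q = QaQc.from_dataframe(df, 'my_site', 1100, 40.5, var_dict)
>>> q.correct_data()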
"""
qaqc = cls()
# use property setter, make sure it is a dataframe object
qaqc.df = df
qaqc.site_id = site_id
qaqc.latitude = lat_dec_deg
qaqc.elevation = elev_m
qaqc.out_dir = Path('output').absolute()
qaqc.variables = var_dict
# TODO handle assigned units
qaqc.inv_map = {v: k for k, v in var_dict.items()}
qaqc.temporal_freq = qaqc._check_daily_freq(
drop_gaps, daily_frac, max_interp_hours, max_interp_hours_night
)
qaqc.corrected = False
qaqc.corr_meth = None
qaqc._has_eb_vars = True
return qaqc
def correct_data(self, meth='ebr', et_gap_fill=True, y='Rn', refET='ETr',
x=['G','LE','H'], fit_intercept=False):
"""
Correct turbulent fluxes to improve energy balance closure using an
Energy Balance Ratio method modified from `FLUXNET
<https://fluxnet.fluxdata.org/data/fluxnet2015-dataset/data-processing/>`__.
Currently three correction options are available: 'ebr' (Energy Balance
Ratio), 'br' (Bowen Ratio), and 'lin_regress' (least squares linear
regression). If you use one method followed by another, the
corrected versions of LE, H, ET, ebr, etc. will be overwritten with the
most recently used approach.
This method also computes potential clear sky radiation
(saved as "rso") using an ASCE approach based on station elevation and
latitude. ET is calculated from raw and corrected LE using daily air
temperature to correct the latent heat of vaporization; if air temp. is
not available in the input data then air temp. is assumed to be 20
degrees Celsius.
Corrected or otherwise newly calculated variables are named using the
following suffixes to distinguish them:
.. code-block:: text
uncorrected LE, H, etc. from input data have no suffix
_corr uses adjusted LE, H, etc. from the correction method used
_user_corr uses corrected LE, H, etc. found in data file (if provided)
Arguments:
y (str): name of dependent variable for regression, must be in
:attr:`QaQc.variables` keys, or a user-added variable.
Only used if ``meth='lin_regress'``.
x (str or list): name or list of independent variables for
regression, names must be in :attr:`QaQc.variables` keys, or a
user-added variable. Only used if ``meth='lin_regress'``.
Keyword Arguments:
meth (str): default 'ebr'. Method to correct energy balance.
et_gap_fill (bool): default True. If true fill any remaining gaps
in corrected ET with ETr * ETrF, where ETr is alfalfa reference
ET from gridMET and ETrF is the filtered, smoothed (7 day
moving avg. min 2 days) and linearly interpolated crop
coefficient. The number of days in each month that corrected ET
was gap-filled is provided in :attr:`QaQc.monthly_df` as the
column "ET_gap".
refET (str): default "ETr". Which gridMET reference product to use
for ET gap filling, "ETr" or "ETo" are valid options.
fit_intercept (bool): default False. Fit intercept for regression or
set to zero if False. Only used if ``meth='lin_regress'``.
apply_coefs (bool): default False. If :obj:`True` then apply fitted
coefficients to their respective variables for linear regression
correction method, rename the variables with the suffix "_corr".
Returns:
:obj:`None`
Example:
Starting from a correctly formatted config.ini and climate time
series file, this example shows how to read in the data and apply
the energy balance ratio correction without gap-filling with
gridMET ETr x ETrF.
>>> from fluxdataqaqc import Data, QaQc
>>> d = Data('path/to/config.ini')
>>> q = QaQc(d)
>>> q.corrected
False
Now apply the energy balance closure correction
>>> q.correct_data(meth='ebr', et_gap_fill=False)
>>> q.corrected
True
Note:
If ``et_gap_fill`` is set to :obj:`True` (default) the gap filled
days of corrected ET will be used to recalculate LE_corr for those
days with the gap filled values, i.e. LE_corr will also be
gap-filled.
Note:
The *ebr_corr* variable or energy balance closure ratio is
calculated from the corrected versions of LE and H independent
of the method. When using the 'ebr' method the energy balance
correction factor (what is applied to the raw H and LE) is left as
calculated (inverse of ebr) and saved as *ebc_cf*.
See Also:
For explanation of the linear regression method see the
:meth:`QaQc.lin_regress` method; calling that method with the
keyword argument ``apply_coefs=True`` and :math:`Rn` as the y
variable and the other energy balance components as the x variables
will give the same result as the default inputs to this function
when ``meth='lin_regress'``.
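As a sketch of that equivalence (for the regression step itself):
>>> q.correct_data(meth='lin_regress', y='Rn', x=['G', 'LE', 'H'])
>>> q.lin_regress(y='Rn', x=['G', 'LE', 'H'], fit_intercept=False,
... apply_coefs=True)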
"""
# in case starting in Python and no df assigned yet
if not isinstance(self._df, pd.DataFrame):
print('Please assign a dataframe of acceptable data first!')
return
if meth not in self.corr_methods:
err_msg = ('ERROR: {} is not a valid correction option, please '
'use one of the following: {}'.format(meth, ', '.join(
[el for el in self.corr_methods]))
)
raise ValueError(err_msg)
# calculate clear sky radiation if not already computed
self._calc_rso()
# energy balance corrections
eb_vars = {'Rn','LE','H','G'}
if not eb_vars.issubset(self.variables.keys()) or\
not eb_vars.issubset(
self.df.rename(columns=self.inv_map).columns):
print(
'\nMissing one or more energy balance variables, cannot perform'
' energy balance correction.'
)
self._has_eb_vars = False
# calculate raw (from input LE) ET
self._calc_et()
# fill gaps of raw ET with ET from smoothed and gap filled ETrF*ETr
if et_gap_fill:
self._ET_gap_fill(et_name='ET', refET=refET)
return
if meth == 'ebr':
self._ebr_correction()
elif meth == 'br':
self._bowen_ratio_correction()
elif meth == 'lin_regress':
if y not in eb_vars or len(set(x).difference(eb_vars)) > 0:
print(
'WARNING: correcting energy balance variables using '
                    'dependent or independent variables that are not '
'energy balance components will cause undesired results!'
'\nIt is recommended to use only Rn, G, LE, and H for '
'dependent (y) and independent (x) variables.'
)
self.lin_regress(
y=y, x=x, fit_intercept=fit_intercept, apply_coefs=True
)
self.corr_meth = meth
# calculate raw, corrected ET
self._calc_et()
# fill gaps of corr ET with ET from smoothed and gap filled ETrF*ETr
if et_gap_fill:
self._ET_gap_fill(et_name='ET_corr', refET=refET)
# update inv map for naming
self.inv_map = {
v: k for k, v in self.variables.items() if (
not v.replace('_mean', '') == k or not k in self.df.columns)
}
# using 'G' in multiple g plot may overwrite G name internally
if not 'G' in self.inv_map.values():
user_G_name = self.variables.get('G')
self.inv_map[user_G_name] = 'G'
def _calc_vpd_from_vp(self):
"""
        Based on ASCE standardized ref et eqn. 37, air temperature must be in
        Celsius and actual vapor pressure in kPa.
Can also calculate VP from VPD and air temperature, es and rel.
humidity.
"""
df = self.df.rename(columns=self.inv_map)
# calculate vpd from actual vapor pressure and temp
# check if needed variables exist and units are correct
has_vpd_vars = set(['vp','t_avg']).issubset(df.columns)
units_correct = (
self.units.get('vp') == 'kpa' and self.units.get('t_avg') == 'c'
)
if has_vpd_vars and units_correct:
# saturation vapor pressure (es)
es = 0.6108 * np.exp(17.27 * df.t_avg / (df.t_avg + 237.3))
df['vpd'] = es - df.vp
df['es'] = es
self.variables['vpd'] = 'vpd'
self.units['vpd'] = 'kpa'
self.variables['es'] = 'es'
self.units['es'] = 'kpa'
        # similarly, calculate actual vapor pressure from vapor pressure deficit and temp
has_vp_vars = set(['vpd','t_avg']).issubset(df.columns)
units_correct = (
self.units.get('vpd') == 'kpa' and self.units.get('t_avg') == 'c'
)
if has_vp_vars and units_correct:
# saturation vapor pressure (es)
es = 0.6108 * np.exp(17.27 * df.t_avg / (df.t_avg + 237.3))
df['vp'] = es - df.vpd
df['es'] = es
self.variables['vp'] = 'vp'
self.units['vp'] = 'kpa'
self.variables['es'] = 'es'
self.units['es'] = 'kpa'
        if 'rh' not in self.variables and {'vp','es'}.issubset(self.variables):
            if self.units.get('vp') == 'kpa':
print(
'Calculating relative humidity from actual and saturation '
'vapor pressure and air temperature'
)
df['rh'] = 100 * (df.vp / df.es)
self.variables['rh'] = 'rh'
self.units['rh'] = '%'
self._df = df
def _ET_gap_fill(self, et_name='ET_corr', refET='ETr'):
"""
        Use gridMET reference ET to calculate ET from ETrF/EToF, smooth and gap
        fill the calculated ET, and then use it to fill gaps in corrected ET.
        Keeps track of the number of days of ET that were filled in each month.
Keyword Arguments:
            et_name (str): default "ET_corr". Name of ET variable to use when
                calculating the crop coefficient and to fill gaps with
                calculated ET.
            refET (str): default "ETr". Which gridMET reference ET product to
                use for gap filling, "ETr" or "ETo" are valid options.
Returns:
:obj:`None`
"""
        # drop relevant calculated variables if they exist
self._df = _drop_cols(self._df, self._et_gap_fill_vars)
# get ETr if not on disk
if not self.gridMET_exists:
self.download_gridMET()
else:
# gridMET file has been verified and has all needed dates, just load
gridfile = self.config.get(
'METADATA','gridMET_file_path',fallback=None
)
print(
'gridMET reference ET already downloaded for station at:\n'
'{}\nnot redownloading.'.format(gridfile)
)
grid_df = | pd.read_csv(gridfile, parse_dates=True, index_col='date') | pandas.read_csv |
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
from pandas import (
DataFrame,
DatetimeIndex,
Series,
date_range,
)
import pandas._testing as tm
from pandas.core.window import ExponentialMovingWindow
def test_doc_string():
df = DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
df.ewm(com=0.5).mean()
def test_constructor(frame_or_series):
c = frame_or_series(range(5)).ewm
# valid
c(com=0.5)
c(span=1.5)
c(alpha=0.5)
c(halflife=0.75)
c(com=0.5, span=None)
c(alpha=0.5, com=None)
c(halflife=0.75, alpha=None)
# not valid: mutually exclusive
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
c(com=0.5, alpha=0.5)
with pytest.raises(ValueError, match=msg):
c(span=1.5, halflife=0.75)
with pytest.raises(ValueError, match=msg):
c(alpha=0.5, span=1.5)
# not valid: com < 0
msg = "comass must satisfy: comass >= 0"
with pytest.raises(ValueError, match=msg):
c(com=-0.5)
# not valid: span < 1
msg = "span must satisfy: span >= 1"
with pytest.raises(ValueError, match=msg):
c(span=0.5)
# not valid: halflife <= 0
msg = "halflife must satisfy: halflife > 0"
with pytest.raises(ValueError, match=msg):
c(halflife=0)
# not valid: alpha <= 0 or alpha > 1
msg = "alpha must satisfy: 0 < alpha <= 1"
for alpha in (-0.5, 1.5):
with pytest.raises(ValueError, match=msg):
c(alpha=alpha)
@pytest.mark.parametrize("method", ["std", "mean", "var"])
def test_numpy_compat(method):
# see gh-12811
e = ExponentialMovingWindow(Series([2, 4, 6]), alpha=0.5)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(dtype=np.float64)
def test_ewma_times_not_datetime_type():
msg = r"times must be datetime64\[ns\] dtype."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(times=np.arange(5))
def test_ewma_times_not_same_length():
msg = "times must be the same length as the object."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(times=np.arange(4).astype("datetime64[ns]"))
def test_ewma_halflife_not_correct_type():
msg = "halflife must be a timedelta convertible object"
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(halflife=1, times=np.arange(5).astype("datetime64[ns]"))
def test_ewma_halflife_without_times(halflife_with_times):
msg = "halflife can only be a timedelta convertible argument if times is not None."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(halflife=halflife_with_times)
@pytest.mark.parametrize(
"times",
[
np.arange(10).astype("datetime64[D]").astype("datetime64[ns]"),
date_range("2000", freq="D", periods=10),
date_range("2000", freq="D", periods=10).tz_localize("UTC"),
],
)
@pytest.mark.parametrize("min_periods", [0, 2])
def test_ewma_with_times_equal_spacing(halflife_with_times, times, min_periods):
halflife = halflife_with_times
data = np.arange(10.0)
data[::2] = np.nan
df = DataFrame({"A": data, "time_col": date_range("2000", freq="D", periods=10)})
with tm.assert_produces_warning(FutureWarning, match="nuisance columns"):
# GH#42738
result = df.ewm(halflife=halflife, min_periods=min_periods, times=times).mean()
expected = df.ewm(halflife=1.0, min_periods=min_periods).mean()
tm.assert_frame_equal(result, expected)
def test_ewma_with_times_variable_spacing(tz_aware_fixture):
tz = tz_aware_fixture
halflife = "23 days"
times = DatetimeIndex(
["2020-01-01", "2020-01-10T00:04:05", "2020-02-23T05:00:23"]
).tz_localize(tz)
data = np.arange(3)
df = DataFrame(data)
result = df.ewm(halflife=halflife, times=times).mean()
expected = DataFrame([0.0, 0.5674161888241773, 1.545239952073459])
tm.assert_frame_equal(result, expected)
def test_ewm_with_nat_raises(halflife_with_times):
# GH#38535
ser = Series(range(1))
times = DatetimeIndex(["NaT"])
with pytest.raises(ValueError, match="Cannot convert NaT values to integer"):
ser.ewm(com=0.1, halflife=halflife_with_times, times=times)
def test_ewm_with_times_getitem(halflife_with_times):
# GH 40164
halflife = halflife_with_times
data = np.arange(10.0)
data[::2] = np.nan
times = date_range("2000", freq="D", periods=10)
df = DataFrame({"A": data, "B": data})
result = df.ewm(halflife=halflife, times=times)["A"].mean()
expected = df.ewm(halflife=1.0)["A"].mean()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["com", "halflife", "span", "alpha"])
def test_ewm_getitem_attributes_retained(arg, adjust, ignore_na):
# GH 40164
kwargs = {arg: 1, "adjust": adjust, "ignore_na": ignore_na}
ewm = DataFrame({"A": range(1), "B": range(1)}).ewm(**kwargs)
expected = {attr: getattr(ewm, attr) for attr in ewm._attributes}
ewm_slice = ewm["A"]
result = {attr: getattr(ewm, attr) for attr in ewm_slice._attributes}
assert result == expected
def test_ewm_vol_deprecated():
ser = Series(range(1))
with tm.assert_produces_warning(FutureWarning):
result = ser.ewm(com=0.1).vol()
expected = ser.ewm(com=0.1).std()
tm.assert_series_equal(result, expected)
def test_ewma_times_adjust_false_raises():
# GH 40098
with pytest.raises(
NotImplementedError, match="times is not supported with adjust=False."
):
Series(range(1)).ewm(
0.1, adjust=False, times=date_range("2000", freq="D", periods=1)
)
@pytest.mark.parametrize(
"func, expected",
[
[
"mean",
DataFrame(
{
0: range(5),
1: range(4, 9),
2: [7.428571, 9, 10.571429, 12.142857, 13.714286],
},
dtype=float,
),
],
[
"std",
DataFrame(
{
0: [np.nan] * 5,
1: [4.242641] * 5,
2: [4.6291, 5.196152, 5.781745, 6.380775, 6.989788],
}
),
],
[
"var",
DataFrame(
{
0: [np.nan] * 5,
1: [18.0] * 5,
2: [21.428571, 27, 33.428571, 40.714286, 48.857143],
}
),
],
],
)
def test_float_dtype_ewma(func, expected, float_numpy_dtype):
# GH#42452
df = DataFrame(
{0: range(5), 1: range(6, 11), 2: range(10, 20, 2)}, dtype=float_numpy_dtype
)
e = df.ewm(alpha=0.5, axis=1)
result = getattr(e, func)()
tm.assert_frame_equal(result, expected)
def test_times_string_col_deprecated():
# GH 43265
data = np.arange(10.0)
data[::2] = np.nan
df = DataFrame({"A": data, "time_col": date_range("2000", freq="D", periods=10)})
with tm.assert_produces_warning(FutureWarning, match="Specifying times"):
result = df.ewm(halflife="1 day", min_periods=0, times="time_col").mean()
expected = df.ewm(halflife=1.0, min_periods=0).mean()
tm.assert_frame_equal(result, expected)
def test_ewm_sum_adjust_false_notimplemented():
data = Series(range(1)).ewm(com=1, adjust=False)
with pytest.raises(NotImplementedError, match="sum is not"):
data.sum()
@pytest.mark.parametrize(
"expected_data, ignore",
[[[10.0, 5.0, 2.5, 11.25], False], [[10.0, 5.0, 5.0, 12.5], True]],
)
def test_ewm_sum(expected_data, ignore):
# xref from Numbagg tests
# https://github.com/numbagg/numbagg/blob/v0.2.1/numbagg/test/test_moving.py#L50
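    # EWM sum is a decayed sum of prior values with weights (1 - alpha)**k.
    # With ignore_na=False the NaN observation still advances the decay
    # exponent k; with ignore_na=True only valid observations do, which is
    # what separates the two expected cases above.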
data = Series([10, 0, np.nan, 10])
result = data.ewm(alpha=0.5, ignore_na=ignore).sum()
expected = Series(expected_data)
tm.assert_series_equal(result, expected)
def test_ewma_adjust():
vals = Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
assert np.abs(result - 1) < 1e-2
def test_ewma_cases(adjust, ignore_na):
# try adjust/ignore_na args matrix
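    # With com=2.0, alpha = 1 / (1 + com) = 1/3. adjust=True uses the
    # normalized weighted average y_t = sum((1-a)**i * x_{t-i}) / sum((1-a)**i),
    # while adjust=False uses the recursion y_t = (1-a)*y_{t-1} + a*x_t.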
s = Series([1.0, 2.0, 4.0, 8.0])
if adjust:
expected = Series([1.0, 1.6, 2.736842, 4.923077])
else:
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
def test_ewma_nan_handling():
s = Series([1.0] + [np.nan] * 5 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([1.0] * len(s)))
s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4))
@pytest.mark.parametrize(
"s, adjust, ignore_na, w",
[
(
Series([np.nan, 1.0, 101.0]),
True,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
),
(
Series([np.nan, 1.0, 101.0]),
True,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
),
(
Series([np.nan, 1.0, 101.0]),
False,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
),
(
Series([np.nan, 1.0, 101.0]),
False,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
),
(
Series([1.0, np.nan, 101.0]),
True,
False,
[(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, 1.0],
),
(
Series([1.0, np.nan, 101.0]),
True,
True,
[(1.0 - (1.0 / (1.0 + 2.0))), np.nan, 1.0],
),
(
Series([1.0, np.nan, 101.0]),
False,
False,
[(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, (1.0 / (1.0 + 2.0))],
),
(
Series([1.0, np.nan, 101.0]),
False,
True,
[(1.0 - (1.0 / (1.0 + 2.0))), np.nan, (1.0 / (1.0 + 2.0))],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
True,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))) ** 3, np.nan, np.nan, 1.0, np.nan],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
True,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), np.nan, np.nan, 1.0, np.nan],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
False,
False,
[
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
np.nan,
(1.0 / (1.0 + 2.0)),
np.nan,
],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
False,
True,
[
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
np.nan,
np.nan,
(1.0 / (1.0 + 2.0)),
np.nan,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
True,
False,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
1.0,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
True,
True,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 2,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
1.0,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
False,
False,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),
(1.0 / (1.0 + 2.0))
* ((1.0 - (1.0 / (1.0 + 2.0))) ** 2 + (1.0 / (1.0 + 2.0))),
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
False,
True,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 2,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),
(1.0 / (1.0 + 2.0)),
],
),
],
)
def test_ewma_nan_handling_cases(s, adjust, ignore_na, w):
# GH 7603
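    # w holds the relative EWM weight applied to each observation for com=2.0
    # (alpha = 1 / (1 + com) = 1/3); adjust and ignore_na determine how NaNs
    # advance the (1 - 1/3)**k decay, and the expected result below is the
    # weighted cumulative average built from those weights.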
expected = (s.multiply(w).cumsum() / Series(w).cumsum()).fillna(method="ffill")
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = s.ewm(com=2.0, adjust=adjust).mean()
tm.assert_series_equal(result, expected)
def test_ewm_alpha():
# GH 10789
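    # com, span and halflife below are the equivalents of alpha ~= 0.6172:
    # com = 1/alpha - 1, span = 2/alpha - 1, halflife = ln(2) / -ln(1 - alpha),
    # so all four means should match.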
arr = np.random.randn(100)
locs = np.arange(20, 40)
arr[locs] = np.NaN
s = Series(arr)
a = s.ewm(alpha=0.61722699889169674).mean()
b = s.ewm(com=0.62014947789973052).mean()
c = s.ewm(span=2.240298955799461).mean()
d = s.ewm(halflife=0.721792864318).mean()
tm.assert_series_equal(a, b)
tm.assert_series_equal(a, c)
tm.assert_series_equal(a, d)
def test_ewm_domain_checks():
# GH 12492
arr = np.random.randn(100)
locs = np.arange(20, 40)
arr[locs] = np.NaN
s = Series(arr)
msg = "comass must satisfy: comass >= 0"
with pytest.raises(ValueError, match=msg):
s.ewm(com=-0.1)
s.ewm(com=0.0)
s.ewm(com=0.1)
msg = "span must satisfy: span >= 1"
with pytest.raises(ValueError, match=msg):
s.ewm(span=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(span=0.0)
with pytest.raises(ValueError, match=msg):
s.ewm(span=0.9)
s.ewm(span=1.0)
s.ewm(span=1.1)
msg = "halflife must satisfy: halflife > 0"
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=0.0)
s.ewm(halflife=0.1)
msg = "alpha must satisfy: 0 < alpha <= 1"
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=0.0)
s.ewm(alpha=0.1)
s.ewm(alpha=1.0)
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=1.1)
@pytest.mark.parametrize("method", ["mean", "std", "var"])
def test_ew_empty_series(method):
vals = Series([], dtype=np.float64)
ewm = vals.ewm(3)
result = getattr(ewm, method)()
tm.assert_almost_equal(result, vals)
@pytest.mark.parametrize("min_periods", [0, 1])
@pytest.mark.parametrize("name", ["mean", "var", "std"])
def test_ew_min_periods(min_periods, name):
# excluding NaNs correctly
arr = np.random.randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
s = Series(arr)
# check min_periods
# GH 7898
result = getattr(s.ewm(com=50, min_periods=2), name)()
assert result[:11].isna().all()
assert not result[11:].isna().any()
result = getattr(s.ewm(com=50, min_periods=min_periods), name)()
if name == "mean":
assert result[:10].isna().all()
assert not result[10:].isna().any()
else:
# ewm.std, ewm.var (with bias=False) require at least
# two values
assert result[:11].isna().all()
assert not result[11:].isna().any()
# check series of length 0
result = getattr(Series(dtype=object).ewm(com=50, min_periods=min_periods), name)()
tm.assert_series_equal(result, Series(dtype="float64"))
# check series of length 1
result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)()
if name == "mean":
tm.assert_series_equal(result, Series([1.0]))
else:
# ewm.std, ewm.var with bias=False require at least
# two values
tm.assert_series_equal(result, Series([np.NaN]))
# pass in ints
result2 = getattr(Series(np.arange(50)).ewm(span=10), name)()
assert result2.dtype == np.float_
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov(name):
A = Series(np.random.randn(50), index=range(50))
B = A[2:] + np.random.randn(48)
A[:10] = np.NaN
B.iloc[-10:] = np.NaN
result = getattr(A.ewm(com=20, min_periods=5), name)(B)
assert np.isnan(result.values[:14]).all()
assert not np.isnan(result.values[14:]).any()
@pytest.mark.parametrize("min_periods", [0, 1, 2])
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov_min_periods(name, min_periods):
# GH 7898
A = Series(np.random.randn(50), index=range(50))
B = A[2:] + np.random.randn(48)
A[:10] = np.NaN
B.iloc[-10:] = np.NaN
result = getattr(A.ewm(com=20, min_periods=min_periods), name)(B)
# binary functions (ewmcov, ewmcorr) with bias=False require at
# least two values
assert np.isnan(result.values[:11]).all()
assert not np.isnan(result.values[11:]).any()
# check series of length 0
empty = Series([], dtype=np.float64)
result = getattr(empty.ewm(com=50, min_periods=min_periods), name)(empty)
tm.assert_series_equal(result, empty)
# check series of length 1
result = getattr(Series([1.0]).ewm(com=50, min_periods=min_periods), name)(
Series([1.0])
)
tm.assert_series_equal(result, Series([np.NaN]))
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_different_input_array_raise_exception(name):
A = Series(np.random.randn(50), index=range(50))
A[:10] = np.NaN
msg = "other must be a DataFrame or Series"
# exception raised is Exception
with pytest.raises(ValueError, match=msg):
getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50))
@pytest.mark.parametrize("name", ["var", "std", "mean"])
def test_ewma_series(series, name):
series_result = getattr(series.ewm(com=10), name)()
assert isinstance(series_result, Series)
@pytest.mark.parametrize("name", ["var", "std", "mean"])
def test_ewma_frame(frame, name):
frame_result = getattr(frame.ewm(com=10), name)()
assert isinstance(frame_result, DataFrame)
def test_ewma_span_com_args(series):
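    # span and com parameterize the same decay: com = (span - 1) / 2, so
    # span=20 is equivalent to com=9.5 and A should match B.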
A = series.ewm(com=9.5).mean()
B = series.ewm(span=20).mean()
tm.assert_almost_equal(A, B)
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
series.ewm(com=9.5, span=20)
msg = "Must pass one of comass, span, halflife, or alpha"
with pytest.raises(ValueError, match=msg):
series.ewm().mean()
def test_ewma_halflife_arg(series):
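    # halflife maps to com via alpha = 1 - exp(-ln(2) / halflife) and
    # com = 1/alpha - 1; halflife=10.0 gives com ~= 13.9327, hence A ~= B.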
A = series.ewm(com=13.932726172912965).mean()
B = series.ewm(halflife=10.0).mean()
tm.assert_almost_equal(A, B)
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
series.ewm(span=20, halflife=50)
with pytest.raises(ValueError, match=msg):
series.ewm(com=9.5, halflife=50)
with pytest.raises(ValueError, match=msg):
series.ewm(com=9.5, span=20, halflife=50)
msg = "Must pass one of comass, span, halflife, or alpha"
with pytest.raises(ValueError, match=msg):
series.ewm()
def test_ewm_alpha_arg(series):
# GH 10789
s = series
msg = "Must pass one of comass, span, halflife, or alpha"
with pytest.raises(ValueError, match=msg):
s.ewm()
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
s.ewm(com=10.0, alpha=0.5)
with pytest.raises(ValueError, match=msg):
s.ewm(span=10.0, alpha=0.5)
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=10.0, alpha=0.5)
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_ewm_pairwise_cov_corr(func, frame):
result = getattr(frame.ewm(span=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5])
tm.assert_series_equal(result, expected, check_names=False)
def test_numeric_only_frame(arithmetic_win_operators, numeric_only):
# GH#46560
kernel = arithmetic_win_operators
df = DataFrame({"a": [1], "b": 2, "c": 3})
df["c"] = df["c"].astype(object)
ewm = df.ewm(span=2, min_periods=1)
op = getattr(ewm, kernel, None)
if op is not None:
result = op(numeric_only=numeric_only)
columns = ["a", "b"] if numeric_only else ["a", "b", "c"]
expected = df[columns].agg([kernel]).reset_index(drop=True).astype(float)
assert list(expected.columns) == columns
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kernel", ["corr", "cov"])
@pytest.mark.parametrize("use_arg", [True, False])
def test_numeric_only_corr_cov_frame(kernel, numeric_only, use_arg):
# GH#46560
df = DataFrame({"a": [1, 2, 3], "b": 2, "c": 3})
df["c"] = df["c"].astype(object)
arg = (df,) if use_arg else ()
ewm = df.ewm(span=2, min_periods=1)
op = getattr(ewm, kernel)
result = op(*arg, numeric_only=numeric_only)
# Compare result to op using float dtypes, dropping c when numeric_only is True
columns = ["a", "b"] if numeric_only else ["a", "b", "c"]
df2 = df[columns].astype(float)
arg2 = (df2,) if use_arg else ()
ewm2 = df2.ewm(span=2, min_periods=1)
op2 = getattr(ewm2, kernel)
expected = op2(*arg2, numeric_only=numeric_only)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [int, object])
def test_numeric_only_series(arithmetic_win_operators, numeric_only, dtype):
# GH#46560
kernel = arithmetic_win_operators
ser = Series([1], dtype=dtype)
ewm = ser.ewm(span=2, min_periods=1)
op = getattr(ewm, kernel, None)
if op is None:
# Nothing to test
return
if numeric_only and dtype is object:
msg = f"ExponentialMovingWindow.{kernel} does not implement numeric_only"
with pytest.raises(NotImplementedError, match=msg):
op(numeric_only=numeric_only)
else:
result = op(numeric_only=numeric_only)
expected = ser.agg([kernel]).reset_index(drop=True).astype(float)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("kernel", ["corr", "cov"])
@pytest.mark.parametrize("use_arg", [True, False])
@pytest.mark.parametrize("dtype", [int, object])
def test_numeric_only_corr_cov_series(kernel, use_arg, numeric_only, dtype):
# GH#46560
ser = | Series([1, 2, 3], dtype=dtype) | pandas.Series |
import pandas as pd
def load_payment(payment_file: str) -> pd.DataFrame:
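    # Build one feature row per client_id: mean and count of sum_rur per
    # pmnts_name, pivoted into columns named pmnts_name_sum_rur_mean_<name>
    # and pmnts_name_sum_rur_count_<name> (assumes the payment file has
    # client_id, pmnts_name and sum_rur columns).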
payments = pd.read_csv(payment_file, sep=',')
pmnts_sts = payments.groupby(['client_id', 'pmnts_name']).agg(['mean', 'count'])
pmnts_sts.columns = ['_'.join(col) for col in pmnts_sts.columns]
pmnts_sts = pmnts_sts.reset_index()
features_pool = []
for values in ['sum_rur_mean', 'sum_rur_count']:
df_features_ = pd.pivot_table(
pmnts_sts,
values=values,
index='client_id',
columns='pmnts_name',
)
df_features_.columns = df_features_.columns.str.replace(' ', '_')
df_features_ = df_features_.add_prefix('pmnts_name_' + values + '_')
features_pool.append(df_features_)
feats = | pd.concat(features_pool, axis=1) | pandas.concat |
from bs4 import BeautifulSoup
import requests
import pandas as pd
def get_data():
#Store the data url and table in a variable to be used later
data_url = 'https://en.wikipedia.org/wiki/Road_safety_in_Europe'
data_id = 'wikitable sortable'
#Send a request to get the data from the data_url link
response = requests.get(data_url)
    #Using the BeautifulSoup library to store and parse out the data received from the get request
soup = BeautifulSoup(response.text, 'html.parser')
#using the find method to specifically select the table with the specified class attribute and storing into a variable
road_safety_table = soup.find('table', {'class' : data_id})
    #Collect every table row (tr tag) from the road safety table
rows = road_safety_table.find_all('tr')
    #Use the first row's th tags as the column headers, stripping the newline characters from the names
columns = [v.text.replace('\n', '') for v in rows[0].find_all('th')]
df = | pd.DataFrame(columns=columns) | pandas.DataFrame |
import unittest
import canopy
import pandas as pd
import numpy as np
temperatureK = 373.15
temperatureC = 100.0
temperatureF = 212.0
temperatureK2 = 473.15
temperatureC2 = 200.0
temperatureF2 = 392.0
class UnitsTest(unittest.TestCase):
def setUp(self):
self.units = canopy.Units()
# Specific units.
def test_specific_units(self):
self.assertAlmostEqual(
self.units.convert_value_between_units(1, 'inHg', 'Pa'),
3386.39, delta=0.01);
self.assertAlmostEqual(
self.units.convert_value_between_units(1000, 'Pa', 'inHg'),
0.2953, delta=0.0001);
def test_try_get_conversion_to_si(self):
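        # Fahrenheit to kelvin (SI) is K = (F + 459.67) * 5 / 9, which is why
        # the expected conversion has factor 5/9 and offset 459.67 * 5 / 9.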
existing = self.units.try_get_conversion_to_si('F')
self.assertEqual(existing.factor, 5/9)
self.assertEqual(existing.offset, 459.67 * 5 / 9)
missing = self.units.try_get_conversion_to_si('blah')
self.assertEqual(missing, None)
def test_get_conversion_to_si(self):
existing = self.units.get_conversion_to_si('F')
self.assertEqual(existing.factor, 5/9)
self.assertEqual(existing.offset, 459.67 * 5 / 9)
with self.assertRaises(KeyError):
self.units.get_conversion_to_si('blah')
def test_get_conversion_to_si_or_default(self):
existing = self.units.get_conversion_to_si_or_default('F')
self.assertEqual(existing.factor, 5/9)
self.assertEqual(existing.offset, 459.67 * 5 / 9)
missing = self.units.get_conversion_to_si_or_default('blah')
self.assertEqual(missing.factor, 1)
self.assertEqual(missing.offset, 0)
# FROM SI
def test_convert_value_from_si(self):
self.assertAlmostEqual(self.units.convert_value_from_si(temperatureK, 'C'), temperatureC, delta=0.01)
self.assertAlmostEqual(self.units.convert_value_from_si(temperatureK, 'F'), temperatureF, delta=0.01)
self.assertAlmostEqual(self.units.convert_value_from_si(temperatureK, 'C', True), temperatureK, delta=0.01)
self.assertAlmostEqual(self.units.convert_value_from_si(temperatureK, 'F', True), temperatureK * 9 / 5, delta=0.01)
self.assertEqual(self.units.convert_value_from_si(temperatureK, 'K'), temperatureK)
def test_convert_array_from_si(self):
data = np.array([temperatureK, temperatureK2])
data_copy = np.copy(data)
result = self.units.convert_array_from_si(data, 'F')
self.assertIsNot(result, data)
self.assertFalse(np.array_equal(result, data))
self.assertTrue(np.array_equal(data, data_copy))
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureF, delta=0.01)
self.assertAlmostEqual(result[1], temperatureF2, delta=0.01)
def test_convert_array_from_si_no_conversion_required(self):
data = np.array([temperatureK, temperatureK2])
result = self.units.convert_array_from_si(data, 'K')
self.assertIs(result, data)
def test_convert_array_from_si_always_return_copy(self):
data = np.array([temperatureK, temperatureK2])
result = self.units.convert_array_from_si(data, 'K', always_return_copy=True)
self.assertIsNot(result, data)
self.assertTrue(np.array_equal(result, data))
def test_convert_array_from_si_inplace(self):
data = np.array([temperatureK, temperatureK2])
result = self.units.convert_array_from_si(data, 'F', inplace=True)
self.assertIs(result, data)
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureF, delta=0.01)
self.assertAlmostEqual(result[1], temperatureF2, delta=0.01)
def test_convert_series_from_si(self):
data = pd.Series([temperatureK, temperatureK2])
data_copy = data.copy()
result = self.units.convert_series_from_si(data, 'F')
self.assertIsNot(result, data)
self.assertFalse(result.equals(data))
self.assertTrue(data.equals(data_copy))
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], temperatureF, delta=0.01)
self.assertAlmostEqual(result[1], temperatureF2, delta=0.01)
def test_convert_series_from_si_no_conversion_required(self):
data = pd.Series([temperatureK, temperatureK2])
result = self.units.convert_series_from_si(data, 'K')
self.assertIs(result, data)
def test_convert_series_from_si_always_return_copy(self):
data = pd.Series([temperatureK, temperatureK2])
result = self.units.convert_series_from_si(data, 'K', always_return_copy=True)
self.assertIsNot(result, data)
self.assertTrue(result.equals(data))
def test_convert_series_from_si_inplace(self):
data = | pd.Series([temperatureK, temperatureK2]) | pandas.Series |
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = "a"
else:
if isinstance(o, DatetimeIndex):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name="a")
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(
list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype="int64",
name="a",
)
expected_s = Series(
list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype="int64",
name="a",
)
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == "a"
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == "a"
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result, Index(values[1:], name="a"))
elif is_datetime64tz_dtype(o):
# unable to compare NaT / nan
tm.assert_extension_array_equal(result[1:], values[2:])
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_inferred(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_bins(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
with pytest.raises(TypeError):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Series([4, 3, 2], index=["b", "a", "d"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
df = pd.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(df["dt"].copy())
s.name = None
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df["dt"].copy()
s = klass(list(s.values) + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == "datetime64[ns]"
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == "datetime64[ns]"
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name="dt")
result = td.value_counts()
expected_s = Series([6], index=[Timedelta("1day")], name="dt")
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(["1 days"], name="dt")
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name="dt")
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
                exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
codes, uniques = o.factorize()
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig), check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques, check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array(
[5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp
)
codes, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(
uniques, Index(orig).sort_values(), check_names=False
)
else:
tm.assert_index_equal(uniques, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp)
codes, uniques = n.factorize(sort=False)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
expected = Index(o.iloc[5:10].append(o.iloc[:5]))
tm.assert_index_equal(uniques, expected, check_names=False)
else:
expected = o[5:10].append(o[:5])
tm.assert_index_equal(uniques, expected, check_names=False)
def test_duplicated_drop_duplicates_index(self):
# GH 4060
for original in self.objs:
if isinstance(original, Index):
# special case
if original.is_boolean():
result = original.drop_duplicates()
expected = Index([False, True], name="a")
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
# has_duplicates
assert not original.has_duplicates
# create repeated values, 3rd and 5th values are duplicated
idx = original[list(range(len(original))) + [5, 3]]
expected = np.array([False] * len(original) + [True, True], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep="last")
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep="last")
tm.assert_index_equal(result, idx[~expected])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
with pytest.raises(
TypeError,
match=(
r"drop_duplicates\(\) got an " r"unexpected keyword argument"
),
):
idx.drop_duplicates(inplace=True)
else:
expected = Series(
[False] * len(original), index=original.index, name="a"
)
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
assert result is not original
idx = original.index[list(range(len(original))) + [5, 3]]
values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name="a")
expected = Series(
[False] * len(original) + [True, True], index=idx, name="a"
)
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep="last"), expected)
tm.assert_series_equal(
s.drop_duplicates(keep="last"), s[~np.array(base)]
)
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep=False), expected)
tm.assert_series_equal(
s.drop_duplicates(keep=False), s[~np.array(base)]
)
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
def test_drop_duplicates_series_vs_dataframe(self):
# GH 14192
df = pd.DataFrame(
{
"a": [1, 1, 1, "one", "one"],
"b": [2, 2, np.nan, np.nan, np.nan],
"c": [3, 3, np.nan, np.nan, "three"],
"d": [1, 2, 3, 4, 4],
"e": [
datetime(2015, 1, 1),
datetime(2015, 1, 1),
datetime(2015, 2, 1),
pd.NaT,
pd.NaT,
],
}
)
for column in df.columns:
for keep in ["first", "last", False]:
dropped_frame = df[[column]].drop_duplicates(keep=keep)
dropped_series = df[column].drop_duplicates(keep=keep)
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
def test_fillna(self):
# # GH 11343
# though Index.fillna and Series.fillna has separate impl,
# test here to confirm these works as the same
for orig in self.objs:
o = orig.copy()
values = o.values
# values will not be changed
result = o.fillna(o.astype(object).values[0])
if isinstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_series_equal(o, result)
# check shallow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
if not self._allow_na_ops(o):
continue
if needs_i8_conversion(o):
values = o.astype(object).values
fill_value = values[0]
values[0:2] = pd.NaT
else:
values = o.values.copy()
fill_value = o.values[0]
values[0:2] = null_obj
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected, dtype=orig.dtype)
o = klass(values)
# check values has the same dtype as the original
assert o.dtype == orig.dtype
result = o.fillna(fill_value)
if isinstance(o, Index):
tm.assert_index_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# check shallow_copied
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if is_object_dtype(o) or (
isinstance(o, Series) and is_object_dtype(o.index)
):
# if there are objects, only deep will pick them up
assert res_deep > res
else:
assert res == res_deep
if isinstance(o, Series):
assert (
o.memory_usage(index=False) + o.index.memory_usage()
) == o.memory_usage(index=True)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(o)
assert abs(diff) < 100
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, max(o))
assert 0 <= index <= len(o)
index = np.searchsorted(o, max(o), sorter=range(len(o)))
assert 0 <= index <= len(o)
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
self.int_series.drop_duplicates(inplace=value)
def test_getitem(self):
for i in self.indexes:
s = pd.Series(i)
assert i[0] == s.iloc[0]
assert i[5] == s.iloc[5]
assert i[-1] == s.iloc[-1]
assert i[-1] == i[9]
with pytest.raises(IndexError):
i[20]
with pytest.raises(IndexError):
s.iloc[20]
@pytest.mark.parametrize("indexer_klass", [list, pd.Index])
@pytest.mark.parametrize(
"indexer",
[
[True] * 10,
[False] * 10,
[True, False, True, True, False, False, True, True, False, True],
],
)
def test_bool_indexing(self, indexer_klass, indexer):
# GH 22533
for idx in self.indexes:
exp_idx = [i for i in range(len(indexer)) if indexer[i]]
tm.assert_index_equal(idx[indexer_klass(indexer)], idx[exp_idx])
s = pd.Series(idx)
tm.assert_series_equal(s[indexer_klass(indexer)], s.iloc[exp_idx])
def test_get_indexer_non_unique_dtype_mismatch(self):
# GH 25459
indexes, missing = pd.Index(["A", "B"]).get_indexer_non_unique(pd.Index([0]))
tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes)
tm.assert_numpy_array_equal(np.array([0], dtype=np.int64), missing)
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
def test_transpose(self):
for obj in self.objs:
tm.assert_equal(obj.transpose(), obj)
def test_transpose_non_default_axes(self):
for obj in self.objs:
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(1)
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(axes=1)
def test_numpy_transpose(self):
for obj in self.objs:
tm.assert_equal(np.transpose(obj), obj)
with pytest.raises(ValueError, match=self.errmsg):
np.transpose(obj, axes=1)
class TestNoNewAttributesMixin:
def test_mixin(self):
class T(NoNewAttributesMixin):
pass
t = T()
assert not hasattr(t, "__frozen")
t.a = "test"
assert t.a == "test"
t._freeze()
assert "__frozen" in dir(t)
assert getattr(t, "__frozen")
with pytest.raises(AttributeError):
t.b = "test"
assert not hasattr(t, "b")
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Series, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, typ, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Series, Index])
def test_iterable_object_and_category(self, typ, method, dtype, rdtype, obj):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.parametrize("typ", [Series, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_map(self, typ, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta("1 days"), Timedelta("2 days")]
s = Series(vals)
assert s.dtype == "timedelta64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timedelta)
assert res == exp
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = Series(vals)
assert s.dtype == "Period[M]"
for res, exp in zip(s, vals):
assert isinstance(res, pd.Period)
assert res.freq == "M"
assert res == exp
@pytest.mark.parametrize(
"array, expected_type, dtype",
[
(np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
(np.array(["a", "b"]), np.ndarray, "object"),
(pd.Categorical(["a", "b"]), pd.Categorical, "category"),
(
pd.DatetimeIndex(["2017", "2018"], tz="US/Central"),
DatetimeArray,
"datetime64[ns, US/Central]",
),
(
pd.PeriodIndex([2018, 2019], freq="A"),
pd.core.arrays.PeriodArray,
pd.core.dtypes.dtypes.PeriodDtype("A-DEC"),
),
(
pd.IntervalIndex.from_breaks([0, 1, 2]),
pd.core.arrays.IntervalArray,
"interval",
),
# This test is currently failing for datetime64[ns] and timedelta64[ns].
# The NumPy type system is sufficient for representing these types, so
# we just use NumPy for Series / DataFrame columns of these types (so
# we get consolidation and so on).
# However, DatetimeIndex and TimedeltaIndex use the DateLikeArray
# abstraction for code reuse.
# At the moment, we've judged that allowing this test to fail is more
# practical than overriding Series._values to special case
# Series[M8[ns]] and Series[m8[ns]] to return a DateLikeArray.
pytest.param(
pd.DatetimeIndex(["2017", "2018"]),
np.ndarray,
"datetime64[ns]",
marks=[pytest.mark.xfail(reason="datetime _values", strict=True)],
),
pytest.param(
pd.TimedeltaIndex([10 ** 10]),
np.ndarray,
"m8[ns]",
marks=[pytest.mark.xfail(reason="timedelta _values", strict=True)],
),
],
)
def test_values_consistent(array, expected_type, dtype):
l_values = pd.Series(array)._values
r_values = pd.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
tm.assert_equal(l_values, r_values)
@pytest.mark.parametrize(
"array, expected",
[
(np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
(np.array(["0", "1"]), np.array(["0", "1"], dtype=object)),
(pd.Categorical(["a", "a"]), np.array([0, 0], dtype="int8")),
(
pd.DatetimeIndex(["2017-01-01T00:00:00"]),
np.array(["2017-01-01T00:00:00"], dtype="M8[ns]"),
),
(
pd.DatetimeIndex(["2017-01-01T00:00:00"], tz="US/Eastern"),
np.array(["2017-01-01T05:00:00"], dtype="M8[ns]"),
),
(pd.TimedeltaIndex([10 ** 10]), np.array([10 ** 10], dtype="m8[ns]")),
(
pd.PeriodIndex(["2017", "2018"], freq="D"),
np.array([17167, 17532], dtype=np.int64),
),
],
)
def test_ndarray_values(array, expected):
l_values = pd.Series(array)._ndarray_values
r_values = pd.Index(array)._ndarray_values
tm.assert_numpy_array_equal(l_values, r_values)
tm.assert_numpy_array_equal(l_values, expected)
@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
def test_numpy_array(arr):
ser = pd.Series(arr)
result = ser.array
expected = PandasArray(arr)
tm.assert_extension_array_equal(result, expected)
def test_numpy_array_all_dtypes(any_numpy_dtype):
ser = pd.Series(dtype=any_numpy_dtype)
result = ser.array
if is_datetime64_dtype(any_numpy_dtype):
assert isinstance(result, DatetimeArray)
elif is_timedelta64_dtype(any_numpy_dtype):
assert isinstance(result, TimedeltaArray)
else:
assert isinstance(result, PandasArray)
@pytest.mark.parametrize(
"array, attr",
[
(pd.Categorical(["a", "b"]), "_codes"),
(pd.core.arrays.period_array(["2000", "2001"], freq="D"), "_data"),
(pd.core.arrays.integer_array([0, np.nan]), "_data"),
(pd.core.arrays.IntervalArray.from_breaks([0, 1]), "_left"),
(pd.SparseArray([0, 1]), "_sparse_values"),
(DatetimeArray(np.array([1, 2], dtype="datetime64[ns]")), "_data"),
# tz-aware Datetime
(
DatetimeArray(
np.array(
["2000-01-01T12:00:00", "2000-01-02T12:00:00"], dtype="M8[ns]"
),
dtype=DatetimeTZDtype(tz="US/Central"),
),
"_data",
),
],
)
@pytest.mark.parametrize("box", [pd.Series, pd.Index])
def test_array(array, attr, box):
if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index:
pytest.skip("No index type for {}".format(array.dtype))
result = box(array, copy=False).array
if attr:
array = getattr(array, attr)
result = getattr(result, attr)
assert result is array
def test_array_multiindex_raises():
idx = pd.MultiIndex.from_product([["A"], ["a", "b"]])
with pytest.raises(ValueError, match="MultiIndex"):
idx.array
@pytest.mark.parametrize(
"array, expected",
[
(np.array([1, 2], dtype=np.int64), np.array([1, 2], dtype=np.int64)),
(pd.Categorical(["a", "b"]), np.array(["a", "b"], dtype=object)),
(
pd.core.arrays.period_array(["2000", "2001"], freq="D"),
np.array([pd.Period("2000", freq="D"), pd.Period("2001", freq="D")]),
),
(
pd.core.arrays.integer_array([0, np.nan]),
np.array([0, np.nan], dtype=object),
),
(
pd.core.arrays.IntervalArray.from_breaks([0, 1, 2]),
np.array([ | pd.Interval(0, 1) | pandas.Interval |
from pyPheWAS.pyPhewasCorev2 import icd9_codes, icd10_codes
import pandas as pd
import time
import numpy as np
from Bio import Entrez
from tqdm import tqdm
dev_email = '<EMAIL>'
umls_cols = ['CUI', 'LAT', 'TS', 'LUI', 'STT', 'SUI', 'ISPREF', 'AUI', 'SAUI', 'SCUI', 'SDUI', 'SAB', 'TTY', 'CODE',
'STR', 'SRL', 'SUPPRESS', 'CVF', 'other']
usable_cols = ['CUI','LAT','SAB','CODE','STR']
def load_umls(umls_path):
umls = pd.read_table(umls_path, sep='|', header=None, names=umls_cols, skiprows=1, low_memory=False, usecols=usable_cols)
umls = umls[umls['LAT']=='ENG']
return umls
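# Example call (file name is hypothetical): umls = load_umls('MRCONSO.RRF')
# The table is read as pipe-delimited, only English rows (LAT == 'ENG') are kept,
# and only the CUI/LAT/SAB/CODE/STR columns are loaded.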
def load_search_terms(term_file):
df = | pd.read_csv(term_file, header=None, names=['search_terms']) | pandas.read_csv |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
'''
Simple utility functions to improve QOL of QM developers and QM users
'''
import logging
import re
import sys
import traceback
import warnings
import os
import pandas as pd
import numpy as np
from scipy.spatial import distance
from typing import Tuple, TYPE_CHECKING
from copy import deepcopy
from qiskit_metal.draw import Vector
from numpy.linalg import norm
if TYPE_CHECKING:
from qiskit_metal import logger
__all__ = [
'copy_update', 'dict_start_with', 'data_frame_empty_typed', 'clean_name',
'enable_warning_traceback', 'get_traceback', 'print_traceback_easy',
'log_error_easy', 'monkey_patch', 'can_write_to_path',
'can_write_to_path_with_warning', 'toggle_numbers', 'bad_fillet_idxs',
'compress_vertex_list', 'get_range_of_vertex_to_not_fillet'
]
####################################################################################
# Dictionary related
def copy_update(options, *args, deep_copy=True, **kwargs):
'''
Utility function to merge two dictionaries.
Args:
options (object): Options
deep_copy (bool): True to do a deep copy
kwargs (dict): Dictionary of parameters
Returns:
dict: Merged dictionary
'''
if deep_copy:
options = deepcopy(options)
options.update(*args, **kwargs)
else:
options = options.copy()
options.update(*args, **kwargs)
return options
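# Minimal usage sketch (values are illustrative, not from this module):
#   base = {'width': 10, 'color': 'blue'}
#   merged = copy_update(base, color='red', height=5)
#   # merged == {'width': 10, 'color': 'red', 'height': 5}; with the default
#   # deep_copy=True the original `base` dict is left unchanged.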
def dict_start_with(my_dict, start_with, as_=list):
''' Case sensitive
https://stackoverflow.com/questions/17106819/accessing-python-dict-values-with-the-key-start-characters
Args:
my_dict (dict): The dictionary
start_with (str): String prefix to check for
as_ (type): Return type, list or dict. Defaults to list.
Returns:
list or dict: Parts of the dictionary with keys starting with the given text
.. code-block:: python
my_dict = {'name': 'Klauss', 'age': 26, 'Date of birth': '15th july'}
dict_start_with(my_dict, 'Date')
'''
if as_ == list:
# start_with in k]
return [v for k, v in my_dict.items() if k.startswith(start_with)]
elif as_ == dict:
return {k: v for k, v in my_dict.items() if k.startswith(start_with)}
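# For the docstring example above, dict_start_with(my_dict, 'Date') returns
# ['15th july']; passing as_=dict instead returns {'Date of birth': '15th july'}.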
# def display_options(*ops_names, options=None, find_dot_keys=True, do_display=True):
# '''
# Print html display of options dictionary by default `DEFAULT_OPTIONS`
# Example use:
# ---------------
# display_options('make_transmon_pocket_v1', 'make_transmon_connector_v1')
# or
# dfs, html = display_options(Metal_Transmon_Pocket.__name__, do_display=False)
# '''
# # IDEA: display also ._hfss and ._gds etc. for those that have it and add to plugins
# if options is None:
# from .. import DEFAULT_OPTIONS
# options = DEFAULT_OPTIONS
# res = []
# for keyname in ops_names:
# if find_dot_keys:
# names = list(filter(lambda x, match=keyname: x is match or
# x.startswith(match+'.'), DEFAULT_OPTIONS.keys()))
# names.sort()
# for name in names:
# res += [pd.Series(options[name], name=name).to_frame()]
# else:
# res += [pd.Series(options[keyname], name=keyname).to_frame()]
# from pyEPR.toolbox import display_dfs
# res_html = display_dfs(*res, do_display=do_display) #why not just directly call the function DataFrame_display_side_by_side(*args) ?
# return res, res_html
def data_frame_empty_typed(column_types: dict):
"""Creates and empty DataFrame with dtypes for each column given
by the dictionary.
Arguments:
column_types (dict): A key, dtype pairs
Returns:
DataFrame: An empty dataframe with the typed columns
"""
df = | pd.DataFrame() | pandas.DataFrame |
"""
Analysis functions for pom data
05/09/2018
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
" Creating dataframes and aggregating population biomarker data per simulation "
def fill_in_grouped_sim_data(grouped_sim_data, results, biomarker,
sim_types, sim_type_amp_relationship,
sim_name_conversions,
how_to_fill, filter_biomarkers_for_outliers=False, threshold=None):
""""
Fill in a dataframe with aggregated biomarker data
"""
# Check inputs
assert any([amp_scale_factor == 1 for _,amp_scale_factor in sim_type_amp_relationship.items()]), "No base value of 1 for any of the sim types in sim_type_amp_relationship"
for name in grouped_sim_data.index:
# To do here: get simulations from the conversion dict, do the appropriate averaging for the how_to_fill method, add that data in
# Then, get gnav18 from the simulation short name and the amplitudes from the long names using the conversion dict.
# Add these in too.
# And we're done
#names =
# Get simulations
simulations = sim_name_conversions[name]
for simulation, sim_type in simulations.items():
# Input biomarker data
unaggregated_data = results.loc[:,(simulation,biomarker)]
if threshold:
# Only keep rows that are not below or at threshold
unaggregated_data = unaggregated_data[unaggregated_data <= threshold]
# filtering of biomarkers for outliers
if filter_biomarkers_for_outliers:
if biomarker == "APHalfWidth":
# Remove outliers
outlier_definition = get_outlier_definition(biomarker) # ms
outliers = unaggregated_data >= outlier_definition
num_outliers = outliers.sum()
mask = ~outliers # Invert boolean series
unaggregated_data = unaggregated_data[mask]
#if num_outliers > 0:
#print("Removed {} AP Half Width outliers > {}".format(num_outliers, outlier_definition))
if how_to_fill == 'mean':
data = unaggregated_data.mean()
elif how_to_fill == 'std':
data = unaggregated_data.std()
elif how_to_fill == 'median':
data = unaggregated_data.median()
elif how_to_fill == 'mean_fillna':
data = unaggregated_data.fillna(0).mean()
elif how_to_fill == 'mean_freq_fillna':
assert biomarker == 'ISI', "Biomarker for frequency needs to be ISI not {}".format(biomarker)
# Convert to frequency, then fill nans with 0s and take mean
unaggregated_data = 1000.0/unaggregated_data
data = unaggregated_data.fillna(0).mean()
elif how_to_fill == 'mean_freq_dropna':
assert biomarker == 'ISI'
# Convert to frequency, then DROP nans and take mean
unaggregated_data = 1000.0/unaggregated_data
data = unaggregated_data.dropna().mean()
elif isinstance(how_to_fill, int):
# Model index
data = unaggregated_data.loc[how_to_fill]
else:
raise ValueError("How to fill method: {} not supported.".format(how_to_fill))
grouped_sim_data.at[name,(biomarker, sim_type)] = data
# Input amplitudes
amp = get_amplitude(simulation, amp_units='pA', delimiter='_')
grouped_sim_data.at[name, ('Amp', sim_type)] = amp
# Input scaling factors
scaling_factors = list(grouped_sim_data['Scaling factors'].columns)
for scaling_factor in scaling_factors:
# Get scaling factor
scaling_factor_value = get_parameter_scaling(simulation, scaling_factor, delimiter='_')
grouped_sim_data.at[name, ('Scaling factors', scaling_factor)] = scaling_factor_value
def make_grouped_sim_data(pop, biomarker='APFullWidth', agg='mean', filter_outliers=False, scaled_parameters=['GNav18'], threshold=None):
" Aggregate population biomarker results per simulation to analyse at a per simulation level over the ensemble population. "
sim_types = ['step', 'ramp'] # First entry in list is the base
sim_type_as_base = sim_types[0] # Use step as base as ramp has amplitude x10 of step
sim_type_amp_relationship = {'step':1, 'ramp':10}
assert sim_type_amp_relationship[sim_type_as_base] == 1
grouped_sim_data = make_empty_grouped_sim_data( pop=pop,
biomarker=biomarker,
filter_outliers=filter_outliers,
scaled_parameters=scaled_parameters,
sim_types=sim_types,
sim_type_as_base=sim_type_as_base,
sim_type_amp_relationship=sim_type_amp_relationship,
)
sim_name_conversions = make_sim_name_conversions (pop.get_simulation_names(),
sim_types,
sim_type_amp_relationship,
sim_type_as_base
)
fill_in_grouped_sim_data(grouped_sim_data, pop.results, biomarker,
sim_types, sim_type_amp_relationship,
sim_name_conversions,
how_to_fill=agg,
filter_biomarkers_for_outliers=filter_outliers,
threshold=threshold)
if biomarker != 'Firing pattern':
grouped_sim_data = grouped_sim_data.astype(float)
return grouped_sim_data
def make_empty_grouped_sim_data(pop, biomarker='APFullWidth', filter_outliers=False,
scaled_parameters=['GNav18'], sim_types=['step','ramp'],
sim_type_as_base='step', sim_type_amp_relationship={'step':1, 'ramp':10},
):
" Aggregate population biomarker results per simulation to analyse at a per simulation level over the ensemble population. "
arrays =[[biomarker]*len(sim_types)+['Amp']*len(sim_types) + ['Scaling factors']*len(scaled_parameters),sim_types*2 + scaled_parameters] # Build multiarray columns
columns = pd.MultiIndex.from_arrays(arrays, names=['',''])
sim_names = pop.get_simulation_names()
sim_name_conversions = make_sim_name_conversions(sim_names, sim_types, sim_type_amp_relationship, sim_type_as_base)
short_sim_names = sorted(list(sim_name_conversions.keys()))
grouped_sim_data = pd.DataFrame(columns=columns, index=short_sim_names)
return grouped_sim_data
def make_sim_name_conversions(sim_names, sim_types, sim_type_amp_relationship, sim_type_as_base):
" Make conversion dict from short sim names to full sim names with sim type "
sim_name_conversions = {}
# Get list of sim_names common to step and ramp
# Complexity: ramp and step amps are different so we will just get the name from step
for sim_type in sim_types:
for sim_name in sim_names:
# # Ignore rheobase simulations and do base sim type first - important to get oreder right in sim_name_conversions
if (sim_name not in sim_types) & (sim_type in sim_name):
short_sim_name, _sim_type = process_sim_name(sim_name, sim_types, sim_type_amp_relationship, amp_units='pA', delimiter='_')
assert _sim_type == sim_type
# Build up conversion dict from short name to full names and store sim_type
if short_sim_name in sim_name_conversions.keys():
sim_name_conversions[short_sim_name][sim_name] = sim_type
else:
assert sim_type == sim_type_as_base, "Sim type: {}, name:{}, short_sim_name:{}".format(sim_type, sim_name, short_sim_name)
sim_name_conversions[short_sim_name] = {sim_name:sim_type}
return sim_name_conversions
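# Illustrative shape of the returned mapping (the name format is hypothetical):
#   {'GNav18_1.0_20pA': {'GNav18_1.0_20pA_step': 'step',
#                        'GNav18_1.0_200pA_ramp': 'ramp'}}
# i.e. each short name maps to its full simulation names keyed by sim_type,
# with ramp amplitudes 10x the step amplitude per sim_type_amp_relationship.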
" Data processing "
def process_firing_pattern_data(firing_pattern_percentages, sim_types=['step','ramp'],
sim_type_amp_relationship = {'step':1, 'ramp':10}, scaled_parameters=['GNav18'],
):
"""
Process simulation names to extract simulation parameters and rename to remove stimulation protocol from name
Flow:
1. Convert sim names by removing sim type and storing the conversion between full and shortened names.
2. Create a formatted dataframe and fill in the firing pattern percentages from the values in the original dataframe.
3. Extract simulation parameters from the simulation name and add to the formatted dataframe.
TODO: This code shares a lot of code with the functions for aggregating biomarker data. Could refactor into one set of functions
sharing common code.
"""
# TODO: Could turn these lines creating sim_names and sim_name_conversions
# into a function shared with similar code for biomarkers
sim_type_as_base = sim_types[0]
sim_names = firing_pattern_percentages.index.tolist()
short_sim_names = [] # Simulation names without stimulation protocol
sim_name_conversions = {} # Conversion between full and short sim names
for sim_type in sim_types:
for sim_name in sim_names:
if (sim_name not in sim_types) & (sim_type in sim_name): # Remove rheobase simulations
short_sim_name, _sim_type = process_sim_name(sim_name, sim_types, sim_type_amp_relationship, amp_units='pA', delimiter='_')
assert _sim_type == sim_type
# Create conversion between names
if short_sim_name in sim_name_conversions.keys():
sim_name_conversions[short_sim_name][sim_name] = sim_type
else:
assert sim_type == sim_type_as_base, (
"Sim type: {}, name:{}, short_sim_name:{} is not the base sim type for the sim type amp relationship.".format(
sim_type, sim_name, short_sim_name))
sim_name_conversions[short_sim_name] = {sim_name:sim_type}
if sim_type == sim_type_as_base: # Only add step to sim_names to avoid adding ramp as ramp has different amplitude
short_sim_names.append(short_sim_name)
short_sim_names = sorted(short_sim_names)
formatted_firing_pattern_data = format_firing_pattern_percentages(
firing_pattern_percentages,
short_sim_names,
sim_name_conversions,
scaled_parameters,
sim_types,
sim_type_amp_relationship,
)
return formatted_firing_pattern_data
def format_firing_pattern_percentages(firing_pattern_percentages, short_sim_names, sim_name_conversions, scaled_parameters, sim_types, sim_type_amp_relationship):
"""
Fill in a dataframe with firing pattern percentages for each simulation
Equivalent to fill_in_grouped_sim_data() but for firing patterns not single numeric biomarkers.
Copy code from fill_in_grouped_sim_data where needed but:
1. We don't need a how to fill option as we aggregate by percentages always.
2. We do need to fill in for all firing patterns, not just one biomarker.
"""
" Create formatted dataframe with column multiindex "
assert len(scaled_parameters) == 1, "Multiple scaled parameters not supported by format_firing_percentages yet"
firing_pattern_names = firing_pattern_percentages.columns.tolist()
sim_type_as_base = sim_types[0] # First sim type in list is the base
assert sim_type_amp_relationship[sim_type_as_base] == 1
arrays = [list(np.repeat(firing_pattern_names,len(sim_types))) + ['Amp','Amp', 'Scaling factors'],
sim_types*(1+len(firing_pattern_names)) + scaled_parameters] # Build multiarray columns
columns = pd.MultiIndex.from_arrays(arrays, names=['',''])
formatted_firing_pattern_data = | pd.DataFrame(index=short_sim_names, columns=columns) | pandas.DataFrame |
import pandas as pd
HDNames= ['Cement','BFS','FLA','Water','SP','CA','FA','Age','CCS']
Data = pd.read_excel('ConcreteData.xlsx', names=HDNames)
print(Data.head(20))
print(Data.info())
summary = Data.describe()
print(summary)
import seaborn as sns
sns.set(style="ticks")
sns.boxplot(data = Data)
sns.pairplot(data = Data)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
print(scaler.fit(Data))
DataScaled = scaler.fit_transform(Data)
DataScaled = pd.DataFrame(DataScaled, columns=HDNames)
summary = DataScaled.describe()
print(summary)
sns.boxplot(data = DataScaled)
from sklearn.model_selection import train_test_split
Predictors = pd.DataFrame(DataScaled.iloc[:,:8])
Response = | pd.DataFrame(DataScaled.iloc[:,8]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Week 4 Video notebooks
#
# This is the notebook file corresponding to the Week 4 videos.
# ## Encoding data types
#
# Reference: [Altair documentation](https://altair-viz.github.io/user_guide/encoding.html#encoding-data-types)
# In[1]:
import pandas as pd
import altair as alt
# In[2]:
df = pd.DataFrame({"a":[3,2,1,4],"b":[4,8,3,1]})
# In[3]:
alt.Chart(df).mark_bar().encode(
x = "a",
y = "b"
)
# In[4]:
df
# In[5]:
alt.Chart(df).mark_bar(width=50).encode(
x = "a",
y = "b"
)
# In[6]:
alt.Chart(df).mark_bar().encode(
x = "a:N",
y = "b",
color = "a:N"
)
# In[7]:
alt.Chart(df).mark_bar().encode(
x = "a:O",
y = "b",
color = "a:O"
)
# In[8]:
alt.Chart(df).mark_bar().encode(
x = alt.X("a:O", sort=None),
y = "b",
color = "a:O"
)
# In[9]:
df.a
# ## Interactive bar chart
# In[10]:
import pandas as pd
import altair as alt
import seaborn as sns
# In[11]:
penguin = sns.load_dataset("penguins")
# In[12]:
penguin
# In[13]:
penguin.columns
# In[14]:
c1 = alt.Chart(penguin).mark_circle().encode(
x = alt.X('bill_length_mm', scale=alt.Scale(zero=False)),
y = alt.Y('flipper_length_mm',scale=alt.Scale(domain=(160,240))),
color = "species"
)
# In[15]:
type(c1)
# In[16]:
c1
# In[17]:
brush = alt.selection_interval()
# In[18]:
c1.add_selection(brush)
# In[19]:
c2 = alt.Chart(penguin).mark_bar().encode(
x = "species",
y = "count()",
color = "species"
)
# In[20]:
c2
# In[21]:
c1 = c1.add_selection(brush)
# In[22]:
penguin.species.unique()
# In[23]:
c2 = alt.Chart(penguin).mark_bar().encode(
x = alt.X("species", scale = alt.Scale(domain=penguin.species.unique())),
y = alt.Y("count()", scale = alt.Scale(domain=(0,160))),
color = "species"
).transform_filter(brush)
# In[24]:
c1|c2
# ## pd.to_datetime
# In[25]:
import pandas as pd
import altair as alt
import seaborn as sns
# In[26]:
taxis = sns.load_dataset("taxis")
# In[27]:
taxis.head(6)
# In[28]:
alt.Chart(taxis[::10]).mark_circle().encode(
x = "pickup",
y = "distance"
)
# In[29]:
taxis.dtypes
# In[30]:
taxis.loc[10,"pickup"]
# In[31]:
type(taxis.loc[10,"pickup"])
# In[32]:
alt.Chart(taxis[::10]).mark_circle().encode(
x = "pickup:T",
y = "distance",
tooltip = "pickup:T"
)
# In[33]:
taxis["pickup"]
# In[34]:
pd.to_datetime(taxis["pickup"])
# In[35]:
| pd.to_datetime(taxis["pickup"]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 13:37:10 2019
@author:Imarticus Machine Learning Team
"""
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
pd.options.mode.chained_assignment = None # default='warn'
order_products_test_df= | pd.read_csv("order_products_test.csv") | pandas.read_csv |
"""
Author: <NAME>
Created: 27/08/2020 11:13 AM
"""
import pandas as pd
import os
import numpy as np
from supporting_functions.conversions import convert_RH_vpa
from supporting_functions.woodward_2020_params import get_woodward_mean_full_params
test_dir = os.path.join(os.path.dirname(__file__), 'test_data')
def establish_peyman_input(return_pet=False):
# use the scott farm so that it doesn't need irrigation
# time period [2010 - 2013)
# load weather data
weather_path = os.path.join(test_dir, 'hamilton_ruakura_ews2010-2013_{}.csv')
pressure = pd.read_csv(os.path.join(test_dir, 'hamilton_AWS_pressure.csv'),
skiprows=8).loc[:, ['year',
'doy',
'pmsl']].set_index(['year', 'doy'])
rain = pd.read_csv(weather_path.format('rain')).loc[:, ['year',
'doy',
'rain']].set_index(['year', 'doy'])
temp = pd.read_csv(weather_path.format('temp')).loc[:, ['year',
'doy',
'tmax', 'tmin']].set_index(['year', 'doy'])
rad = pd.read_csv(weather_path.format('rad')).loc[:, ['year',
'doy',
'radn']].set_index(['year', 'doy'])
wind = pd.read_csv(weather_path.format('wind')).loc[:, ['year',
'doy',
'wind']].set_index(['year', 'doy'])
pet = pd.read_csv(weather_path.format('pet')).loc[:, ['year',
'doy',
'pet']].set_index(['year', 'doy'])
rh = pd.read_csv(weather_path.format('rh')).loc[:, ['year',
'doy',
'rh']]
rh.loc[:, 'rh'] = pd.to_numeric(rh.rh, errors='coerce')
rh = rh.groupby(['year', 'doy']).mean()
dates = pd.Series(pd.date_range('2010-01-01', '2012-12-31'))
matrix_weather = pd.DataFrame({'year': dates.dt.year,
'doy': dates.dt.dayofyear,
'to_delete': 1}).set_index(['year', 'doy'])
matrix_weather = pd.merge(matrix_weather, temp, how='outer', left_index=True, right_index=True)
matrix_weather = pd.merge(matrix_weather, rain, how='outer', left_index=True, right_index=True)
matrix_weather = pd.merge(matrix_weather, rad, how='outer', left_index=True, right_index=True)
matrix_weather = pd.merge(matrix_weather, rh, how='outer', left_index=True, right_index=True)
matrix_weather = pd.merge(matrix_weather, wind, how='outer', left_index=True, right_index=True)
matrix_weather = pd.merge(matrix_weather, pet, how='outer', left_index=True, right_index=True)
matrix_weather = pd.merge(matrix_weather, pressure, how='outer', left_index=True, right_index=True)
matrix_weather.loc[:, 'vpa'] = convert_RH_vpa(matrix_weather.loc[:, 'rh'],
matrix_weather.loc[:, 'tmin'],
matrix_weather.loc[:, 'tmax'])
matrix_weather = matrix_weather.fillna(method='ffill')
if return_pet:
matrix_weather.drop(columns=['rh', 'to_delete', 'wind', 'vpa', 'pmsl'], inplace=True)
else:
matrix_weather.drop(columns=['rh', 'to_delete', 'pet', 'pmsl'], inplace=True)
matrix_weather.loc[:, 'max_irr'] = 10.
matrix_weather.loc[:, 'irr_trig'] = 0
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather.loc[:, 'irr_trig_store'] = 0
matrix_weather.loc[:, 'irr_targ_store'] = 1
matrix_weather.loc[:, 'external_inflow'] = 0
matrix_weather.reset_index(inplace=True)
# load harvest data from <NAME>'s paper
harvest_nm = 'harvest_Scott_0.txt'
days_harvest = pd.read_csv(os.path.join(test_dir, harvest_nm),
delim_whitespace=True,
names=['year', 'doy', 'percent_harvest']
).astype(int) # floor matches what simon did.
days_harvest = days_harvest.loc[(days_harvest.year >= 2010) & (days_harvest.year < 2013)]
days_harvest.loc[:, 'frac_harv'] = days_harvest.loc[:, 'percent_harvest'] / 100
days_harvest.loc[:, 'harv_trig'] = 0
days_harvest.loc[:, 'harv_targ'] = 0
days_harvest.loc[:, 'weed_dm_frac'] = 0
days_harvest.loc[:, 'reseed_trig'] = -1
days_harvest.loc[:, 'reseed_basal'] = 1
days_harvest.drop(columns=['percent_harvest'], inplace=True)
# load parameters from simon woodward's paper
params = get_woodward_mean_full_params('scott')
doy_irr = [0]
return params, matrix_weather, days_harvest, doy_irr
def _compair_pet():
"""just to compaire the pet and peyman results, the are slightly differnt,
but I think that is due to different methods of calculating PET,"""
from basgra_python import run_basgra_nz
verbose = False
params, matrix_weather, days_harvest, doy_irr = establish_peyman_input(False)
peyman_out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, dll_path='default',
supply_pet=False)
params, matrix_weather, days_harvest, doy_irr = establish_peyman_input(True)
pet_out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, dll_path='default',
supply_pet=True)
from supporting_functions.plotting import plot_multiple_results
plot_multiple_results({'pet': pet_out, 'peyman': peyman_out})
def establish_org_input(site='scott'):
if site == 'scott':
harvest_nm = 'harvest_Scott_0.txt'
weather_nm = 'weather_Scott.txt'
# col = 1 + 8 * (1)
elif site == 'lincoln':
harvest_nm = 'harvest_Lincoln_0.txt'
weather_nm = 'weather_Lincoln.txt'
# col = 1 + 8 * (3 - 1)
else:
raise ValueError('unexpected site')
params = get_woodward_mean_full_params(site)
matrix_weather = pd.read_csv(os.path.join(test_dir, weather_nm),
delim_whitespace=True, index_col=0,
header=0,
names=['year',
'doy',
'tmin',
'tmax',
'rain',
'radn',
'pet'])
# set start date as doy 121 2011
idx = (matrix_weather.year > 2011) | ((matrix_weather.year == 2011) & (matrix_weather.doy >= 121))
matrix_weather = matrix_weather.loc[idx].reset_index(drop=True)
# set end date as doy 120, 2017
idx = (matrix_weather.year < 2017) | ((matrix_weather.year == 2017) & (matrix_weather.doy <= 120))
matrix_weather = matrix_weather.loc[idx].reset_index(drop=True)
matrix_weather.loc[:, 'max_irr'] = 10.
matrix_weather.loc[:, 'irr_trig'] = 0
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather.loc[:, 'irr_trig_store'] = 0
matrix_weather.loc[:, 'irr_targ_store'] = 1
matrix_weather.loc[:, 'external_inflow'] = 0
days_harvest = pd.read_csv(os.path.join(test_dir, harvest_nm),
delim_whitespace=True,
names=['year', 'doy', 'percent_harvest']
).astype(int) # floor matches what simon did.
days_harvest.loc[:, 'frac_harv'] = days_harvest.loc[:, 'percent_harvest'] / 100
days_harvest.loc[:, 'harv_trig'] = 0
days_harvest.loc[:, 'harv_targ'] = 0
days_harvest.loc[:, 'weed_dm_frac'] = 0
days_harvest.loc[:, 'reseed_trig'] = -1
days_harvest.loc[:, 'reseed_basal'] = 1
days_harvest.drop(columns=['percent_harvest'], inplace=True)
doy_irr = [0]
return params, matrix_weather, days_harvest, doy_irr
def clean_harvest(days_harvest, matrix_weather):
stop_year = matrix_weather['year'].max()
stop_day = matrix_weather.loc[matrix_weather.year == stop_year, 'doy'].max()
days_harvest.loc[(days_harvest.year == stop_year) & (days_harvest.doy > stop_day),
'year'] = -1 # cull harvest after end of weather data
days_harvest = days_harvest.loc[days_harvest.year > 0] # the size matching is handled internally
return days_harvest
def get_org_correct_values():
sample_output_path = os.path.join(test_dir, 'sample_org_output.csv')
sample_data = | pd.read_csv(sample_output_path, index_col=0) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 2 15:41:04 2021
Run MLR hedonic with run_MLR_on_all_years(features=best1)
use plot_price_rooms_new_from_new_ds for the MLR rooms/new price time series
for standardized betas use plot_regular_feats_comparison_from_new_ds
For RF, HP tuning:
run_CV_on_all_years(df,savepath=ml_path,model_name='RF', feats=best_rf2+['SEI'])
Multifunction for RF results:
loop_over_RF_models_years(df, path=work_david/'ML', mode='score',
pgrid='normal')
use mode = 'score' to calculate the R^2 for training and test
use mode = 'time-series' to get the predictions.
use mode = 'shap' to calculate the SHAP values for the test sets (warning: this takes the longest).
use mode = 'X_test' to get the test sets.
use mode = 'FI' to get feature importances.
then there are plot functions for RF and MLR:
1) plot_RF_time_series(time-series)
2) plot_RF_FI_results(fi)
3) First, produce MLR SHAPS: svs=produce_shap_MLR_all_years(df)
then, produce_RF_abs_SHAP_all_years(path=ml_path/'RF_rooms_345',mlr_shap=svs)
4)
how to produce weighted mean distance to ECs for all Israeli settlements:
first load Israeli settlement mid-points:
gdf=geo_location_settelments_israel() (from cbs_procedures)
then run calculate_distance_from_gdf_to_employment_centers:
dis = calculate_distance_from_gdf_to_employment_centers(gdf,n=18, x_coord_name='X', y_coord_name='Y')
finally save to csv:
dis.to_csv(work_david/'Israel_settlments_with_mean_weighted_distance_to_ECs.csv', na_rep='NA',sep=',', index=False)
@author: shlomi
"""
from MA_paths import work_david
from MA_paths import savefig_path
import numpy as np
ml_path = work_david / 'ML'
features = ['Floor_number', 'SEI', 'New', 'Periph_value', 'Sale_year', 'Rooms_345',
'distance_to_nearest_kindergarten', 'distance_to_nearest_school', 'Total_ends']
features1 = ['FLOORNO', 'DEALNATURE', 'NEWPROJECTTEXT',
'BUILDINGYEAR', 'SEI_value', 'Ground', 'P2015_value', 'year', 'Building_Growth_Rate']
features2 = ['FLOORNO', 'DEALNATURE', 'NEWPROJECTTEXT',
'SEI_value', 'Ground', 'year', 'Building_Growth_Rate']
features3 = ['Floor_number', 'SEI', 'New', 'Periph_value', 'Sale_year', 'Rooms_345',
'Total_ends', 'mean_distance_to_4_mokdim']
best = ['SEI', 'New', 'Sale_year', 'Rooms_345',
'Total_ends', 'mean_distance_to_28_mokdim', 'Netflow']
best1 = ['SEI', 'New', 'Sale_year', 'Rooms_345',
'Total_ends', 'mean_distance_to_28_mokdim']
best_years = best + ['year_{}'.format(x) for x in np.arange(2001, 2020)]
best_for_bs = best + ['city_code', 'Price']
next_best = ['Floor_number', 'New', 'Sale_year', 'Rooms',
'Total_ends']
best_rf = ['SEI_value_2015', 'SEI_value_2017',
'New', 'Sale_year', 'Rooms','Netflow',
'Total_ends', 'mean_distance_to_28_mokdim']
best_rf1 = ['SEI_value_2015', 'SEI_value_2017',
'New', 'Sale_year', 'Rooms',
'Total_ends', 'mean_distance_to_28_mokdim']
best_rf2 = ['SEI_value_2015', 'SEI_value_2017',
'New', 'Sale_year', 'Rooms_345',
'Total_ends', 'mean_distance_to_28_mokdim']
dummies = ['New', 'Rooms_4', 'Rooms_5']
year_dummies = ['year_{}'.format(x) for x in np.arange(2001,2020)]
room_dummies = ['Rooms_4', 'Rooms_5']
best_regular = ['SEI', 'Total_ends', 'mean_distance_to_28_mokdim', 'Netflow']
best_regular1 = ['SEI', 'Total_ends', 'mean_distance_to_28_mokdim']
general_features = ['Price', 'Rooms', 'Area_m2', 'New', 'Floor_number', 'Floors_In_Building',
'Age', 'Total_ends', 'SEI', 'mean_distance_to_28_mokdim']
apts = ['דירה', 'דירה בבית קומות']
apts_more = apts + ["קוטג' דו משפחתי", "קוטג' חד משפחתי",
"דירת גן", "בית בודד", "דירת גג", "דירת גג (פנטהאוז)"]
plot_names = {'Floor_number': 'Floor',
# 'New': 'New Apartment',
'Periph_value': 'Peripheriality',
'distance_to_nearest_kindergarten': 'Nearest kindergarten',
'distance_to_nearest_school': 'Nearest school',
'Total_ends': 'Building rate',
'mean_distance_to_28_mokdim': 'Distance to ECs',
'SEI': 'Socio-Economic Index',
'SEI_value_2015': 'Social-Economic Index',
'SEI_value_2017': 'Social-Economic Index',
'Rooms': 'Rooms', 'Rooms_3': '3 Rooms', 'Rooms_5': '5 Rooms',
'Netflow': 'Net migration',
'MISH': 'AHP',
'New': 'Used/New'
}
short_plot_names = {'Total_ends': 'BR',
'mean_distance_to_28_mokdim': 'Distance',
'SEI': 'SEI', 'New': 'Used/New'}
vars_plot_names = {'Total_ends': 'BR',
'mean_distance_to_28_mokdim': 'DI',
'SEI': 'SE', 'New': 'NE', 'Rooms': 'RM'}
vars_explained_plot_names = {'Total_ends': 'BR (Building Rate)',
'mean_distance_to_28_mokdim': 'DI (Distance to ECs)',
'SEI': 'SE (Socio-Economic Index)', 'New': 'NE (Used/New)', 'Rooms': 'RM (# of Rooms)'}
add_units_dict = {'Distance': 'Distance [km]', 'BR': r'BR [Apts$\cdot$yr$^{-1}$]',
'Netflow': r'Netflow [people$\cdot$yr$^{-1}$]'}
add_units_dict_short = {'DI': 'DI [km]', 'BR': r'BR [Apts$\cdot$yr$^{-1}$]'}
# AHP: Affordable Housing Program
def pct_change(x):
import numpy as np
return (np.exp(x)-1)*100
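# Worked example: prices are modelled in log units, so pct_change converts a
# log-difference into a percentage price change, e.g.
#   pct_change(0.1)  -> (e**0.1 - 1) * 100  ~  10.5 (% increase)
#   pct_change(-0.1) -> (e**-0.1 - 1) * 100 ~  -9.5 (% decrease)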
def plot_single_tree(rf_model, X_train, y_train, est_index=100, samples=25, max_depth=2):
from sklearn import tree
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# rf = RandomForestRegressor(max_depth=15,n_estimators=250)
# feats = ['Status', 'Rooms', 'BR', 'Distance', 'SEI']
X_train = X_train.rename(vars_plot_names, axis=1)
feats = ['NE', 'RM', 'BR', 'DI', 'SE']
# sns.set_theme(font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
inds = X_train.sample(n=samples).index
y_train = np.log(np.exp(y_train)/4)
rf_model.fit(X_train.loc[inds], y_train.loc[inds])
_ = tree.plot_tree(rf_model[est_index],precision=2, fontsize=18, rounded=True,
feature_names=feats, filled=True, ax=ax, max_depth=max_depth, proportion=False)
filename = 'Nadlan_tree_example.png'
plt.savefig(savefig_path / filename, bbox_inches='tight', pad_inches=0.1)
return fig
def compare_r2_RF_MLR(sc, ds, mode='diagram'):
"""compare R2 score from dataset (sc=loop_over with mode=score)
and ds=run_MLR_on_all_years"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style='ticks', font_scale=1.6)
fig, ax = plt.subplots(figsize=(17, 10))
df = ds['R-squared'].to_dataframe()
df = pd.concat([df, sc], axis=1)
df.columns = ['Hedonic', 'RF train', 'RF test']
df['year'] = df.index
df = df.melt(id_vars=['year'], var_name='Model',
value_name=r'R$^2$')
# df['year'] = pd.to_datetime(df['year'], format='%Y')
if mode == 'diagram':
ax = sns.barplot(data=df, x='year', ax=ax, hue='Model', y=r'R$^2$')
# ax.set_ylabel('Apartment area [{}]'.format(unit_label))
h, l =ax.get_legend_handles_labels()
ax.legend_.remove()
ax.legend(h, l, ncol=3, title='Model')
ax.set_xlabel('')
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
# for wide dataframe:
# df = df.pivot_table(columns=['Model'],values='R$^2$',index='year')
return df
def remove_outlier_area_per_room(df, col='Area_m2', k=1.5):
from Migration_main import remove_outlier
import pandas as pd
dfs = []
for room in df['Rooms'].dropna().unique():
df1 = remove_outlier(df[df['Rooms'] == room], col_name=col, k=k)
dfs.append(df1)
df = pd.concat(dfs, axis=0)
return df
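# Note: remove_outlier (imported from Migration_main) is assumed to drop rows
# whose `col` value falls outside bounds scaled by k; running it separately for
# each room count means every apartment is judged against others with the same
# number of rooms.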
def plot_rooms_area_distribution(df, units='m2'):
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style='ticks', font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
if units == 'ft2':
df['Area_ft2'] = df['Area_m2'] * 10.764
col = 'Area_ft2'
unit_label = 'ft$^2$'
elif units == 'm2':
col = 'Area_m2'
unit_label = 'm$^2$'
sns.violinplot(data=df, x='Rooms', y=col, ax=ax, palette='inferno')
ax.set_ylabel('Apartment area [{}]'.format(unit_label))
ax.set_xlabel('Number of rooms')
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
return fig
def plot_general_features_corr_heatmap(df, feats=general_features, year=None):
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style='ticks', font_scale=1.5)
fig, ax = plt.subplots(figsize=(17, 10))
if year is not None:
df = df[df['Sale_year']==year]
title = 'year = {}'.format(year)
else:
title = '2000 to 2019'
dff = df[feats]
dff = dff.rename(short_plot_names, axis=1)
g = sns.heatmap(dff.corr(),annot=True,cmap='coolwarm', ax=ax, center=0)
g.set_xticklabels(g.get_xticklabels(), rotation=45, ha='right')
fig.tight_layout()
fig.suptitle(title)
fig.subplots_adjust(top=0.945)
return fig
def plot_RF_time_series(X_ts, units='nis'):
"""plot rooms new time series from RF model"""
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from cbs_procedures import read_mean_salary
sns.set_theme(style='ticks', font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
X_ts = X_ts[X_ts['Rooms'].isin([3, 4, 5])]
X_ts['Rooms'] = X_ts['Rooms'].astype(int)
X_ts = X_ts.rename({'New': 'Used/New'}, axis=1)
X_ts.loc[X_ts['Used/New'] == 0, 'Used/New'] = 'Used'
X_ts.loc[X_ts['Used/New'] == 1, 'Used/New'] = 'New'
if units == 'dollar':
X_ts['Price'] /= 4 * 1000
ylabel = 'Apartment Price [Thousands $]'
elif units == 'nis':
X_ts['Price'] /= 1e6
ylabel = 'Apartment Price [millions NIS]'
elif units == 'salary':
sal = read_mean_salary().rename({'year': 'Year'}, axis=1)
X_ts = pd.merge(X_ts, sal, on='Year', how='inner')
X_ts['Price'] /= X_ts['mean_salary']
ylabel = 'Mean salary'
X_ts['Year'] = pd.to_datetime(X_ts['Year'], format='%Y')
X_ts = X_ts.reset_index(drop=True)
sns.lineplot(data=X_ts, x='Year', y='Price', hue='Rooms', style='Used/New',
ax=ax, palette='tab10', markers=True, markersize=10)
ax.set_ylabel(ylabel)
ax.set_xlabel('')
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
return fig
def produce_shap_MLR_all_years(df, feats=best1, abs_val=True):
from sklearn.linear_model import LinearRegression
import shap
import numpy as np
years = np.arange(2000, 2020, 1)
svs = []
for year in years:
print(year)
X, y = prepare_new_X_y_with_year(df, features=feats, year=year,
y_name='Price')
lr = LinearRegression()
lr.fit(X, y)
ex = shap.LinearExplainer(lr, X)
shap_values = ex.shap_values(X)
SV = convert_shap_values_to_pandas(shap_values, X)
if abs_val:
print('producing ABS SHAP.')
SV = produce_abs_SHAP_from_df(SV, X, plot=False)
svs.append(SV)
return svs
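# Typical downstream use (mirroring the module docstring): the per-year SHAP
# frames returned here feed the `mlr_shap` argument of
# produce_RF_abs_SHAP_all_years, e.g.
#   svs = produce_shap_MLR_all_years(df)
#   produce_RF_abs_SHAP_all_years(path=ml_path, mlr_shap=svs)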
def loop_over_RF_models_years(df, path=work_david/'ML', mode='score',
pgrid='normal', feats=best_rf2+['SEI']):
import numpy as np
import pandas as pd
import shap
import xarray as xr
years = np.arange(2000, 2020, 1)
train_scores = []
test_scores = []
x_tests = []
fis = []
# shaps = []
for year in years:
print(year)
_, gr = load_HP_params_from_optimized_model(path, pgrid=pgrid,
year=year)
rf = gr.best_estimator_
X_train, X_test, y_train, y_test = produce_X_y_RF_per_year(df,
year=year,
verbose=0, feats=feats)
rf.fit(X_train, y_train)
if mode == 'score':
train_scores.append(rf.score(X_train, y_train))
test_scores.append(rf.score(X_test, y_test))
elif mode == 'time-series':
y_pred = rf.predict(X_test)
y_pred = np.exp(y_pred)
X_test['Price'] = y_pred
X_test['Year'] = year
X_test = X_test.reset_index(drop=True)
x_tests.append(X_test)
elif mode == 'shap':
# rf.fit(X_train, y_train)
explainer = shap.TreeExplainer(rf)
shap_values = explainer.shap_values(X_test.values)
SV = convert_shap_values_to_pandas(shap_values, X_test)
filename = 'Nadlan_SHAP_RF_{}.csv'.format(year)
SV.to_csv(path/filename, index=False)
# SV = SV.to_xarray().to_array('feature')
# return SV, X_test
# shaps.append(SV)
elif mode == 'X_test':
X_test.index.name = 'sample'
filename = 'Nadlan_X_test_RF_{}.csv'.format(year)
X_test.to_csv(path/filename, index=False)
# x_tests.append(X_test.to_xarray().to_array('feature'))
elif mode == 'FI':
fi = pd.DataFrame(rf.feature_importances_).T
fi.columns = X_train.columns
fi['year'] = year
fis.append(fi)
if mode == 'score':
sc = pd.DataFrame(train_scores)
sc.columns = ['train_r2']
sc['test_r2'] = test_scores
sc.index = years
return sc
elif mode == 'time-series':
X_ts = pd.concat(x_tests, axis=0)
return X_ts
elif mode == 'FI':
FI = pd.concat(fis, axis=0)
return FI
# elif mode == 'shap':
# sv_da = xr.concat(shaps, 'year')
# sv_da['year'] = years
# sv_da.attrs['long_name'] = 'Shapley values via SHAP Python package.'
# sv_da.to_netcdf(path/'Nadlan_SHAP_RF_{}-{}.nc'.format(years[0], years[-1]))
# return sv_da
# elif mode == 'X_test':
# X_ts = xr.concat(x_tests, 'year')
# X_ts['year'] = years
# X_ts.attrs['long_name'] = 'X_tests per year to use with the SHAP'
# X_ts.to_netcdf(path/'Nadlan_X_test_RF_{}-{}.nc'.format(years[0], years[-1]))
# return X_ts
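# Typical calls (per the module docstring), assuming optimized HP files exist
# under `path` for every year:
#   sc   = loop_over_RF_models_years(df, path=ml_path, mode='score')        # R^2 per year
#   X_ts = loop_over_RF_models_years(df, path=ml_path, mode='time-series')  # predictions
#   fi   = loop_over_RF_models_years(df, path=ml_path, mode='FI')           # feature importances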
def load_all_yearly_shap_values(path=work_david/'ML'):
import numpy as np
years = np.arange(2000, 2020, 1)
svs = []
X_tests = []
for year in years:
sv, X_test = load_yearly_shap_values(path, year)
svs.append(sv)
X_tests.append(X_test)
return svs, X_tests
def load_yearly_shap_values(path=work_david/'ML', year=2000):
import pandas as pd
X_test = pd.read_csv(path/'Nadlan_X_test_RF_{}.csv'.format(year))
shap_values = pd.read_csv(path/'Nadlan_SHAP_RF_{}.csv'.format(year))
assert len(X_test)==len(shap_values)
return shap_values, X_test
def load_shap_values(path=work_david/'ML', samples=10000,
interaction_too=True, rename=True):
import pandas as pd
import xarray as xr
print('loading {} samples.'.format(samples))
X_test = pd.read_csv(path/'X_test_RF_{}.csv'.format(samples))
shap_values = pd.read_csv(path/'SHAP_values_RF_{}.csv'.format(samples))
if rename:
X_test = X_test.rename(short_plot_names, axis=1)
shap_values = shap_values.rename(short_plot_names, axis=1)
if interaction_too:
print('loading interaction values too.')
shap_interaction_values = xr.load_dataarray(path/'SHAP_interaction_values_RF_{}.nc'.format(samples))
shap_interaction_values['feature1'] = X_test.columns
shap_interaction_values['feature2'] = X_test.columns
return X_test, shap_values, shap_interaction_values
else:
return X_test, shap_values
def plot_dependence(shap_values, X_test, x_feature='RM',
y_features=['DI', 'SE', 'BR'],
alpha=0.2, cmap=None, units='pct_change',
plot_size=1.5, fontsize=16, x_jitter=0.75):
import shap
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.ticker as tck
sns.set_theme(style='ticks', font_scale=1.2)
fig, axes = plt.subplots(len(y_features), 1, sharex=True, figsize=(8, 10))
X = X_test.copy()
X = X.rename(vars_plot_names, axis=1)
shap_values = shap_values.rename(vars_plot_names, axis=1)
X = X.rename(add_units_dict_short, axis=1)
# X['Old/New'] = X['Old/New'].astype(int)
# new_dict = {0: 'Old', 1: 'New'}
# X['Old/New'] = X['Old/New'].map(new_dict)
if units == 'pct_change':
shap_values = shap_values.apply(pct_change)
for i, y in enumerate(y_features):
y_new = add_units_dict_short.get(y, y)
shap.dependence_plot(x_feature, shap_values.values, X, x_jitter=x_jitter,
dot_size=4, alpha=alpha, interaction_index=y_new,
ax=axes[i])
if 'DI' in x_feature:
axes[i].set_xlim(25, 150)
if 'RM' in x_feature:
axes[i].set_xlabel('RM [# of rooms]')
cb = fig.axes[-1]
mapp = cb.collections[1]
fig.canvas.draw()
cbar = fig.colorbar(mapp, ax=axes[i],aspect=50, pad=0.05,
label=y_new)
cbar.set_alpha(0.85)
cbar.draw_all()
cb.remove()
# cbar.ax.set_yticklabels(['Low', 'High'], fontsize=fontsize)
# cbar.set_label('Predictor value')
cbar.outline.set_visible(False)
# axes[i].set_ylabel(axes[i].get_ylabel(), fontsize=fontsize)
# axes[i].set_xlabel(axes[i].get_xlabel(), fontsize=fontsize)
# axes[i].tick_params(labelsize=fontsize)
axes[i].grid(True)
if units == 'pct_change':
la = 'Price change\nfor {} [%]'.format(x_feature)
axes[i].set_ylabel(la)
[ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize) for ax in fig.axes]
[ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize) for ax in fig.axes]
[ax.tick_params(labelsize=fontsize) for ax in fig.axes]
[ax.yaxis.set_major_locator(tck.MaxNLocator(5)) for ax in fig.axes]
fig.tight_layout()
return fig
def plot_summary_shap_values(shap_values, X_test, alpha=0.7, cmap=None,
plot_size=1.5, fontsize=16, units='pct_change'):
import shap
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style='ticks', font_scale=1.8)
X_test = X_test.rename(vars_plot_names, axis=1)
shap_values = shap_values.rename(vars_plot_names, axis=1)
if units == 'pct_change':
shap_values = shap_values.apply(pct_change)
if cmap is None:
shap.summary_plot(shap_values.values, X_test, alpha=alpha, plot_size=plot_size)
else:
if not isinstance(cmap, str):
cm = cmap.get_mpl_colormap()
else:
cm = sns.color_palette(cmap, as_cmap=True)
shap.summary_plot(shap_values.values, X_test, alpha=alpha, cmap=cm, plot_size=plot_size)
if len(shap_values.shape) > 2:
fig = plt.gcf()
[ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize) for ax in fig.axes]
[ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize) for ax in fig.axes]
[ax.set_title(ax.get_title(), fontsize=fontsize) for ax in fig.axes]
[ax.tick_params(labelsize=fontsize) for ax in fig.axes]
else:
fig, ax = plt.gcf(), plt.gca()
if units == 'pct_change':
ax.set_xlabel('Price change [%]', fontsize=fontsize)
else:
ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize)
ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
cb = fig.axes[-1]
cbar = fig.colorbar(cb.collections[1], ticks=[0, 1],
aspect=50, pad=0.05)
cb.remove()
cbar.ax.set_yticklabels(['Low', 'High'], fontsize=fontsize)
cbar.set_label('Predictor value')
cbar.ax.tick_params(size=0)
cbar.outline.set_visible(False)
# cb.set_ylabel(cb.get_ylabel(), fontsize=fontsize)
# cb.tick_params(labelsize=fontsize)
fig.tight_layout()
return fig
def select_years_interaction_term(ds, regressor='SEI'):
regs = ['{}_{}'.format(x, regressor) for x in year_dummies]
ds = ds.sel(regressor=regs)
return ds
def produce_RF_abs_SHAP_all_years(path=ml_path, plot=True, mlr_shap=None,
units=None):
import xarray as xr
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
SVs, X_tests = load_all_yearly_shap_values(path)
k2s = []
for i, year in enumerate(np.arange(2000, 2020, 1)):
shap_df = SVs[i]
# shap_df.drop('year', axis=1, inplace=True)
X_test = X_tests[i]
# X_test.drop('year', axis=1, inplace=True)
k2 = produce_abs_SHAP_from_df(shap_df, X_test, plot=False)
k2['year'] = year
if mlr_shap is not None:
k2['Model'] = 'RF'
k2_mlr = mlr_shap[i]
k2_mlr['year'] = year
k2_mlr['Model'] = 'Hedonic'
k2_mlr = k2_mlr[k2_mlr['Predictor'].isin(best_regular1)]
k2 = pd.concat([k2, k2_mlr], axis=0)
k2s.append(k2)
abs_shap = pd.concat(k2s, axis=0)
abs_shap = abs_shap.reset_index(drop=True)
if plot:
sns.set_theme(style='ticks', font_scale=1.6)
fig, ax = plt.subplots(figsize=(17, 10))
abs_shap['year'] = pd.to_datetime(abs_shap['year'], format='%Y')
abs_shap = abs_shap[abs_shap['Predictor']!='New']
abs_shap = abs_shap[abs_shap['Predictor']!='Rooms']
# order:
order = ['SE (Socio-Economic Index)', 'BR (Building Rate)', 'DI (Distance to ECs)']
abs_shap['Predictor'] = abs_shap['Predictor'].map(vars_explained_plot_names)
abs_shap['SHAP_abs'] *= np.sign(abs_shap['Corr'])
if units == 'pct_change':
abs_shap['SHAP_abs'] = abs_shap['SHAP_abs'].apply(pct_change)
# order = ['Socio-Economic Index', 'Building rate', 'Distance to ECs']
if mlr_shap is not None:
sns.lineplot(data=abs_shap, x='year', y='SHAP_abs', hue='Predictor',
ax=ax, palette='Dark2', ci='sd', markers=True, linewidth=2,
hue_order=order, style='Model', markersize=10)
else:
sns.lineplot(data=abs_shap, x='year', y='SHAP_abs', hue='Predictor',
ax=ax, palette='Dark2', ci='sd', markers=True, linewidth=2,
hue_order=order, markersize=10)
if units == 'pct_change':
ax.set_ylabel('Price change [%]')
else:
ax.set_ylabel("mean |SHAP values|")
ax.set_xlabel('')
ax.grid(True)
h, la = ax.get_legend_handles_labels()
ax.legend_.remove()
ax.legend(h, la, ncol=2, loc='center')
sns.despine(fig)
fig.tight_layout()
return abs_shap
def produce_abs_SHAP_from_df(shap_df, X_test, plot=False):
import pandas as pd
shap_v = pd.DataFrame(shap_df)
feature_list = X_test.columns
shap_v.columns = feature_list
df_v = X_test.copy().reset_index()#.drop('time', axis=1)
# Determine the correlation in order to plot with different colors
corr_list = list()
for i in feature_list:
b = np.corrcoef(shap_v[i], df_v[i])[1][0]
corr_list.append(b)
corr_df = pd.concat(
[pd.Series(feature_list), pd.Series(corr_list)], axis=1).fillna(0)
# Make a data frame. Column 1 is the feature, and Column 2 is the correlation coefficient
corr_df.columns = ['Predictor', 'Corr']
corr_df['Sign'] = np.where(corr_df['Corr'] > 0, 'red', 'blue')
# Plot it
shap_abs = np.abs(shap_v)
k = pd.DataFrame(shap_abs.mean()).reset_index()
k.columns = ['Predictor', 'SHAP_abs']
k2 = k.merge(corr_df, left_on='Predictor', right_on='Predictor', how='inner')
k2 = k2.sort_values(by='SHAP_abs', ascending=True)
if plot:
colorlist = k2['Sign']
ax = k2.plot.barh(x='Predictor', y='SHAP_abs',
color=colorlist, figsize=(5, 6), legend=False)
ax.set_xlabel("SHAP Value (Red = Positive Impact)")
return k2
def ABS_SHAP(df_shap, df):
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.2)
#import matplotlib as plt
# Make a copy of the input data
shap_v = pd.DataFrame(df_shap)
feature_list = df.columns
shap_v.columns = feature_list
df_v = df.copy().reset_index()#.drop('time', axis=1)
# Determine the correlation in order to plot with different colors
corr_list = list()
for i in feature_list:
b = np.corrcoef(shap_v[i], df_v[i])[1][0]
corr_list.append(b)
corr_df = pd.concat(
[pd.Series(feature_list), pd.Series(corr_list)], axis=1).fillna(0)
# Make a data frame. Column 1 is the feature, and Column 2 is the correlation coefficient
corr_df.columns = ['Predictor', 'Corr']
corr_df['Sign'] = np.where(corr_df['Corr'] > 0, 'red', 'blue')
# Plot it
shap_abs = np.abs(shap_v)
k = pd.DataFrame(shap_abs.mean()).reset_index()
k.columns = ['Predictor', 'SHAP_abs']
k2 = k.merge(corr_df, left_on='Predictor', right_on='Predictor', how='inner')
k2 = k2.sort_values(by='SHAP_abs', ascending=True)
colorlist = k2['Sign']
ax = k2.plot.barh(x='Predictor', y='SHAP_abs',
color=colorlist, figsize=(5, 6), legend=False)
ax.set_xlabel("SHAP Value (Red = Positive Impact)")
return
def plot_simplified_shap_tree_explainer(rf_model, X, y):
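# X is assumed to be an xarray object with a 'regressor' dimension (it is
# converted below via to_dataset('regressor')), and y the matching target.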
import shap
rf_model.fit(X, y)
dfX = X.to_dataset('regressor').to_dataframe()
dfX = dfX.rename(
{'qbo_cdas': 'QBO', 'anom_nino3p4': 'ENSO', 'co2': r'CO$_2$'}, axis=1)
ex_rf = shap.Explainer(rf_model)
shap_values_rf = ex_rf.shap_values(dfX)
ABS_SHAP(shap_values_rf, dfX)
return
def convert_shap_values_to_pandas(shap_values, X_test):
import pandas as pd
SV = pd.DataFrame(shap_values)
SV.columns = X_test.columns
SV.index.name = 'sample'
return SV
def plot_Tree_explainer_shap(rf_model, X_train, y_train, X_test, samples=1000):
import shap
from shap.utils import sample
print('fitting...')
rf_model.fit(X_train, y_train)
# explain all the predictions in the test set
print('explaining...')
explainer = shap.TreeExplainer(rf_model)
# rename features:
X_test = X_test.rename(plot_names, axis=1)
if samples is not None:
print('using just {} samples out of {}.'.format(samples, len(X_test)))
shap_values = explainer.shap_values(sample(X_test, samples).values)
shap.summary_plot(shap_values, sample(X_test, samples))
SV = convert_shap_values_to_pandas(shap_values, sample(X_test, samples))
else:
shap_values = explainer.shap_values(X_test.values)
shap.summary_plot(shap_values, X_test)
SV = convert_shap_values_to_pandas(shap_values, X_test)
# shap.summary_plot(shap_values_rf, dfX, plot_size=1.1)
return SV
# def get_mean_std_from_df_feats(df, feats=best, ignore=['New', 'Rooms_345', 'Sale_year'],
# log=['Total_ends']):
# import numpy as np
# f = [x for x in best if x not in ignore]
# df1 = df.copy()
# if log is not None:
# df1[log] = (df1[log]+1).apply(np.log)
# mean = df1[f].mean()
# std = df1[f].std()
# return mean, std
def produce_rooms_new_years_from_ds_var(ds, dsvar='beta_coef', new_cat='Used/New',
new='New', old='Used'):
import numpy as np
import pandas as pd
df = ds[dsvar].to_dataset('year').to_dataframe().T
dfs = []
# 3 rooms old:
dff = df['const'].apply(np.exp).to_frame('Price')
dff['Rooms'] = 3
dff[new_cat] = old
dfs.append(dff)
# 3 rooms new:
dff = (df['const']+df['New']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 3
dff[new_cat] = new
dfs.append(dff)
# 4 rooms old:
dff = (df['const']+df['Rooms_4']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 4
dff[new_cat] = old
dfs.append(dff)
# 4 rooms new:
dff = (df['const']+df['New']+df['Rooms_4']+df['Rooms_4_New']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 4
dff[new_cat] = new
dfs.append(dff)
# 5 rooms old:
dff = (df['const']+df['Rooms_5']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 5
dff[new_cat] = old
dfs.append(dff)
# 5 rooms new:
dff = (df['const']+df['New']+df['Rooms_5']+df['Rooms_5_New']).apply(np.exp).to_frame('Price')
dff['Rooms'] = 5
dff[new_cat] = new
dfs.append(dff)
dff = pd.concat(dfs, axis=0)
dff['year'] = dff.index
return dff
def calculate_pct_change_for_long_ds_var(ds_var_long, year=2000):
    d = ds_var_long.pivot(index='year', columns=[
        'Rooms', 'Used/New'], values='Price')
d_ref = d.loc[year]
d /= d_ref
d -= 1
d *= 100
d['year']=d.index
df = d.melt(id_vars=['year'],value_name='Price')
return df
def calculate_period_pct_change_from_ds(ds, syear=2008, eyear=2019):
beta=produce_rooms_new_years_from_ds_var(ds,'beta_coef')
beta = beta.pivot(index='year', columns=['Rooms', 'Used/New'],
values='Price')
beta.columns = ['{}-{}'.format(rooms, new) for rooms, new in beta.columns]
pct = 100 * (beta.loc[eyear] - beta.loc[syear]) / beta.loc[syear]
return pct
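# Illustrative usage sketch (assumption: `ds` is the xarray Dataset of yearly regression
# results with 'beta_coef', 'CI_95_upper' and 'CI_95_lower' variables used in this module):
# long_prices = produce_rooms_new_years_from_ds_var(ds, 'beta_coef')
# print(calculate_period_pct_change_from_ds(ds, syear=2008, eyear=2019))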
def plot_price_rooms_new_from_new_ds(ds, add_cbs_index=False,
units='nis'):
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from cbs_procedures import read_apt_price_index
from cbs_procedures import read_mean_salary
sns.set_theme(style='ticks', font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
beta = produce_rooms_new_years_from_ds_var(ds, 'beta_coef')
# calculate pct change between 2008 and 2019:
pct = (beta.loc[2019,'Price'].values-beta.loc[2008,'Price'].values)/beta.loc[2008,'Price'].values
pct *= 100
beta1 = beta.copy()
beta1.loc[2019, 'pct_change_2019_2008'] = pct
print(beta1.loc[2019])
# calculate pct change Old/New in 2008:
pct=(beta[beta['Used/New']=='New'].loc[2008,'Price']-beta[beta['Used/New']=='Used'].loc[2008,'Price'])/beta[beta['Used/New']=='Used'].loc[2008,'Price']
pct *= 100
print(pct)
# calculate pct change Old/New in 2019:
pct=(beta[beta['Used/New']=='New'].loc[2019,'Price']-beta[beta['Used/New']=='Used'].loc[2019,'Price'])/beta[beta['Used/New']=='Used'].loc[2019,'Price']
pct *= 100
print(pct)
upper = produce_rooms_new_years_from_ds_var(ds, 'CI_95_upper')
lower = produce_rooms_new_years_from_ds_var(ds, 'CI_95_lower')
if units == 'pct_change':
beta = calculate_pct_change_for_long_ds_var(beta, 2000)
upper = calculate_pct_change_for_long_ds_var(upper, 2000)
lower = calculate_pct_change_for_long_ds_var(lower, 2000)
df = pd.concat([lower, beta, upper], axis=0)
if units == 'dollar':
# approx 4 NIS to 1 $ in whole 2000-2019
df['Price'] /= 4 * 1000 # price in thousands of $
ylabel = 'Apartment Price [Thousands $]'
elif units == 'nis':
ylabel = 'Apartment Price [millions NIS]'
df['Price'] /= 1e6
elif units == 'salary':
sal = read_mean_salary()
df = pd.merge(df, sal, on='year', how='inner')
df['Price'] /= df['mean_salary']
ylabel = 'Mean salary'
elif units == 'pct_change':
ylabel = 'Apartment price change from 2000 [%]'
df['year'] = pd.to_datetime(df['year'], format='%Y')
df = df.reset_index(drop=True)
sns.lineplot(data=df, x='year', y='Price', hue='Rooms', style='Used/New',
ax=ax, palette='tab10', ci='sd', markers=True, markersize=10)
ax.set_ylabel(ylabel)
ax.set_xlabel('')
if add_cbs_index:
cbs = read_apt_price_index(path=work_david, resample='AS',
normalize_year=2000)
cbs = cbs.loc['2000':'2019']
if units == 'pct_change':
cbs /= cbs.iloc[0]
cbs -= 1
cbs *= 100
cbs_label = 'Dwellings price index change from 2000 [%]'
cbs.columns = ['Apartment Price Index']
cbs['year'] = pd.to_datetime(cbs.index, format='%Y')
if units != 'pct_change':
twin = ax.twinx()
else:
twin = ax
sns.lineplot(data=cbs, x='year', y='Apartment Price Index', ax=twin,
color='k', linewidth=2)
twin.set_ylabel('Dwellings Price Index')
twin.set_xlabel('')
twin.set_ylim(50, 300)
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
return fig
def plot_regular_feats_comparison_from_new_ds(ds,reg_name='Predictor',
feats=best_regular1, units='pct_change'):
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
sns.set_theme(style='ticks', font_scale=1.8)
fig, ax = plt.subplots(figsize=(17, 10))
dfs = []
df = ds['beta_coef'].to_dataset('year').to_dataframe().T
dff = df[feats].melt(ignore_index=False)
dff['year'] = dff.index
dfs.append(dff)
df = ds['CI_95_upper'].to_dataset('year').to_dataframe().T
dff = df[feats].melt(ignore_index=False)
dff['year'] = dff.index
dfs.append(dff)
df = ds['CI_95_lower'].to_dataset('year').to_dataframe().T
dff = df[feats].melt(ignore_index=False)
dff['year'] = dff.index
dfs.append(dff)
dff = pd.concat(dfs, axis=0)
dff['regressor'] = dff['regressor'].map(vars_explained_plot_names)
dff = dff.rename({'regressor': reg_name}, axis=1)
dff['year'] = pd.to_datetime(dff['year'], format='%Y')
dff = dff.reset_index(drop=True)
if units == 'pct_change':
dff['value'] = dff['value'].apply(pct_change)
sns.lineplot(data=dff, x='year', y='value', hue=reg_name,
ax=ax, ci='sd', markers=True,
palette='Dark2')
if units == 'pct_change':
ylabel = 'Price change [%]'
else:
ylabel = r'Standardized $\beta$s'
ax.set_ylabel(ylabel)
ax.set_xlabel('')
h, l = ax.get_legend_handles_labels()
ax.legend_.remove()
ax.legend(h, l, ncol=1, title='Predictor', loc='center')
ax.grid(True)
sns.despine(fig)
fig.tight_layout()
return dff
def prepare_new_X_y_with_year(df, year=2000, y_name='Price', features=best1):
import pandas as pd
def return_X_with_interaction(X, dummy_list, var_list):
Xs = []
for num_var in var_list:
X1 = get_design_with_pair_interaction(
X, dummy_list+[num_var])
Xs.append(X1)
X1 = pd.concat(Xs, axis=1)
X1 = X1.loc[:, ~X1.columns.duplicated()]
return X1
# m, s = get_mean_std_from_df_feats(df)
X, y, scaler = produce_X_y(
df, y_name=y_name, year=year, feats=features, dummy='Rooms_345',
plot_Xcorr=True, scale_X=True)
# X[best_regular] -= m
# X[best_regular] /= s
# regular vars vs. time (years):
# X1 = return_X_with_interaction(X, ['trend'], best_regular)
# rooms dummies and new:
X2 = return_X_with_interaction(X, room_dummies, ['New'])
# rooms dummies and years:
# X3 = return_X_with_interaction(X, ['trend'], room_dummies)
# New and years:
# X4 = return_X_with_interaction(X, year_dummies, ['New'])
X = pd.concat([X, X2],axis=1) #, X3, X4], axis=1)
X = X.loc[:, ~X.columns.duplicated()]
return X, y
def prepare_new_X_y(df, y_name='Price'):
import pandas as pd
def return_X_with_interaction(X, dummy_list, var_list):
Xs = []
for num_var in var_list:
X1 = get_design_with_pair_interaction(
X, dummy_list+[num_var])
Xs.append(X1)
X1 = pd.concat(Xs, axis=1)
X1 = X1.loc[:, ~X1.columns.duplicated()]
return X1
X, y, scaler = produce_X_y(
df, y_name=y_name, year=None, feats=best_years, dummy='Rooms_345',
plot_Xcorr=True, scale_X=True)
# regular vars vs. time (years):
X1 = return_X_with_interaction(X, year_dummies, best_regular)
# rooms dummies and new:
X2 = return_X_with_interaction(X, room_dummies, ['New'])
# rooms dummies and years:
# X3 = return_X_with_interaction(X, year_dummies, room_dummies)
# New and years:
# X4 = return_X_with_interaction(X, year_dummies, ['New'])
X = pd.concat([X1, X2],axis=1) #, X3, X4], axis=1)
X = X.loc[:, ~X.columns.duplicated()]
return X, y
def prepare_new_X_y_with_trend(df, y_name='Price'):
import pandas as pd
def return_X_with_interaction(X, dummy_list, var_list):
Xs = []
for num_var in var_list:
X1 = get_design_with_pair_interaction(
X, dummy_list+[num_var])
Xs.append(X1)
X1 = pd.concat(Xs, axis=1)
X1 = X1.loc[:, ~X1.columns.duplicated()]
return X1
X, y, scaler = produce_X_y(
df, y_name=y_name, year='trend', feats=best, dummy='Rooms_345',
plot_Xcorr=True, scale_X=True)
# regular vars vs. time (years):
X1 = return_X_with_interaction(X, ['trend'], best_regular)
# rooms dummies and new:
X2 = return_X_with_interaction(X, room_dummies, ['New'])
# rooms dummies and years:
X3 = return_X_with_interaction(X, ['trend'], room_dummies)
# New and years:
# X4 = return_X_with_interaction(X, year_dummies, ['New'])
X = pd.concat([X1, X2, X3],axis=1) #, X3, X4], axis=1)
X = X.loc[:, ~X.columns.duplicated()]
return X, y
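# Illustrative usage sketch (assumption: `df` is the cleaned transactions DataFrame that
# produce_X_y and the best/best1/best_years feature lists defined elsewhere expect):
# X, y = prepare_new_X_y_with_year(df, year=2015)
# X_trend, y_trend = prepare_new_X_y_with_trend(df)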
def get_design_with_pair_interaction(data, group_pair):
""" Get the design matrix with the pairwise interactions
Parameters
----------
data (pandas.DataFrame):
Pandas data frame with the two variables to build the design matrix of their two main effects and their interaction
group_pair (iterator):
List with the name of the two variables (name of the columns) to build the design matrix of their two main effects and their interaction
Returns
-------
x_new (pandas.DataFrame):
Pandas data frame with the design matrix of their two main effects and their interaction
"""
import pandas as pd
import itertools
    x = pd.get_dummies(data[group_pair])
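    # The rest of this helper is missing from the snippet; the lines below are a plausible
    # completion (an assumption) based on the docstring: append pairwise products of the
    # dummy-encoded columns as interaction terms.
    interactions = pd.DataFrame(index=x.index)
    for col_a, col_b in itertools.combinations(x.columns, 2):
        interactions['{}_{}'.format(col_a, col_b)] = x[col_a] * x[col_b]
    x_new = pd.concat([x, interactions], axis=1)
    return x_new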
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 30 10:12:34 2018
@author: kite
"""
"""
Run the strategy backtest, plot the return curve against the CSI 300 benchmark,
and compute annualized return, maximum drawdown and Sharpe ratio.
Main methods:
ma10_factor:
is_k_up_break_ma10: whether the day's K-line crosses above the 10-day moving average
is_k_down_break_ma10: whether the day's K-line crosses below the 10-day moving average
compare_close_2_ma_10: helper comparing a day's close with that day's 10-day moving average
backtest: main backtest logic; after getting stocks from the stock pool, iterate through each trading day
"""
import pickle
from pymongo import DESCENDING, ASCENDING
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from stock_pool_strategy import stock_pool, find_out_stocks
from database import DB_CONN
from factor.ma10_factor import is_k_up_break_ma10, is_k_down_break_ma10
from stock_util import get_trading_dates, compute_drawdown, dynamic_max_drawdown, compute_sharpe_ratio, compute_ir
plt.rcParams['figure.figsize'] = [14, 8]
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
plt.style.use('ggplot')
SINGLE_DAY_MAX_DROP_RATE = 0.03
MAX_DROP_RATE = 0.1
ATR_WIN = 14
ATR_RATIO = 2
RISK_RATIO = 0.01
def backtest(begin_date, end_date, stop_method=None, pos_method='equal'):
"""
    Arguments:
        begin_date: backtest start date
        end_date: backtest end date
        stop_method : stop-loss method
            None : no stop loss
            fixed : fixed-percentage stop loss
            float : trailing (floating) stop loss
            ATR_float_dynamic : dynamic ATR trailing stop loss
            ATR_float_static : static ATR trailing stop loss
        pos_method : position-sizing method
            equal : equal-weighted positions
            atr : ATR-based position sizing
    Returns:
        Account: dict with
            init_assets : initial assets, default 1E7
            history_table : trade records
            net_value : daily net value
            final_net_value : net value on the final day
            profit : return
            day_profit : daily return
            positions : daily positions
            stop_loss : stop-loss method and its parameters
            position_manage : position-sizing method and related parameters
"""
    # record the stop-loss time points
# stop_lose_position_date_current = []
# stop_lose_position_date = []
    # record the backtest account information
Account = {}
    # position-sizing initialization
position_manage = {}
if pos_method == 'equal':
single_position = 2E5
position_manage['头寸分配方式'] = '均仓'
Account['position_manage'] = position_manage
elif pos_method == 'atr':
position_manage['头寸分配方式'] = 'ATR分配头寸'
position_manage['ATR_WIN'] = ATR_WIN
position_manage['RISK_RATIO'] = RISK_RATIO
Account['position_manage'] = position_manage
    positions = pd.Series()  # record daily position information
stop_loss = {}
cash = 1E7
init_assets = cash
Account['init_assets'] = init_assets
Account['start'] = begin_date
Account['end'] = end_date
if stop_method is None:
Account['stop_loss'] = '无止损'
elif stop_method == 'fixed':
stop_loss['单日跌幅比例'] = SINGLE_DAY_MAX_DROP_RATE
stop_loss['累计跌幅比例'] = MAX_DROP_RATE
stop_loss['止损方式'] = '固定比例止损'
Account['stop_loss'] = stop_loss
elif stop_method == 'float':
stop_loss['跌幅比例'] = MAX_DROP_RATE
stop_loss['止损方式'] = '浮动止损'
Account['stop_loss'] = stop_loss
elif (stop_method == 'ATR_float_dynamic') or (stop_method == 'ATR_float_static'):
stop_loss['ATR_WIN'] = ATR_WIN
stop_loss['ATR_RATIO'] = ATR_RATIO
        if stop_method == 'ATR_float_dynamic':
            stop_loss['止损方式'] = '动态ATR浮动止损'
        else:
            stop_loss['止损方式'] = '静态ATR浮动止损'
Account['stop_loss'] = stop_loss
    # net value, return and the CSI 300 benchmark over the same period, keyed by date
df_profit = pd.DataFrame(columns=['net_value', 'profit', 'hs300'])
    # single-day return and the CSI 300 benchmark over the same period, keyed by date
    df_day_profit = pd.DataFrame(columns=['profit', 'hs300'])
# -*- coding: utf-8 -*-
from unittest import TestCase
import pandas as pd
from pandas.util.testing import assert_frame_equal
import numpy as np
from datetime import datetime as dt
from numpy.testing import assert_array_almost_equal
from parameterized import parameterized
from alphaware.base import (Factor,
FactorContainer)
from alphaware.preprocess import (Winsorizer,
FactorWinsorizer)
class TestWinsorizer(TestCase):
@parameterized.expand([(np.array([1, 2, 3, 10, 4, 1, 3]),
(2.5, 97.5),
np.array([1, 2, 3, 9.1, 4, 1, 3]))])
def test_winsorize(self, x, quantile_range, expected):
calculated = Winsorizer(quantile_range).fit_transform(x)
assert_array_almost_equal(calculated, expected)
def test_factor_winsorizer(self):
index = pd.MultiIndex.from_product([['2014-01-30', '2014-02-28'], ['001', '002', '003', '004', '005']],
names=['trade_date', 'ticker'])
data1 = pd.DataFrame(index=index, data=[1.0, 1.0, 1.2, 200.0, 0.9, 5.0, 5.0, 5.1, 5.9, 5.0])
factor_test1 = Factor(data=data1, name='test1')
data2 = pd.DataFrame(index=index, data=[2.6, 2.5, 2.8, 2.9, 2.7, 1.9, -10.0, 2.1, 2.0, 1.9])
factor_test2 = Factor(data=data2, name='test2')
data3 = pd.DataFrame(index=index, data=[3.0, 3.0, 30.0, 5.0, 4.0, 6.0, 7.0, 6.0, 6.0, 5.9])
factor_test3 = Factor(data=data3, name='test3')
fc = FactorContainer('2014-01-30', '2014-02-28', [factor_test1, factor_test2, factor_test3])
quantile_range = (1, 99)
calculated = FactorWinsorizer(quantile_range).fit_transform(fc)
index = pd.MultiIndex.from_product([[dt(2014, 1, 30), dt(2014, 2, 28)], ['001', '002', '003', '004', '005']],
names=['trade_date', 'ticker'])
expected = pd.DataFrame({'test1': [1.0, 1.0, 1.2, 192.048, 0.904, 5.0, 5.0, 5.1, 5.868, 5.0],
'test2': [2.6, 2.504, 2.8, 2.896, 2.7, 1.9, -9.524, 2.096, 2.0, 1.9],
'test3': [3.0, 3.0, 29.0, 5.0, 4.0, 6.0, 6.96, 6.0, 6.0, 5.904]},
index=index)
        assert_frame_equal(calculated, expected)
import pandas as pd
import yfinance as yf
#get user preferences prior to running script
#ask user to choose whether to use a list of stocks from a csv file or to use the ticker_collector.py script to get after hours stocks from marketwatch.
print(' Welcome to Bacon Saver!', '\n\n', 'Are you using a list of ticker symbols from a csv file,', '\n', 'or would you like to get a list from the after hours screener on marketwatch?')
set_source = input(" Please enter CSV or MOV to set the source of the stock list.")
#ask user to set the EMA days
u_ema = int(input(' Enter the desired number of days to calculate the exponential moving average, 9 is the recommended value.'))
#ask user to set the option chain expiration date
user_ochain = input(' Enter the option chain expiration date in YYYY-MM-DD format.')
def display_status():
print("The source of the stock list will be: ", set_source)
    print("The exponential moving average will be calculated using", u_ema, "days")
print("The option chain expiration used will be: ", user_ochain)
print("Please wait while the program runs. Starting script...")
movers_list =[]
movers_df = pd.DataFrame()
def get_movers():
df = pd.read_html("https://www.marketwatch.com/tools/screener/after-hours")
dfg = df[0]
#create new series that keeps only first string value in 'Symbol Symbol' column, discarding anything after space
dfgainers = dfg['Symbol Symbol'].str[:4]
dflosers = df[1]
dflosers2 = dflosers['Symbol Symbol'].str[:4]
dfgainers2 = dfgainers.str.strip()
df_final = dfgainers2.append(dflosers2.str.strip())
df_final2 = pd.DataFrame()
df_final2["Symbol"] = df_final
df_final2.to_csv(r'PATH_TO_CSV')
movers_list.append(df_final2["Symbol"])
#get web list saved to csv file and create new list
manual_list = pd.read_csv(r'PATH_TO_CSV')
auto_list = pd.read_csv(r'PATH_TO_CSV')
#Create Symbols From Dataset
target_symbols_long =[]
target_symbols_short = []
movers_df = pd.DataFrame(movers_list)
short_df = []
long_df = []
master_df = pd.DataFrame()
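# NOTE: the original snippet never defines `Symbols`, which screener() below iterates over;
# this wiring is an assumption tied to the user's chosen source.
if set_source.strip().upper() == 'MOV':
    get_movers()
    Symbols = movers_list[0]
else:
    Symbols = manual_list['Symbol']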
def screener():
for i in Symbols:
i = i.strip()
stock = yf.Ticker(i)
# get stock info
#print(stock.info['longBusinessSummary'])
# get historical market data
hist = stock.history(period="1mo")
histd = pd.DataFrame(hist)
df = pd.DataFrame()
        df["Date"] = pd.to_datetime(histd.index, unit='ms')
df["Close"] = histd["Close"].values
df["Volume"] = hist['Volume'].values
df["EMA9"] = df["Close"].ewm(span=u_ema).mean()
df["Symbol"] = stock.ticker
        df = df.fillna("nan")
bool_long = df["EMA9"].iloc[-1] < hist["Close"].iloc[-1] ##create Boolean to filter out stocks to go long on.
if bool_long == True:
target_list_long = df['Symbol'][0]
target_symbols_long.append(target_list_long)
print("Stock ", i, "is within buying parameters. Dataframe: ")
print(df.tail(1))
try:
optc = stock.option_chain(user_ochain)
optcz = optc.calls[['strike', 'lastPrice', 'bid', 'ask', 'volume', 'openInterest', 'impliedVolatility']][9:16]
                optczdf = pd.DataFrame(optcz)
# NBA Stats Clustering
# Copyright <NAME>, 2019
# various techniques for dimension reduction
import numpy as np
import pandas as pd
# pca from scratch
# influenced by in-class notebook I did on pca.
def pca(m, dims):
# k dims
k = min(m.shape[-1], dims)
m_t = m.T
mm_t = np.matmul(m, m_t)
m_tm = np.matmul(m_t, m)
# select matrix based on square matrix with smaller dimensions
matrix = mm_t if mm_t.shape[0] < m_tm.shape[0] else m_tm
    # compute eigenvalues and eigenvectors (the matrix is symmetric, so eigh applies;
    # eigenvectors are the columns of eigvecs)
    eigvals, eigvecs = np.linalg.eigh(matrix)
    # sort eigenvector columns by descending eigenvalue and keep the k principal ones
    order = np.argsort(eigvals)[::-1][:k]
    ek = eigvecs[:, order]
    if matrix is mm_t:
        # eigenvectors of m @ m.T live in sample space; map them to feature space
        # (if v is an eigenvector of m m^T, then m^T v is an eigenvector of m^T m)
        ek = np.matmul(m_t, ek)
        ek = ek / np.linalg.norm(ek, axis=0)
    # multiply by original data to get reduced dimension data
    me2 = np.matmul(m, ek)
columns = [f'x{i}' for i in range(me2.shape[-1])]
# put into datafram
    df = pd.DataFrame(me2, columns=columns)
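    # (assumed) hand the reduced-dimension frame back to the caller
    return df

# Illustrative usage sketch (assumption: any 2-D numeric array of player stats works):
# stats = np.random.rand(100, 20)        # 100 players, 20 raw features
# reduced = pca(stats, dims=3)           # DataFrame with columns x0, x1, x2
# print(reduced.head())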
# Source: https://github.com/Screetsec/Sudomy/issues/28
#
# This fetches the data displayed on the Google Safe Browsing Transparency Report and outputs it as a CSV
# that can be imported into a Kaggle dataset.
# The original visualization can be found here: https://transparencyreport.google.com/safe-browsing/overview
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from datetime import datetime, date
RUN_TIME = int(datetime.utcnow().timestamp() * 1000)
START_TIME = datetime.fromtimestamp(1148194800000 // 1000)
# Here are the URL requests I found on the page:
UNSAFE_URL = f"https://transparencyreport.google.com/transparencyreport/api/v3/safebrowsing/sites?dataset=0&series=malwareDetected,phishingDetected&start=1148194800000&end={RUN_TIME}"
NUMBER_URL = f"https://transparencyreport.google.com/transparencyreport/api/v3/safebrowsing/sites?dataset=1&series=malware,phishing&start=1148194800000&end={RUN_TIME}"
SITES_URL = f"https://transparencyreport.google.com/transparencyreport/api/v3/safebrowsing/sites?start=1148194800000&series=attack,compromised&end={RUN_TIME}"
BROWSER_WARNINGS_URL = f"https://transparencyreport.google.com/transparencyreport/api/v3/safebrowsing/warnings?dataset=users&start=1148194800000&end={RUN_TIME}&series=users"
SEARCH_WARNINGS_URL = f"https://transparencyreport.google.com/transparencyreport/api/v3/safebrowsing/warnings?dataset=search&start=1148194800000&end={RUN_TIME}&series=search"
RESPONSE_TIME_URL = f"https://transparencyreport.google.com/transparencyreport/api/v3/safebrowsing/notify?dataset=1&start=1148194800000&end={RUN_TIME}&series=response"
REINFECTION_URL = f"https://transparencyreport.google.com/transparencyreport/api/v3/safebrowsing/notify?dataset=0&start=1148194800000&end={RUN_TIME}&series=reinfect"
COLUMN_NAMES = [
"WeekOf",
"Malware sites detected",
"Phishing sites detected",
"Malware sites number",
"Phishing sites number",
"Attack sites",
"Compromised sites",
"Browser warnings",
"Search warnings",
"Webmaster response time",
"Reinfection rate"
]
def load_dataframe():
dates = pd.date_range(start=START_TIME, end=datetime.fromtimestamp(RUN_TIME // 1000), freq='W', normalize=True)
df = pd.DataFrame(columns=COLUMN_NAMES)
df["WeekOf"] = dates
df = df.set_index("WeekOf")
return df
df = load_dataframe()
import requests
import json
def fetch_as_json(url):
r = requests.get(url)
c = r.content
c = c[5:]
j = json.loads(c)
return j[0][1]
def malware_phishing_detected(df):
pts = fetch_as_json(UNSAFE_URL)
for pt in pts:
date = pd.to_datetime(pt[0], unit='ms').normalize()
malware = pt[1][0]
phishing = pt[1][1]
malware = malware[0] if malware else np.NaN
phishing = phishing[0] if phishing else np.NaN
df[COLUMN_NAMES[1]][date] = malware
df[COLUMN_NAMES[2]][date] = phishing
return df
def malware_phishing_number(df):
pts = fetch_as_json(NUMBER_URL)
for pt in pts:
date = pd.to_datetime(pt[0], unit='ms').normalize()
malware = pt[1][0]
phishing = pt[1][1]
malware = malware[0] if malware else np.NaN
phishing = phishing[0] if phishing else np.NaN
df[COLUMN_NAMES[3]][date] = malware
df[COLUMN_NAMES[4]][date] = phishing
return df
def site_count(df):
pts = fetch_as_json(SITES_URL)
for pt in pts:
date = pd.to_datetime(pt[0], unit='ms').normalize()
attack = pt[1][0]
comped = pt[1][1]
attack = attack[0] if attack else np.NaN
comped = comped[0] if comped else np.NaN
df[COLUMN_NAMES[5]][date] = attack
df[COLUMN_NAMES[6]][date] = comped
return df
def browser_warnings(df):
pts = fetch_as_json(BROWSER_WARNINGS_URL)
for pt in pts:
date = pd.to_datetime(pt[0], unit='ms').normalize()
value = pt[1][0]
value = value[0] if value else np.NaN
df[COLUMN_NAMES[7]][date] = value
return df
def search_warnings(df):
pts = fetch_as_json(SEARCH_WARNINGS_URL)
for pt in pts:
date = pd.to_datetime(pt[0], unit='ms').normalize()
value = pt[1][0]
value = value[0] if value else np.NaN
df[COLUMN_NAMES[8]][date] = value
return df
def response_time(df):
pts = fetch_as_json(RESPONSE_TIME_URL)
for pt in pts:
date = pd.to_datetime(pt[0], unit='ms').normalize()
value = pt[1][0]
value = value[0] if value else np.NaN
df[COLUMN_NAMES[9]][date] = value
return df
def reinfection_rate(df):
pts = fetch_as_json(REINFECTION_URL)
for pt in pts:
        date = pd.to_datetime(pt[0], unit='ms')
# Copyright (c) 2019 Princeton University
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime, timedelta
import json
import os.path
import pandas as pd
# Local
from GenConfigs import *
from .Logger import ScriptLogger
logger = ScriptLogger(loggername='workload_analyzer/perf_mon_analyzer',
filename=os.path.join(DATA_DIR, 'logs', 'WA.log'))
def ReadPQOSMSRMon(pqos_msr_mon_file):
"""
This function parses the output of the pqos-msr-mon.
"""
with open(pqos_msr_mon_file) as f:
lines = f.readlines()
records = {'timestamp': [], 'Core': [], 'IPC': [],
'LLC Misses': [], 'LLC Util (KB)': [], 'MBL (MB/s)': []}
tmp_records = {'timestamp': [], 'Core': [], 'IPC': [],
'LLC Misses': [], 'LLC Util (KB)': [], 'MBL (MB/s)': []}
prev_timestamp, index = None, -1
for line_index in range(len(lines)):
line = lines[line_index]
if 'TIME' in line:
index += 1
timestamp = datetime.strptime(line[5:-1], '%Y-%m-%d %H:%M:%S')
if (timestamp != prev_timestamp):
for key, value in tmp_records.items():
if key == 'timestamp':
for i in value:
records[key] += [prev_timestamp +
timedelta(seconds=1.0*i/index)]
else:
records[key] += value
tmp_records = {'timestamp': [], 'Core': [], 'IPC': [
], 'LLC Misses': [], 'LLC Util (KB)': [], 'MBL (MB/s)': []}
index = 0
prev_timestamp = timestamp
elif 'CORE' in line:
pass
else:
tmp_records['timestamp'].append(index)
separated = line.split(' ')
separated = [v for v in separated if v != '']
tmp_records['Core'].append(int(separated[0]))
tmp_records['IPC'].append(float(separated[1]))
tmp_records['LLC Misses'].append(int(separated[2][:-1])*1000)
tmp_records['LLC Util (KB)'].append(float(separated[3]))
tmp_records['MBL (MB/s)'].append(float(separated[4]))
for key, value in tmp_records.items():
if key == 'timestamp':
for i in value:
records[key] += [prev_timestamp +
timedelta(seconds=1.0*i/index)]
else:
records[key] += value
# return the records as Pandas dataframe
records_df = pd.DataFrame(records)
return records_df
def ReadPerfMon(perf_mon_file):
"""
This function parses the output of the Linux Perf tool.
"""
with open(perf_mon_file) as f:
lines = f.readlines()
records = {'timestamp': []} # more fields are added dynamically
for line in lines:
separated = line.split(' ')
separated = [v for v in separated if v != '']
try:
if 'counted' in separated[2]:
del separated[2]
except:
pass
if (len(separated) < 3) or (len(separated) > 4):
continue
time = float(separated[0])
field = separated[2]
try:
val = int(separated[1].replace(',', ''))
except:
val = None
try:
records[field].append(val)
except:
records[field] = [val] # first element of the list
try:
if records['timestamp'][-1] != time:
records['timestamp'].append(time)
except:
records['timestamp'].append(time) # first append
# return the records as Pandas dataframe
    return pd.DataFrame(records)
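# Illustrative usage sketch (assumption: the monitor output files live under DATA_DIR):
# pqos_df = ReadPQOSMSRMon(os.path.join(DATA_DIR, 'pqos-msr-mon.out'))
# perf_df = ReadPerfMon(os.path.join(DATA_DIR, 'perf-mon.out'))
# print(pqos_df.groupby('Core')['IPC'].mean())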
import pandas as pd
import numpy as np
import lightgbm as lgbm
from scipy import sparse
from datetime import datetime
def load_sparse_matrix(filename):
y = np.load(filename)
z = sparse.coo_matrix((y['data'], (y['row'], y['col'])), shape=y['shape'])
return z
x_train = load_sparse_matrix('../input/train_tfidf.npz')
x_test = load_sparse_matrix('../input/test_tfidf.npz')
target = pd.read_csv('../input/train.csv', usecols=['final_status'])
train = pd.read_csv('../input/train.csv', usecols=['created_at', 'deadline', 'launched_at', 'state_changed_at'])
test = pd.read_csv('../input/test.csv', usecols=['created_at', 'deadline', 'launched_at', 'state_changed_at', 'project_id'])
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
#Prepare data, making it binary (0/1)
df = pd.read_pickle("datasetspandas/dutch_export_transposed.pkl")
ibd = df.loc[df['Unnamed: 0'].str.contains('33_')]
ibd.apply(lambda row: row.astype(str).str.contains('-1').any(), axis=1)
ibd2 = ibd.fillna(0)
ibd2 = ibd2.set_index('Unnamed: 0')
ibd3 = ibd2.apply(pd.to_numeric)
ibd3 = ibd3.where(ibd3 <= 0, 1)
ibd3 = ibd3.where(ibd3 >= 0, 0)
#Check if there any remaining NaNs (-1)
ibd3[ibd3.eq(-1).any(1)]
#Save
ibd3.to_pickle("datasetspandas/dutch_export_transposed_ibd_binary.pkl")
#PCA
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
#Read data
data = pd.read_pickle("datasetspandas/dutch_export_transposed_ibd_binary.pkl")
#Selection of prevalent features, removal of antibodies that are present in less than 5% of samples
def Prevalence(Series):
Prev = Series.sum()/Series.count()
return(Prev)
Prev_thresh = 0.05
print("Computing antibody prevalence")
Prevalences = data.apply(Prevalence, axis=0)
Vector_keep = Prevalences > Prev_thresh #True/False vector of the features with prevalence > 5%
data_f = data[Vector_keep.index[Vector_keep]] #Removal of columns that do not pass the prevalence threshold
#Perform PCA
print("Computing PCA")
pca = PCA(n_components=10) #specify no of principal components
pca.fit(data_f)
Egenvectors = pca.components_
Explained = pca.explained_variance_ratio_ * 100 #Percentage variance explained
Explained = [round(Explained[0],1)], round(Explained[1],1)
PCs = pca.transform(data_f) #Compute PCs using the eigenvectors calculated in the PCA function
Eigenvectors = Egenvectors.T
Eigenvectors = pd.DataFrame(Eigenvectors,columns=["Load_PC1", "Load_PC2", "Load_PC3", "Load_PC4", "Load_PC5", "Load_PC6", "Load_PC7", "Load_PC8", "Load_PC9", "Load_PC10"])
Eigenvectors["Probe"] = data_f.columns
#import sample info
metadata = pd.read_excel("datasetspandas/Metadata IBD cohort_Arno_updated_aug2021.xlsx")
metadata.set_index('Sample_id_WIS')
#Creating a pandas dataframe with the PCs, IDs and cohort info
PCs = pd.DataFrame(PCs,columns=["PC1", "PC2", "PC3", "PC4", "PC5", "PC6", "PC7", "PC8", "PC9", "PC10"])
PCs.index = data_f.index
data_f.index.names = ["Sample_id_WIS"]
PCs_merged = PCs.merge(metadata, on=["Sample_id_WIS"])
#In-between: examining loading factors of PCs
loadingfactors = pd.read_csv("datasetspandas/Loadings_PCIBD.csv")
loadingfactors.sort_values(by='Load_PC1', ascending=False)
proteindata = pd.read_csv("datasetspandas/df_info_AT.csv")
proteindata = proteindata.rename({'Unnamed: 0': 'Probe'}, axis=1)
loadingfactorswithproteins = loadingfactors.merge(proteindata, on=["Probe"])
loadingfactorswithproteins = loadingfactorswithproteins.drop(columns=['aa_seq', 'len_seq', 'pos', 'is_pos_cntrl', 'is_neg_cntrl', 'is_phage', 'is_influenza', 'is_allergens', 'is_genome_editing', 'IEDB_DOIDs', 'IEDB_comments',
    'IEDB_organism_name', 'IEDB_parent_species', 'is_rand_cntrl', 'is_VFDB', 'is_patho_strain', 'is_IgA_coated_strain', 'is_probio_strain', 'bac_src', 'is_gut_microbiome', 'Unnamed: 0'])
loadingfactorswithproteins.to_excel("datasetspandas/loadingfactorsIBDcohort.xlsx")
#Visualization of loading factors
sns.set(rc={'figure.figsize':(11.7,8.27)}, font_scale = 1.5)
sns.set(rc={"figure.dpi":300, 'savefig.dpi':300})
sns.set_style("white")
Plot = sns.scatterplot(x="Load_PC1", y="Load_PC2", data=loadingfactors)
#Add % of variance explained by the PC in each axis
Plot.set(xlabel="Load_PC1({N}%)".format(N=str(Explained[0])) , ylabel ="Load_PC2({N}%)".format(N=str(Explained[1])))
Plot.figure.savefig("datasetspandas/PCAIBD-loadingfactors.png")
#Clustering algorithm (KMeans). As visual inspection of PCA seems to indicate two major clusters, set k to 2
print("2-means clustering algorithm")
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=2, random_state=0).fit(data_f)
Clusters = kmeans.labels_
#Adding clusters to dataframe
Clusters = pd.DataFrame(Clusters,columns=["Cluster"])
PCs_merged = pd.concat([PCs_merged, Clusters], axis=1)
"""
Classes for porfolio construction
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from numpy.linalg import inv, eig
from scipy.optimize import minimize
import scipy.cluster.hierarchy as sch
from quantfin.statistics import cov2corr
class HRP(object):
"""
Implements Hierarchical Risk Parity
"""
def __init__(self, cov, corr=None, method='single', metric='euclidean'):
"""
Combines the assets in `data` using HRP
returns an object with the following attributes:
- 'cov': covariance matrix of the returns
- 'corr': correlation matrix of the returns
- 'sort_ix': list of sorted column names according to cluster
- 'link': linkage matrix of size (N-1)x4 with structure Y=[{y_m,1 y_m,2 y_m,3 y_m,4}_m=1,N-1].
At the i-th iteration, clusters with indices link[i, 0] and link[i, 1] are combined to form
cluster n+1. A cluster with an index less than n corresponds to one of the original observations.
The distance between clusters link[i, 0] and link[i, 1] is given by link[i, 2]. The fourth value
link[i, 3] represents the number of original observations in the newly formed cluster.
- 'weights': final weights for each asset
:param data: pandas DataFrame where each column is a series of returns
:param method: any method available in scipy.cluster.hierarchy.linkage
:param metric: any metric available in scipy.cluster.hierarchy.linkage
"""
# TODO include detoning as an optional input
assert isinstance(cov, pd.DataFrame), "input 'cov' must be a pandas DataFrame"
self.cov = cov
if corr is None:
self.corr = cov2corr(cov)
else:
assert isinstance(corr, pd.DataFrame), "input 'corr' must be a pandas DataFrame"
self.corr = corr
self.method = method
self.metric = metric
self.link = self._tree_clustering(self.corr, self.method, self.metric)
self.sort_ix = self._get_quasi_diag(self.link)
self.sort_ix = self.corr.index[self.sort_ix].tolist() # recover labels
self.sorted_corr = self.corr.loc[self.sort_ix, self.sort_ix] # reorder correlation matrix
self.weights = self._get_recursive_bisection(self.cov, self.sort_ix)
# TODO self.cluster_nember = sch.fcluster(self.link, t=5, criterion='maxclust')
@staticmethod
def _tree_clustering(corr, method, metric):
dist = np.sqrt((1 - corr)/2)
link = sch.linkage(dist, method, metric)
return link
@staticmethod
def _get_quasi_diag(link):
link = link.astype(int)
sort_ix = pd.Series([link[-1, 0], link[-1, 1]])
num_items = link[-1, 3]
while sort_ix.max() >= num_items:
sort_ix.index = range(0, sort_ix.shape[0]*2, 2) # make space
df0 = sort_ix[sort_ix >= num_items] # find clusters
i = df0.index
j = df0.values - num_items
sort_ix[i] = link[j, 0] # item 1
df0 = pd.Series(link[j, 1], index=i+1)
sort_ix = sort_ix.append(df0) # item 2
sort_ix = sort_ix.sort_index() # re-sort
sort_ix.index = range(sort_ix.shape[0]) # re-index
return sort_ix.tolist()
def _get_recursive_bisection(self, cov, sort_ix):
w = pd.Series(1, index=sort_ix, name='HRP')
c_items = [sort_ix] # initialize all items in one cluster
# c_items = sort_ix
while len(c_items) > 0:
# bi-section
c_items = [i[j:k] for i in c_items for j, k in ((0, len(i) // 2), (len(i) // 2, len(i))) if len(i) > 1]
for i in range(0, len(c_items), 2): # parse in pairs
c_items0 = c_items[i] # cluster 1
c_items1 = c_items[i + 1] # cluster 2
c_var0 = self._get_cluster_var(cov, c_items0)
c_var1 = self._get_cluster_var(cov, c_items1)
alpha = 1 - c_var0 / (c_var0 + c_var1)
w[c_items0] *= alpha # weight 1
w[c_items1] *= 1 - alpha # weight 2
return w
def _get_cluster_var(self, cov, c_items):
cov_ = cov.loc[c_items, c_items] # matrix slice
w_ = self._get_ivp(cov_).reshape(-1, 1)
c_var = np.dot(np.dot(w_.T, cov_), w_)[0, 0]
return c_var
@staticmethod
def _get_ivp(cov):
ivp = 1 / np.diag(cov)
ivp /= ivp.sum()
return ivp
def plot_corr_matrix(self, save_path=None, show_chart=True, cmap='vlag', linewidth=0, figsize=(10, 10)):
"""
Plots the correlation matrix
:param save_path: local directory to save file. If provided, saves a png of the image to the address.
:param show_chart: If True, shows the chart.
:param cmap: matplotlib colormap.
:param linewidth: witdth of the grid lines of the correlation matrix.
:param figsize: tuple with figsize dimensions.
"""
sns.clustermap(self.corr, method=self.method, metric=self.metric, cmap=cmap,
figsize=figsize, linewidths=linewidth,
col_linkage=self.link, row_linkage=self.link)
if not (save_path is None):
plt.savefig(save_path)
if show_chart:
plt.show()
plt.close()
def plot_dendrogram(self, show_chart=True, save_path=None, figsize=(8, 8),
threshold=None):
"""
Plots the dendrogram using scipy's own method.
:param show_chart: If True, shows the chart.
:param save_path: local directory to save file.
:param figsize: tuple with figsize dimensions.
:param threshold: height of the dendrogram to color the nodes. If None, the colors of the nodes follow scipy's
standard behaviour, which cuts the dendrogram on 70% of its height (0.7*max(self.link[:,2]).
"""
plt.figure(figsize=figsize)
dn = sch.dendrogram(self.link, orientation='left', labels=self.sort_ix, color_threshold=threshold)
plt.tight_layout()
if not (save_path is None):
plt.savefig(save_path)
if show_chart:
plt.show()
plt.close()
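# Illustrative usage sketch (assumption: `returns` is a DataFrame of asset returns):
# hrp = HRP(cov=returns.cov())
# print(hrp.weights.sort_values(ascending=False))
# hrp.plot_dendrogram(show_chart=True)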
class MinVar(object):
"""
Implements Minimal Variance Portfolio
"""
# TODO review this class
def __init__(self, data):
"""
Combines the assets in 'data' by finding the minimal variance portfolio
returns an object with the following atributes:
- 'cov': covariance matrix of the returns
- 'weights': final weights for each asset
:param data: pandas DataFrame where each column is a series of returns
"""
assert isinstance(data, pd.DataFrame), "input 'data' must be a pandas DataFrame"
self.cov = data.cov()
eq_cons = {'type': 'eq',
'fun': lambda w: w.sum() - 1}
w0 = np.zeros(self.cov.shape[0])
res = minimize(self._port_var, w0, method='SLSQP', constraints=eq_cons,
options={'ftol': 1e-9, 'disp': False})
if not res.success:
raise ArithmeticError('Convergence Failed')
self.weights = pd.Series(data=res.x, index=self.cov.columns, name='Min Var')
def _port_var(self, w):
return w.dot(self.cov).dot(w)
class IVP(object):
"""
Implements Inverse Variance Portfolio
"""
# TODO review this class
def __init__(self, data, use_std=False):
"""
Combines the assets in 'data' by their inverse variances
returns an object with the following atributes:
- 'cov': covariance matrix of the returns
- 'weights': final weights for each asset
:param data: pandas DataFrame where each column is a series of returns
:param use_std: if True, uses the inverse standard deviation. If False, uses the inverse variance.
"""
assert isinstance(data, pd.DataFrame), "input 'data' must be a pandas DataFrame"
assert isinstance(use_std, bool), "input 'use_variance' must be boolean"
self.cov = data.cov()
w = np.diag(self.cov)
if use_std:
w = np.sqrt(w)
w = 1 / w
w = w / w.sum()
self.weights = pd.Series(data=w, index=self.cov.columns, name='IVP')
class ERC(object):
"""
Implements Equal Risk Contribution portfolio
"""
# TODO review this class
def __init__(self, data, vol_target=0.10):
"""
Combines the assets in 'data' so that all of them have equal contributions to the overall risk of the portfolio.
Returns an object with the following atributes:
- 'cov': covariance matrix of the returns
- 'weights': final weights for each asset
:param data: pandas DataFrame where each column is a series of returns
"""
self.cov = data.cov()
self.vol_target = vol_target
self.n_assets = self.cov.shape[0]
cons = ({'type': 'ineq',
                'fun': lambda w: vol_target - self._port_vol(w)},  # enforces port_vol(w) <= vol_target
{'type': 'eq',
'fun': lambda w: 1 - w.sum()})
w0 = np.zeros(self.n_assets)
res = minimize(self._dist_to_target, w0, method='SLSQP', constraints=cons)
self.weights = pd.Series(index=self.cov.columns, data=res.x, name='ERC')
def _port_vol(self, w):
return np.sqrt(w.dot(self.cov).dot(w))
def _risk_contribution(self, w):
return w * ((w @ self.cov) / (self._port_vol(w)**2))
def _dist_to_target(self, w):
return np.abs(self._risk_contribution(w) - np.ones(self.n_assets)/self.n_assets).sum()
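# Illustrative usage sketch (assumption: `returns` is a DataFrame of daily asset returns):
# weights = pd.concat([MinVar(returns).weights,
#                      IVP(returns).weights,
#                      ERC(returns, vol_target=0.10).weights], axis=1)
# print(weights)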
class PrincipalPortfolios(object):
"""
Implementation of the 'Principal Portfolios'.
https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3623983
"""
def __init__(self, returns, signals):
# TODO covariance shirinkage using Eigenvalue reconstruction
"""
[DESCRIPTION HERE OF ALL THE ATTRIBUTES]
:param returns:
:param signals: Should already have the appropriate lag.
"""
self.asset_names = list(returns.columns)
self.asset_number = len(self.asset_names)
self.returns, self.signals = self._trim_dataframes(returns, signals)
self.pred_matrix = self._get_prediction_matrix()
self.cov_returns = self._get_covariance_returns()
# Principal Portfolios (PP)
self.svd_left, self.svd_values, self.svd_right = self._get_svd()
self.er_pp = self.svd_values.sum() # equivalent to tr(L @ PI)
self.optimal_selection = self.svd_right @ self.svd_left.T # paper calls this L, proposition 3, pg 13
self.optimal_weights = self._get_optimal_weights()
# Latent factor
self.factor_weights = self._get_factor_weights()
# Symmetry decompositions
self.pi_s, self.pi_a = self._get_symmetry_separation(self.pred_matrix)
# Principal Exposure Portfolios (PEP) - Symmetric Strategies
self.pi_s_eigval, self.pi_s_eigvec = self._get_symmetric_eig()
# Principal Alpha Portfolios (PAP) - Anti-symmetric Strategies
self.pi_a_eigval, self.pi_a_eigvec = self._get_antisymmetric_eig()
def get_pp(self, k=1):
"""
Gets the weights of k-th principal portfolio, shown in euqation 15 of the paper.
:param k: int. The number of the desired principal portfolio.
:return: tuple. First entry are the weights, second is the selection matrix and third is the singular
value, which can be interpreted as the expected return (proposition 4).
"""
assert k <= self.asset_number, "'k' must not be bigger than then number of assets"
uk = self.svd_left[:, k - 1].reshape((-1, 1))
vk = self.svd_right[:, k - 1].reshape((-1, 1))
s = self.signals.iloc[-1].values
singval = self.svd_values[k - 1]
lk = vk @ uk.T
wk = (s.T @ lk)
wk = pd.Series(index=self.asset_names, data=wk, name=f'PP {k}')
return wk, lk, singval
def get_pep(self, k=1, absolute=True):
"""
Gets the weights of k-th principal exposure portfolio (PEP), shown in equation 30 of the paper.
:param k: int. The number of the desired principal exposure portfolio.
:param absolute: If eigenvalues should be sorted on absolute value or not. Default is true, to get the
PEPs in order of expected return.
:return: tuple. First entry are the weights, second is the selection matrix and third is the eigenvalue,
which can be interpreted as the expected return (proposition 6).
"""
assert k <= self.asset_number, "'k' must not be bigger than then number of assets"
eigval, eigvec = self.pi_s_eigval, self.pi_s_eigvec
s = self.signals.iloc[-1].values
if absolute:
signal = np.sign(eigval)
eigvec = eigvec * signal # Switch the signals of the eigenvectors with negative eigenvalues
eigval = np.abs(eigval)
idx = eigval.argsort()[::-1] # re sort eigenvalues based on absolute value and the associated eigenvectors
eigval = eigval[idx]
eigvec = eigvec[:, idx]
vsk = eigvec[:, k - 1].reshape((-1, 1)) # from equation 30
lsk = vsk @ vsk.T
wsk = s.T @ lsk
wsk = pd.Series(data=wsk, index=self.asset_names, name=f'PEP {k}')
return wsk, lsk, eigval[k - 1]
def get_pap(self, k=1):
"""
Gets the weights of k-th principal alpha portfolio (PAP), shown in equation 35 of the paper.
:param k: int. The number of the desired principal alpha portfolio.
:return: tuple. First entry are the weights, second is the selection matrix and third is the
eigenvalue times 2, which can be interpreted as the expected return (proposition 8).
"""
assert k <= self.asset_number/2, "'k' must not be bigger than then half of the number of assets"
eigval, eigvec = self.pi_a_eigval, self.pi_a_eigvec
s = self.signals.iloc[-1].values
v = eigvec[:, k - 1].reshape((-1, 1))
x = v.real
y = v.imag
l = x @ y.T - y @ x.T
w = s.T @ l
w = pd.Series(data=w, index=self.asset_names, name=f'PAP {k}')
return w, l, 2 * eigval[k - 1]
def _get_prediction_matrix(self):
size = self.returns.shape[0]
# dev_mat = np.eye(size) - np.ones((size, size)) * (1 / size)
pi = (1 / size) * (self.returns.values.T @ self.signals.values)
return pi
def _get_optimal_weights(self):
s = self.signals.iloc[-1].values
l = self.optimal_selection
w = s.dot(l) # paper calls this S'L
w = pd.Series(index=self.asset_names, data=w)
return w
def _get_svd(self):
pi = self.pred_matrix
u, sing_values, vt = np.linalg.svd(pi)
return u, sing_values, vt.T
def _get_covariance_returns(self):
cov = self.returns.cov()
return cov
def _get_factor_weights(self):
cov = self.cov_returns.values
s = self.signals.iloc[-1].values
factor_weights = ((s @ inv(cov) @ s)**(-1)) * (inv(cov) @ s)
        factor_weights = pd.Series(data=factor_weights, index=self.asset_names, name='Factor Weights')
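        # (assumed) return the latent-factor weights to __init__; note that the remaining
        # helpers referenced there (_get_symmetry_separation, _get_symmetric_eig,
        # _get_antisymmetric_eig, _trim_dataframes) are not included in this snippet.
        return factor_weights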
import numpy as np
import pickle as pkl
import pandas as pd
import os, nltk, argparse, json
from gensim.models import Word2Vec
from tensorflow.contrib.keras import preprocessing
# Get embed_matrix(np.ndarray), word2index(dict) and index2word(dict). All of them including extra unknown word "<unk>" and padding word "<pad>", that is, returned size = param size + 2.
# embedding_model: A pre-trained gensim.models.Word2Vec model.
def build_emb_matrix_and_vocab(embedding_model, keep_in_dict=10000, embedding_size=50):
# 0 th element is the default one for unknown words, and keep_in_dict+1 th element is used as padding.
emb_matrix = np.zeros((keep_in_dict+2, embedding_size))
word2index = {}
index2word = {}
for k in range(1, keep_in_dict+1):
word = embedding_model.wv.index2word[k-1]
word2index[word] = k
index2word[k] = word
emb_matrix[k] = embedding_model[word]
word2index['<unk>'] = 0
index2word[0] = '<unk>'
word2index['<pad>'] = keep_in_dict+1
index2word[keep_in_dict+1] = '<pad>'
return emb_matrix, word2index, index2word
# Get an sentence (list of words) as list of index. All words change into lower form.
def __sent2index(wordlist, word2index):
wordlist = [word.lower() for word in wordlist]
sent_index = [word2index[word] if word in word2index else 0 for word in wordlist]
return sent_index
# Read data from directory <data_dir>, return a list (text) of list (sent) of list (word index).
def __gen_data_imdbv1(data_dir, word2index, forHAN):
data = []
for filename in os.listdir(data_dir):
file = os.path.join(data_dir, filename)
with open(file) as f:
content = f.readline()
if forHAN:
sent_list = nltk.sent_tokenize(content)
sents_word = [nltk.word_tokenize(sent) for sent in sent_list]
sents_index = [__sent2index(wordlist, word2index) for wordlist in sents_word]
data.append(sents_index)
else:
word_list = nltk.word_tokenize(content)
words_index = __sent2index(word_list, word2index)
data.append(words_index)
return data
# Read data from directory <data_dir>, return a list (text) of list (sent) of list (word index).
def __gen_data_scdata(data_file, word2index, forHAN, for_infer=False):
data = []
label = []
with open(data_file, 'r') as f:
lines = f.readlines()
for line in lines:
jsob = json.loads(line)
if not for_infer:
label.append(int(jsob['label']))
content = jsob['text']
if forHAN:
sent_list = nltk.sent_tokenize(content)
sents_word = [nltk.word_tokenize(sent) for sent in sent_list]
sents_index = [__sent2index(wordlist, word2index) for wordlist in sents_word]
data.append(sents_index)
else:
word_list = nltk.word_tokenize(content)
words_index = __sent2index(word_list, word2index)
data.append(words_index)
return data, label
# Pass in indexed dataset, padding and truncating to corresponding length in both text & sent level.
# return data_formatted(after padding&truncating), text_lens(number of sents), text_sent_lens(number of words in each sents inside the text)
def preprocess_text_HAN(data, max_sent_len, max_text_len, keep_in_dict=10000):
text_lens = [] # how many sents in each text
text_sent_lens = [] # a list of list, how many words in each no-padding sent
data_formatted = [] # padded and truncated data
for text in data:
# 1. text_lens
sent_lens = [len(sent) for sent in text]
text_len = len(sent_lens)
text_right_len = min(text_len, max_text_len)
text_lens.append(text_right_len)
# 2. text_sent_lens & data_formatted
sent_right_lens = [min(sent_len, max_sent_len) for sent_len in sent_lens]
text_formatted = preprocessing.sequence.pad_sequences(text, maxlen=max_sent_len, padding="post", truncating="post", value=keep_in_dict+1)
# sent level's padding & truncating are both done, here are padding and truncating in text level below.
lack_text_len = max_text_len - text_len
if lack_text_len > 0:
# padding
sent_right_lens += [0]*lack_text_len
extra_rows = np.full((lack_text_len, max_sent_len), keep_in_dict+1) # complete-paddinged sents
text_formatted_right_len = np.append(text_formatted, extra_rows, axis=0)
elif lack_text_len < 0:
# truncating
sent_right_lens = sent_right_lens[:max_text_len]
row_index = [max_text_len+i for i in list(range(0, -lack_text_len))]
text_formatted_right_len = np.delete(text_formatted, row_index, axis=0)
else:
# exactly, nothing to do
text_formatted_right_len = text_formatted
text_sent_lens.append(sent_right_lens)
data_formatted.append(text_formatted_right_len)
return data_formatted, text_lens, text_sent_lens
# Pass in indexed dataset, padding and truncating to corresponding length in sent level.
# return data_formatted(after padding&truncating), sent_lens(number of words inside the sent)
def preprocess_text(data, max_sent_len, keep_in_dict=10000):
# 1. sent_lens
sent_lens = []
for sent in data:
sent_len = len(sent)
sent_right_len = min(sent_len, max_sent_len)
sent_lens.append(sent_right_len)
#2. data_formatted
data_formatted = preprocessing.sequence.pad_sequences(data, maxlen=max_sent_len, padding="post", truncating="post", value=keep_in_dict+1)
#print(type(data_formatted))
data_formatted = list(data_formatted)
return data_formatted, sent_lens
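# Illustrative usage sketch (assumption: `indexed_docs` is a list of word-index lists
# produced by __sent2index):
# padded_docs, doc_lens = preprocess_text(indexed_docs, max_sent_len=400)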
# do all things above and save.
def imdbv1(working_dir="../data/aclImdb", forHAN=False):
#============================================================
# 1. embedding matrix, word2index table, index2word table
#============================================================
fname = os.path.join(working_dir, "imdb_embedding")
if os.path.isfile(fname):
embedding_model = Word2Vec.load(fname)
else:
print("please run gen_word_embeddings.py first to generate embeddings!")
exit(1)
print("generate word2index and index2word, get corresponding-sized embedding maxtrix...")
emb_matrix, word2index, index2word = build_emb_matrix_and_vocab(embedding_model)
#================================================================
# 2. indexed dataset: number/int representation, not string
#================================================================
print("tokenizing and word-index-representing...")
train_dir = os.path.join(working_dir, "train")
train_pos_dir = os.path.join(train_dir, "pos")
train_neg_dir = os.path.join(train_dir, "neg")
test_dir = os.path.join(working_dir, "test")
test_pos_dir = os.path.join(test_dir, "pos")
test_neg_dir = os.path.join(test_dir, "neg")
train_pos_data = __gen_data_imdbv1(train_pos_dir, word2index, forHAN)
train_neg_data = __gen_data_imdbv1(train_neg_dir, word2index, forHAN)
train_data = train_neg_data + train_pos_data
test_pos_data = __gen_data_imdbv1(test_pos_dir, word2index, forHAN)
test_neg_data = __gen_data_imdbv1(test_neg_dir, word2index, forHAN)
test_data = test_neg_data + test_pos_data
#================================
# 3. padding and truncating
#================================
print("padding and truncating...")
if forHAN:
x_train, train_text_lens, train_text_sent_lens = preprocess_text_HAN(train_data, max_sent_length, max_text_length)
x_test, test_text_lens, test_text_sent_lens = preprocess_text_HAN(test_data, max_sent_length, max_text_length)
else:
x_train, train_sent_lens = preprocess_text(train_data, max_sent_length)
x_test, test_sent_lens = preprocess_text(test_data, max_sent_length)
y_train = [0]*len(train_neg_data)+[1]*len(train_pos_data)
y_test = [0]*len(test_neg_data)+[1]*len(test_pos_data)
#===============
# 4. saving
#===============
print("save word embedding matrix...")
emb_filename = os.path.join(working_dir, "emb_matrix")
pkl.dump([emb_matrix, word2index, index2word], open(emb_filename, "wb"))
print("save data for training...")
if forHAN:
df_train = pd.DataFrame({'text':x_train, 'label':y_train, 'text_length':train_text_lens, 'sents_length':train_text_sent_lens})
else:
df_train = pd.DataFrame({'text':x_train, 'label':y_train, 'text_length':train_sent_lens})
train_filename = os.path.join(working_dir, "train_df_file")
df_train.to_pickle(train_filename)
print("save data for testing...")
    if forHAN:
df_test = pd.DataFrame({'text':x_test, 'label':y_test, 'text_length':test_text_lens, 'sents_length':test_text_sent_lens})
else:
df_test = pd.DataFrame({'text':x_test, 'label':y_test, 'text_length':test_sent_lens})
test_filename = os.path.join(working_dir, "test_df_file")
df_test.to_pickle(test_filename)
# do all things above and save.
def scdata(working_dir="../data/sentence_consistency_data", forHAN=False):
#============================================================
# 1. embedding matrix, word2index table, index2word table
#============================================================
fname = os.path.join(working_dir, "scdata_embedding")
if os.path.isfile(fname):
embedding_model = Word2Vec.load(fname)
else:
print("please run gen_word_embeddings.py first to generate embeddings!")
exit(1)
print("generate word2index and index2word, get corresponding-sized embedding maxtrix...")
emb_matrix, word2index, index2word = build_emb_matrix_and_vocab(embedding_model)
#================================================================
# 2. indexed dataset: number/int representation, not string
#================================================================
print("tokenizing and word-index-representing...")
train_file = os.path.join(working_dir, "train_data")
test_file = os.path.join(working_dir, "test_data")
valid_file = os.path.join(working_dir, "valid_data")
train_data, y_train = __gen_data_scdata(train_file, word2index, forHAN)
test_data, _ = __gen_data_scdata(test_file, word2index, forHAN, for_infer=True)
valid_data, y_valid = __gen_data_scdata(valid_file, word2index, forHAN)
#================================
# 3. padding and truncating
#================================
print("padding and truncating...")
if forHAN:
x_train, train_text_lens, train_text_sent_lens = preprocess_text_HAN(train_data, max_sent_length, max_text_length)
x_test, test_text_lens, test_text_sent_lens = preprocess_text_HAN(test_data, max_sent_length, max_text_length)
x_valid, valid_text_lens, valid_text_sent_lens = preprocess_text_HAN(valid_data, max_sent_length, max_text_length)
else:
x_train, train_sent_lens = preprocess_text(train_data, max_sent_length)
x_test, test_sent_lens = preprocess_text(test_data, max_sent_length)
x_valid, valid_sent_lens = preprocess_text(valid_data, max_sent_length)
#===============
# 4. saving
#===============
print("save word embedding matrix...")
emb_filename = os.path.join(working_dir, "emb_matrix")
pkl.dump([emb_matrix, word2index, index2word], open(emb_filename, "wb"))
print("save data for training...")
if forHAN:
df_train = pd.DataFrame({'text':x_train, 'label':y_train, 'text_length':train_text_lens, 'sents_length':train_text_sent_lens})
else:
df_train = pd.DataFrame({'text':x_train, 'label':y_train, 'text_length':train_sent_lens})
train_filename = os.path.join(working_dir, "train_df_file")
df_train.to_pickle(train_filename)
print("save data for testing...")
if forHAN:
        df_test = pd.DataFrame({'text':x_test, 'text_length':test_text_lens, 'sents_length':test_text_sent_lens})
from aniachi.systemUtils import Welcome as W
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
from pyramid.httpexceptions import HTTPFound
from pyramid.response import FileResponse
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPNotFound
from fbprophet import Prophet
from termcolor import colored
import os
import numpy as np
import pandas as pd
import pkg_resources
import matplotlib.pyplot as plt
import matplotlib
from io import StringIO
from io import BytesIO
import xml.etree.ElementTree as et
import pickle as pkl
import base64
import traceback
import datetime
import openpyxl
import setuptools
import aniachi
import socket
import pyqrcode
import argparse
import textwrap
port = 8080
file ='mx_us.csv'
@view_config(route_name='hello', renderer='home.jinja2')
def hello_world(request):
return {'name': 'Running Server','port':port,'pyramid':pkg_resources.get_distribution('pyramid').version
,'numpy':np.__version__,'pandas':pd.__version__ ,'favicon':'aniachi_logo.png','matplotlib':matplotlib.__version__,
'fbprophet':pkg_resources.get_distribution('fbprophet').version,'openpyxl ':openpyxl.__version__,'setuptools':setuptools.__version__,
'py_common_fetch':pkg_resources.get_distribution('py-common-fetch').version,'host':socket.gethostbyname(socket.gethostname()),
'pyqrcode':pkg_resources.get_distribution('pyqrcode').version,'argparse':argparse.__version__,'pypng':pkg_resources.get_distribution('pypng').version
}
#
#
@view_config(route_name='entry')
def entry_point(request):
return HTTPFound(location='app/welcome')
#
#
def getParamterOrdefault(d,k,v,valid):
aux = v
try:
if (d[k] in valid): aux = d[k]
except Exception as e:
pass
return aux
#
#
def getIntParameter(d,k,v,r):
aux=int(v)
try:
if isinstance(int(d[k]), int):
if int(d[k]) in r:
aux= int(d[k])
except Exception as e:
pass
return aux
def getDataframe():
return pd.read_csv(os.path.join(os.getcwd(),file))
#
#
def getFilteredDataframe():
mx_peso = getDataframe()
mx_peso.columns = ['date', 'mx_usd']
mx_peso.date = pd.to_datetime(mx_peso['date'], format='%Y-%m-%d')
# remove dots
mx_peso = mx_peso[mx_peso['mx_usd'] != '.']
mx_peso.mx_usd = mx_peso.mx_usd.astype(float)
return mx_peso
#
#
def getForecastData(days=120):
mx_peso = getDataframe()
mx_peso.columns = ['date', 'mx_usd']
mx_peso.date = pd.to_datetime(mx_peso['date'], format='%Y-%m-%d')
# remove dots
mx_peso = mx_peso[mx_peso['mx_usd'] != '.']
mx_peso.mx_usd = mx_peso.mx_usd.astype(float)
df = pd.DataFrame.copy(mx_peso, deep=True)
df.columns = ['ds', 'y']
prophet = Prophet(changepoint_prior_scale=0.15)
prophet.fit(df)
forecast = prophet.make_future_dataframe(periods=days, freq='D')
forecast = prophet.predict(forecast)
return prophet, forecast
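# Quick local sanity check (assumption: mx_us.csv is present in the working directory):
# model, fc = getForecastData(days=30)
# print(fc[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail())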
@view_config(route_name='dataset')
def datasetServer(request):
format = getParamterOrdefault(request.params,'format','default',['html','json','xml','serialized','csv','excel'])
if (format=='csv'):
df = getDataframe()
s = StringIO()
df.to_csv(s)
r = Response(s.getvalue(), content_type='application/CSV', charset='UTF-8')
elif (format=='json'):
df = getDataframe()
s = StringIO()
df.to_json(s)
r = Response(s.getvalue(), content_type='application/json', charset='UTF-8')
elif (format=='xml'):
df = getDataframe()
root = et.Element('root')
for i, row in df.iterrows():
data = et.SubElement(root, 'data')
data.set('iter',str(i))
date = et.SubElement(data, 'date')
value = et.SubElement(data, 'value')
date.text = row[0]
value.text = row[1]
r = Response(et.tostring(root), content_type='application/xml', charset='UTF-8')
elif (format=='html'):
df = getDataframe()
s = StringIO()
df.to_html(s,index=True)
r = Response(s.getvalue(), content_type='text/html', charset='UTF-8')
elif (format=='serialized'):
r = Response(base64.encodebytes(pkl.dumps(getDataframe())).decode('utf-8'), content_type='text/html', charset='UTF-8')
elif (format == 'excel'):
b= BytesIO()
pd.ExcelWriter(b)
getDataframe().to_excel(b)
r = Response(b.getvalue(), content_type='application/force-download', content_disposition='attachment; filename=data.xls')
else:
r = Response('Bad paramters ' + str(request.params), content_type='text/html', charset='UTF-8')
return r
#
#
@view_config(route_name='forecast')
def forecastServer(request):
format = getParamterOrdefault(request.params, 'format', 'default', ['html', 'json', 'xml', 'serialized', 'csv', 'excel'])
days = getIntParameter(request.params, 'days', -1, range(20, 301))
if days == -1 or format=='default':
r = Response('Bad paramters ' + str(request.params), content_type='text/html', charset='UTF-8')
else:
if (format=='csv'):
_ , df = getForecastData(days)
s = StringIO()
df.to_csv(s)
r = Response(s.getvalue(), content_type='text/plain', charset='UTF-8')
elif (format == 'html'):
_ , df = getForecastData(days)
s = StringIO()
df.to_html(s, index=True)
r = Response(s.getvalue(), content_type='text/html', charset='UTF-8')
elif (format=='xml'):
_ , df = getForecastData(days)
root = et.Element('root')
for i, row in df.iterrows():
data = et.SubElement(root, 'row')
data.set('iter', str(i))
for head in df.columns:
aux = et.SubElement(data, head)
aux.text = str(row[head])
r = Response(et.tostring(root), content_type='text/xml', charset='UTF-8')
elif (format=='json'):
_ , df = getForecastData(days)
s = StringIO()
df.to_json(s)
r = Response(s.getvalue(), content_type='text/plain', charset='UTF-8')
elif (format == 'serialized'):
_, df = getForecastData(days)
r = Response(base64.encodebytes(pkl.dumps(df)).decode('utf-8'), content_type='text/html',
charset='UTF-8')
elif (format == 'excel'):
b= BytesIO()
            pd.ExcelWriter(b)
# 1. Pose the question
# What kinds of people were more likely to survive the Titanic disaster?
# 2. Understand the data
# 2.1 Collect the data
# https://www.kaggle.com/c/titanic
# 2.2 Import the data
# Suppress warning messages
import warnings
warnings.filterwarnings('ignore')
# Import data-processing packages
import numpy as np
import pandas as pd
# Import the data
# Training data set
train = pd.read_csv("./train.csv")
# Test data set
test = pd.read_csv("./test.csv")
# Show all columns
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('max_colwidth', 100)
# The training data has 891 rows
print('Training set:', train.shape, "\n", 'Test set:', test.shape, sep='')
rowNum_train = train.shape[0]
rowNum_test = test.shape[0]
print('Rows in the Kaggle training set:', rowNum_train,
      '\n'
      'Rows in the Kaggle test set:', rowNum_test,
      sep ='')
# Merge the two data sets so both can be cleaned at the same time
full = train.append(test, ignore_index = True)
print('Merged data set:', full.shape)
'''
describe() only reports statistics for numeric columns; other types are not shown,
for example the string columns Name and Cabin.
That makes sense: descriptive statistics are computed on numbers, so the column must be numeric.
'''
# 2.3 Inspect the data set
# Look at the data
print(full.head())
# Descriptive statistics for the numeric columns
full.describe()
# Data type and non-null count of every column
full.info()
'''
The data set has 1309 rows in total.
Among the numeric columns, Age and Fare contain missing data:
1) Age has 1046 non-null values, so 1309-1046=263 are missing, a missing rate of 263/1309≈20%
2) Fare has 1308 non-null values, so 1 value is missing
String columns:
1) Embarked has 1307 non-null values; only 2 are missing, which is very few
2) Cabin has 295 non-null values, so 1309-295=1014 are missing, a missing rate of 1014/1309≈77.5%, which is a lot
This points the way for the next step, data cleaning: only by knowing which columns have missing data can we treat them appropriately.
'''
# 3. Data cleaning
# 3.1 Data preprocessing
'''
The data set has 1309 rows in total.
Among the numeric columns, Age and Fare contain missing data:
1) Age has 1046 non-null values, so 1309-1046=263 are missing, a missing rate of 263/1309≈20%
2) Fare has 1308 non-null values, so 1 value is missing
For numeric columns, the simplest way to handle missing values is to fill them with the mean.
'''
print('Before processing:')
full.info()
# Age
full['Age'] = full['Age'].fillna(full['Age'].mean())
# Fare
full['Fare'] = full['Fare'].fillna(full['Fare'].mean())
print('After processing:')
full.info()
# Check that the data was processed correctly
print(full.head())
'''
1309 rows in total.
String columns:
1) Embarked has 1307 non-null values; only 2 are missing, which is very few
2) Cabin has 295 non-null values, so 1309-295=1014 are missing, a missing rate of 1014/1309≈77.5%, which is a lot
'''
# Port of embarkation (Embarked): see what the data looks like
'''
Departure point: S = Southampton, England
Stop 1: C = Cherbourg, France
Stop 2: Q = Queenstown, Ireland
'''
print(full['Embarked'].head())
'''
Embarked is a categorical variable; check the most common category and use it to fill the gaps
'''
print(full['Embarked'].value_counts())
'''
The results show that S is the most common category, so we fill the missing values with the most frequent value:
S = Southampton, England
'''
full['Embarked'] = full['Embarked'].fillna( 'S' )
# Cabin number (Cabin): see what the data looks like
print(full['Cabin'].head(), '\n')
# Cabin has a lot of missing data; fill missing values with U, meaning Unknown
full['Cabin'] = full['Cabin'].fillna( 'U' )
# Check that the data was processed correctly
print(full.head(), '\n')
# Review the final missing-value handling. Note that Survived is our label column,
# used as the prediction target, so it does not need to be processed
full.info()
# 3.2 Feature extraction
# 3.2.1 Classifying the columns
'''
1. Numeric:
PassengerId, Age, Fare, SibSp (siblings/spouses aboard), Parch (parents/children aboard)
2. Time series: none
3. Categorical:
1) With explicit categories
Sex: male, female
Embarked: departure point S = Southampton (England), stop 1 C = Cherbourg (France), stop 2 Q = Queenstown (Ireland)
Pclass: 1 = 1st class, 2 = 2nd class, 3 = 3rd class
2) String columns: features can be extracted from these, so they also count as categorical
Name
Cabin
Ticket
'''
full.info()
# 3.2.1 Categorical data with explicit categories
# Sex: male, female
# Embarked: departure point S = Southampton (England), stop 1 C = Cherbourg (France), stop 2 Q = Queenstown (Ireland)
# Pclass: 1 = 1st class, 2 = 2nd class, 3 = 3rd class
# Sex
# Look at the Sex column
print(full['Sex'].head())
'''
Map the sex values to numbers:
male maps to 1, female maps to 0
'''
sex_mapDict = {'male' : 1,
'female' : 0}
# map: applies a custom function to every element of the Series
full['Sex'] = full['Sex'].map(sex_mapDict)
print(full.head())
# Port of embarkation (Embarked)
'''
The values of Embarked are:
Departure point: S = Southampton, England
Stop 1: C = Cherbourg, France
Stop 2: Q = Queenstown, Ireland
'''
print(full['Embarked'].head())
# Holds the extracted features
embarkedDf = pd.DataFrame()
'''
Use get_dummies for one-hot encoding, producing dummy variables with the column prefix Embarked
'''
embarkedDf = pd.get_dummies(full['Embarked'], prefix='Embarked')
# Add the one-hot-encoded dummy variables to the Titanic data set full
full = pd.concat([full, embarkedDf], axis = 1)
'''
Embarked has already been one-hot encoded into its dummy variables,
so the original Embarked column is dropped here
'''
full.drop('Embarked', axis=1, inplace=True)
print(full.head())
'''
Explanation of the drop call above:
drop(name, axis=1) names the column to drop, say column A, and axis=1 tells pandas to work on columns.
Together that removes every value in column A, so the whole column A is deleted.
In short, remember this syntax for dropping columns: drop([col1, col2], axis=1)
'''
# Passenger class (Pclass)
'''
Passenger class (Pclass):
1 = 1st class, 2 = 2nd class, 3 = 3rd class
'''
# Holds the extracted features
pclassDf = pd.DataFrame()
# Use get_dummies for one-hot encoding with the column prefix Pclass
pclassDf = pd.get_dummies(full['Pclass'], prefix= 'Pclass')
print(pclassDf.head())
# Add the one-hot-encoded dummy variables to the Titanic data set full
full = pd.concat([full, pclassDf], axis=1)
# Drop the Pclass column
full.drop('Pclass', axis=1, inplace=True)
print(full.head())
# 3.2.1 Categorical data: string columns
# Extract the title from the name
'''
Look at what the Name column contains.
A striking feature of the passenger names is that every name includes a specific
form of address, i.e. a title. Extracting that information gives a very useful new
variable that can help with prediction.
For example:
Braund, Mr. <NAME>
Heikkinen, <NAME>
<NAME>, <NAME>
Peter, Master. <NAME>
'''
print(full['Name'].head())
# Practice extracting a title such as Mr from a string
# split divides a string and returns a list
# In a name like 'Braund, Mr. Owen Harris', the part before the comma is the surname and the part after is 'Title. Given names'
namel = 'Braund, Mr. Owen Harris'
'''
split divides a string on a separator and returns a list. Here the string is split on the comma,
so 'Braund, Mr. Owen Harris' is split into two parts: ['Braund', ' Mr. Owen Harris'].
Print the returned list to see for yourself. Index 1 of the list is the part that contains the title, i.e. ' Mr. Owen Harris'.
'''
# ' Mr. Owen Harris'
str1 = namel.split(',')[1]
'''
Split the string ' Mr. Owen Harris' again on '.', which gives the list [' Mr', ' Owen Harris'].
Index 0 of that list is the part containing the title, ' Mr'.
'''
# ' Mr'
str2 = str1.split('.')[0]
# strip() removes the given characters (whitespace by default) from both ends of a string
str3 = str2.strip()
'''
Define a function that extracts the title from a name
'''
def getTitle(name):
    str1=name.split( ',' )[1] # ' Mr. Owen Harris'
    str2=str1.split( '.' )[0] # ' Mr'
    # strip() removes the given characters (whitespace by default) from both ends of a string
    str3=str2.strip()
    return str3
# Holds the extracted features
titleDf = pd.DataFrame()
# map: applies a custom function to every element of the Series
titleDf['Title'] = full['Name'].map(getTitle)
print(titleDf.head())
'''
Define the following title categories:
Officer: government official
Royalty: royalty / nobility
Mr: married man
Mrs: married woman
Miss: young unmarried woman
Master: skilled person / teacher
'''
# Mapping from the title strings found in names to the defined title categories
title_mapDict = {
"Capt": "Officer",
"Col": "Officer",
"Major": "Officer",
"Jonkheer": "Royalty",
"Don": "Royalty",
"Sir" : "Royalty",
"Dr": "Officer",
"Rev": "Officer",
"the Countess":"Royalty",
"Dona": "Royalty",
"Mme": "Mrs",
"Mlle": "Miss",
"Ms": "Mrs",
"Mr" : "Mr",
"Mrs" : "Mrs",
"Miss" : "Miss",
"Master" : "Master",
"Lady" : "Royalty"
}
# map: applies a custom function to every element of the Series
titleDf['Title'] = titleDf['Title'].map(title_mapDict)
# Use get_dummies for one-hot encoding
titleDf = pd.get_dummies(titleDf['Title'])
print(titleDf.head())
# Add the one-hot-encoded dummy variables to the Titanic data set full
full = pd.concat([full, titleDf], axis=1)
# Drop the Name column
full.drop('Name', axis=1, inplace=True)
print('Dropped the Name column', full.shape)
full.head()
# Extract the cabin category from the cabin number
# Background: anonymous functions
'''
Python uses lambda to create anonymous functions.
"Anonymous" means the function is not defined with the standard def statement; the syntax is:
lambda arg1, arg2: function body or expression
'''
# Define an anonymous function that adds two numbers
sum = lambda a, b: a + b
# Call the sum function
print("Sum of the two values:", sum(10, 20))
'''
The first letter of the cabin number is the cabin category
'''
# Look at the contents of the Cabin column
print(full['Cabin'].head())
# Holds the cabin-number information
cabinDf = pd.DataFrame()
'''
The category of a cabin number is its first letter, e.g.
C85 maps to the category C
'''
full['Cabin'] = full['Cabin'].map(lambda c : c[0])
# Use get_dummies for one-hot encoding with the column prefix Cabin
cabinDf = pd.get_dummies(full['Cabin'], prefix='Cabin')
import pandas as pd
#import seaborn as sns
from matplotlib import pyplot as plt
import pdb
import glob
def get_all_dataframe(
data_source='election_reporting_dot_com',
state='mi',
year=2020,
):
county_folder_list = [
x.split('/')[-2] for x in glob.glob(
'./election_reporting_com/2020/mi/*/'
)
]
state = 'mi'
df_dict_all = {
'party': pd.DataFrame(),
'president': pd.DataFrame(),
'senator': pd.DataFrame()
}
for county in county_folder_list:
print(f'getting dataframe for county {county}')
df_dict = get_county_dataframe(
data_source='election_reporting_dot_com',
state='mi',
county=county,
year=2020
)
# pdb.set_trace()
for x in df_dict.keys():
if x in df_dict_all:
df_dict_all[x] = pd.concat(
[df_dict_all[x], df_dict[x]],
ignore_index=True
)
else:
                print(f'key {x} not recognized. press c to continue')
pdb.set_trace()
return df_dict_all
def get_county_dataframe(
data_source='election_reporting_dot_com',
state='mi',
county='kent',
year=2020
):
'''
    Return a dictionary of pandas dataframes given state, county, year and data source
'''
if data_source == 'election_reporting_dot_com':
file_list = glob.glob(
f'./election_reporting_com/{year}/{state}/{county}/*cleaned.csv'
)
df_dict = {}
# df_dict_keys = [
# x.split('/')[-1].split('.')[0] for x in file_list
# ]
df_dict_keys = ['party', 'president', 'senator']
# for x, y in zip(df_dict_keys, file_list):
for y in file_list:
print(f'reading from {y}')
for x in df_dict_keys:
if x in y:
df_dict[x] = pd.read_csv(y)
else:
return None
return df_dict
def plot_president_vs_senator_all_counties(
fig_counter_base=100,
state='mi',
save_figure=True,
year=2020
):
fig_counter = fig_counter_base
df_dict = get_all_dataframe()
df_p = df_dict['president']
df_s = df_dict['senator']
# precincts = df_p['precinct'].unique()
    df_merge = pd.merge(df_p, df_s, suffixes=('_p', '_s'), how='inner', on='precinct')
"""Tools for generating and forecasting with ensembles of models."""
import datetime
import numpy as np
import pandas as pd
import json
from autots.models.base import PredictionObject
from autots.models.model_list import no_shared
from autots.tools.impute import fill_median
horizontal_aliases = ['horizontal', 'probabilistic']
def summarize_series(df):
"""Summarize time series data. For now just df.describe()."""
df_sum = df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9])
return df_sum
def mosaic_or_horizontal(all_series: dict):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
"""
first_value = all_series[next(iter(all_series))]
if isinstance(first_value, dict):
return "mosaic"
else:
return "horizontal"
def parse_horizontal(all_series: dict, model_id: str = None, series_id: str = None):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
model_id (str): name of model to find series for
series_id (str): name of series to find models for
Returns:
list
"""
if model_id is None and series_id is None:
raise ValueError(
"either series_id or model_id must be specified in parse_horizontal."
)
if mosaic_or_horizontal(all_series) == 'mosaic':
if model_id is not None:
return [ser for ser, mod in all_series.items() if model_id in mod.values()]
else:
return list(set(all_series[series_id].values()))
else:
if model_id is not None:
return [ser for ser, mod in all_series.items() if mod == model_id]
else:
# list(set([mod for ser, mod in all_series.items() if ser == series_id]))
return [all_series[series_id]]
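# Illustrative sketch of the two dict shapes handled above (series and model ids are
# made up for the example, not taken from autots itself):
# >>> horiz = {"wind": "m1", "solar": "m2", "load": "m1"}     # horizontal: series -> model
# >>> parse_horizontal(horiz, model_id="m1")
# ['wind', 'load']
# >>> parse_horizontal(horiz, series_id="solar")
# ['m2']
# >>> mosaic = {"wind": {"1": "m1", "2": "m2"}}               # mosaic: series -> {step: model}
# >>> parse_horizontal(mosaic, series_id="wind")              # a set is used, so order may vary
# ['m1', 'm2']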
def BestNEnsemble(
ensemble_params,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime: dict,
prediction_interval: float = 0.9,
):
"""Generate mean forecast for ensemble of models.
Args:
ensemble_params (dict): BestN ensemble param dict
should have "model_weights": {model_id: weight} where 1 is default weight per model
forecasts (dict): {forecast_id: forecast dataframe} for all models
same for lower_forecasts, upper_forecasts
forecast_runtime (dict): dictionary of {forecast_id: timedelta of runtime}
prediction_interval (float): metadata on interval
"""
startTime = datetime.datetime.now()
forecast_keys = list(forecasts.keys())
model_weights = dict(ensemble_params.get("model_weights", {}))
ensemble_params['model_weights'] = model_weights
ensemble_params['models'] = {
k: v
for k, v in dict(ensemble_params.get('models')).items()
if k in forecast_keys
}
model_count = len(forecast_keys)
if model_count < 1:
raise ValueError("BestN failed, no component models available.")
sample_df = next(iter(forecasts.values()))
columnz = sample_df.columns
indices = sample_df.index
model_divisor = 0
ens_df = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_lower = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_upper = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in forecasts.items():
current_weight = float(model_weights.get(idx, 1))
ens_df = ens_df + (x * current_weight)
# also .get(idx, 0)
ens_df_lower = ens_df_lower + (lower_forecasts[idx] * current_weight)
ens_df_upper = ens_df_upper + (upper_forecasts[idx] * current_weight)
model_divisor = model_divisor + current_weight
ens_df = ens_df / model_divisor
ens_df_lower = ens_df_lower / model_divisor
ens_df_upper = ens_df_upper / model_divisor
ens_runtime = datetime.timedelta(0)
for x in forecasts_runtime.values():
ens_runtime = ens_runtime + x
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.datetime.now() - startTime,
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
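# A small worked example of the weighting above (model ids and values are hypothetical):
# with model_weights = {"a": 2, "b": 1} and point forecasts of 10 ("a") and 4 ("b") for a
# given series and step, the ensemble value is (2*10 + 1*4) / (2 + 1) = 8; the same
# weighted mean is applied to the lower and upper forecasts.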
def DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for distance ensemble."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
first_model_index = forecasts_list.index(ensemble_params['FirstModel'])
second_model_index = forecasts_list.index(ensemble_params['SecondModel'])
forecast_length = forecasts[0].shape[0]
dis_frac = ensemble_params['dis_frac']
first_bit = int(np.ceil(forecast_length * dis_frac))
second_bit = int(np.floor(forecast_length * (1 - dis_frac)))
ens_df = (
forecasts[first_model_index]
.head(first_bit)
.append(forecasts[second_model_index].tail(second_bit))
)
ens_df_lower = (
lower_forecasts[first_model_index]
.head(first_bit)
.append(lower_forecasts[second_model_index].tail(second_bit))
)
ens_df_upper = (
upper_forecasts[first_model_index]
.head(first_bit)
.append(upper_forecasts[second_model_index].tail(second_bit))
)
id_list = list(ensemble_params['models'].keys())
model_indexes = [idx for idx, x in enumerate(forecasts_list) if x in id_list]
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in model_indexes:
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result_obj = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result_obj
def horizontal_classifier(df_train, known: dict, method: str = "whatever"):
"""
CLassify unknown series with the appropriate model for horizontal ensembling.
Args:
df_train (pandas.DataFrame): historical data about the series. Columns = series_ids.
known (dict): dict of series_id: classifier outcome including some but not all series in df_train.
Returns:
dict.
"""
# known = {'EXUSEU': 'xx1', 'MCOILWTICO': 'xx2', 'CSUSHPISA': 'xx3'}
columnz = df_train.columns.tolist()
X = summarize_series(df_train).transpose()
X = fill_median(X)
known_l = list(known.keys())
unknown = list(set(columnz) - set(known_l))
Xt = X.loc[known_l]
Xf = X.loc[unknown]
Y = np.array(list(known.values()))
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(Xt, Y)
result = clf.predict(Xf)
result_d = dict(zip(Xf.index.tolist(), result))
# since this only has estimates, overwrite with known that includes more
final = {**result_d, **known}
# temp = pd.DataFrame({'series': list(final.keys()), 'model': list(final.values())})
# temp2 = temp.merge(X, left_on='series', right_index=True)
return final
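# Illustrative sketch (column and model names are made up): with df_train columns
# ["wind", "solar", "load"] and known = {"wind": "m1", "solar": "m2"}, each series is
# summarized with describe(), a GaussianNB is fit on the two known series, and a model id
# is predicted for "load"; the returned dict then covers all three columns, with the known
# assignments taking precedence over the predictions.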
def mosaic_classifier(df_train, known):
"""CLassify unknown series with the appropriate model for mosaic ensembles."""
known.index.name = "forecast_period"
upload = pd.melt(
known,
var_name="series_id",
value_name="model_id",
ignore_index=False,
).reset_index(drop=False)
upload['forecast_period'] = upload['forecast_period'].astype(int)
missing_cols = df_train.columns[
~df_train.columns.isin(upload['series_id'].unique())
]
if not missing_cols.empty:
forecast_p = np.arange(upload['forecast_period'].max() + 1)
p_full = np.tile(forecast_p, len(missing_cols))
missing_rows = pd.DataFrame(
{
'forecast_period': p_full,
'series_id': np.repeat(missing_cols.values, len(forecast_p)),
'model_id': np.nan,
},
index=None if len(p_full) > 1 else [0],
)
upload = pd.concat([upload, missing_rows])
X = fill_median(
(summarize_series(df_train).transpose()).merge(
upload, left_index=True, right_on="series_id"
)
)
X.set_index("series_id", inplace=True) # .drop(columns=['series_id'], inplace=True)
to_predict = X[X['model_id'].isna()].drop(columns=['model_id'])
X = X[~X['model_id'].isna()]
Y = X['model_id']
Xf = X.drop(columns=['model_id'])
# from sklearn.linear_model import RidgeClassifier
# from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(Xf, Y)
predicted = clf.predict(to_predict)
result = pd.concat(
[to_predict.reset_index(drop=False), pd.Series(predicted, name="model_id")],
axis=1,
)
cols_needed = ['model_id', 'series_id', 'forecast_period']
final = pd.concat(
[X.reset_index(drop=False)[cols_needed], result[cols_needed]], sort=True, axis=0
)
final['forecast_period'] = final['forecast_period'].astype(str)
final = final.pivot(values="model_id", columns="series_id", index="forecast_period")
try:
final = final[df_train.columns]
if final.isna().to_numpy().sum() > 0:
raise KeyError("NaN in mosaic generalization")
except KeyError as e:
raise ValueError(
f"mosaic_classifier failed to generalize for all columns: {repr(e)}"
)
return final
def generalize_horizontal(
df_train, known_matches: dict, available_models: list, full_models: list = None
):
"""generalize a horizontal model trained on a subset of all series
Args:
df_train (pd.DataFrame): time series data
known_matches (dict): series:model dictionary for some to all series
available_models (dict): list of models actually available
full_models (dict): models that are available for every single series
"""
org_idx = df_train.columns
org_list = org_idx.tolist()
# remove any unnecessary series
known_matches = {ser: mod for ser, mod in known_matches.items() if ser in org_list}
# here split for mosaic or horizontal
if mosaic_or_horizontal(known_matches) == "mosaic":
# make it a dataframe
mosaicy = pd.DataFrame.from_dict(known_matches)
# remove unavailable models
mosaicy = pd.DataFrame(mosaicy[mosaicy.isin(available_models)])
# so we can fill some missing by just using a forward fill, should be good enough
mosaicy.fillna(method='ffill', limit=5, inplace=True)
mosaicy.fillna(method='bfill', limit=5, inplace=True)
if mosaicy.isna().any().any() or mosaicy.shape[1] != df_train.shape[1]:
if full_models is not None:
k2 = pd.DataFrame(mosaicy[mosaicy.isin(full_models)])
else:
k2 = mosaicy.copy()
final = mosaic_classifier(df_train, known=k2)
return final.to_dict()
else:
return mosaicy.to_dict()
else:
# remove any unavailable models
k = {ser: mod for ser, mod in known_matches.items() if mod in available_models}
# check if any series are missing from model list
if not k:
raise ValueError("Horizontal template has no models matching this data!")
# test if generalization is needed
if len(set(org_list) - set(list(k.keys()))) > 0:
# filter down to only models available for all
# print(f"Models not available: {[ser for ser, mod in known_matches.items() if mod not in available_models]}")
# print(f"Series not available: {[ser for ser in df_train.columns if ser not in list(known_matches.keys())]}")
if full_models is not None:
k2 = {ser: mod for ser, mod in k.items() if mod in full_models}
else:
k2 = k.copy()
all_series_part = horizontal_classifier(df_train, k2)
# since this only has "full", overwrite with known that includes more
all_series = {**all_series_part, **k}
else:
all_series = known_matches
return all_series
def HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Generate forecast for per_series ensembling."""
startTime = datetime.datetime.now()
# this is meant to fill in any failures
available_models = [mod for mod, fcs in forecasts.items() if fcs.shape[0] > 0]
train_size = df_train.shape
# print(f"running inner generalization with training size: {train_size}")
full_models = [
mod for mod, fcs in forecasts.items() if fcs.shape[1] == train_size[1]
]
if not full_models:
print("No full models available for horizontal generalization!")
full_models = available_models # hope it doesn't need to fill
# print(f"FULLMODEL {len(full_models)}: {full_models}")
if prematched_series is None:
prematched_series = ensemble_params['series']
all_series = generalize_horizontal(
df_train, prematched_series, available_models, full_models
)
# print(f"ALLSERIES {len(all_series.keys())}: {all_series}")
org_idx = df_train.columns
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in all_series.items():
try:
c_fore = forecasts[mod_id][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
print(f"Horizontal ensemble unable to add model {mod_id} {repr(e)}")
# upper
c_fore = upper_forecasts[mod_id][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[mod_id][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
# make sure columns align to original
forecast_df = forecast_df.reindex(columns=org_idx)
u_forecast_df = u_forecast_df.reindex(columns=org_idx)
l_forecast_df = l_forecast_df.reindex(columns=org_idx)
# combine runtimes
try:
ens_runtime = sum(list(forecasts_runtime.values()), datetime.timedelta())
except Exception:
ens_runtime = datetime.timedelta(0)
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.datetime.now() - startTime,
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for per_series per distance ensembling."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
id_list = list(ensemble_params['models'].keys())
mod_dic = {x: idx for idx, x in enumerate(forecasts_list) if x in id_list}
forecast_length = forecasts[0].shape[0]
dist_n = int(np.ceil(ensemble_params['dis_frac'] * forecast_length))
dist_last = forecast_length - dist_n
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series1'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
forecast_df2, u_forecast_df2, l_forecast_df2 = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series2'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df2 = pd.concat([forecast_df2, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
        u_forecast_df2 = pd.concat([u_forecast_df2, c_fore], axis=1)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 12:46:24 2018
@author: nmei
"""
import os
working_dir = ''
import pandas as pd
pd.options.mode.chained_assignment = None
from glob import glob
import seaborn as sns
from scipy import stats
import numpy as np
from statsmodels.formula.api import ols#,mixedlm
from statsmodels.stats.anova import anova_lm
from utils import eta_squared,omega_squared,resample_ttest,MCPConverter,post_processing
from itertools import combinations
sns.set_style('whitegrid')
sns.set_context('poster')
saving_dir = '../figures/'
df_dir = '../results/for_spss'
if not os.path.exists(df_dir):
os.mkdir(df_dir)
def thresholding(value):
if value < 0.001:
return "***"
elif value < 0.01:
return "**"
elif value < 0.05:
return "*"
else:
return "ns"
def preparation(c):
df_temp = {}
for ((model,window,feature),df_sub) in c.groupby(['Models','Window','Attributes']):
if 'Tree' in model:
df_temp['{}_win{}_{}'.format(model,window,feature)] = df_sub['Values'].values
df_temp = pd.DataFrame(df_temp)
return df_temp
"""
Take the exponential of each of the coefficients to generate the odds ratios.
This tells you how a 1 unit increase or decrease in a variable affects the odds of being high POS.
"""
if __name__ == '__main__':
##########################################################################################################################################
pos_w = glob('../results/experiment_score/Pos_6*.csv')
pos = pd.concat([pd.read_csv(f) for f in pos_w])
pos.to_csv('../results/Pos_6_features.csv',index=False)
att_w = glob('../results/experiment_score/att_6*.csv')
att = pd.concat([pd.read_csv(f) for f in att_w])
att.to_csv('../results/ATT_6_features.csv',index=False)
df = pos.copy()
######################### compared against chance level ###############
df = df[(0<df['window']) & (df['window']<5)]
results = dict(
model=[],
window=[],
pval=[],
t=[],
df=[],
)
for (model,window),df_sub in df.groupby(['model','window']):
df_sub = df_sub.sort_values('sub')
t,pval = stats.ttest_1samp(df_sub['score'].values,0.5,)
results['model'].append(model)
results['window'].append(window)
results['pval'].append(pval)
results['t'].append(t)
results['df'].append(len(df_sub)-1)
results = pd.DataFrame(results)
temp = []
for model, df_sub in results.groupby('model'):
idx_sort = np.argsort(df_sub['pval'])
for name in results.columns:
df_sub[name] = df_sub[name].values[idx_sort]
convert = MCPConverter(pvals=df_sub['pval'].values)
df_pvals = convert.adjust_many()
df_sub['ps_corrected'] = df_pvals['bonferroni'].values
temp.append(df_sub)
results = pd.concat(temp)
results.sort_values(['model','window']).to_csv('../results/Pos_ttest_6_features (scipy).csv',index=False)
##########################################################################
id_vars = ['model',
'score',
'sub',
'window',]
value_vars =[
'correct',
'awareness',
'confidence',
'RT_correct',
'RT_awareness',
'RT_confidence',
]
df_post = post_processing(df,id_vars,value_vars)
# c = df_post.groupby(['Subjects','Models','Window','Attributes']).mean().reset_index()
c = df_post[df_post['Window']<5]
df_temp = preparation(c)
writer = pd.ExcelWriter(os.path.join(df_dir,'pos,6 features,feature importance.xlsx'))
df_temp.to_excel(writer,'sheet1',index=False);writer.save()
df_temp.to_csv(os.path.join(df_dir,'pos,6 features,feature importance.csv'),index=False)
######
#######
#########
df = att.copy()
######################### compared against chance level ###############
df = df[(0<df['window']) & (df['window']<5)]
results = dict(
model=[],
window=[],
pval=[],
t=[],
df=[],
)
for (model,window),df_sub in df.groupby(['model','window']):
df_sub = df_sub.sort_values('sub')
t,pval = stats.ttest_1samp(df_sub['score'].values,0.5,)
results['model'].append(model)
results['window'].append(window)
results['pval'].append(pval)
results['t'].append(t)
results['df'].append(len(df_sub)-1)
results = pd.DataFrame(results)
temp = []
for model, df_sub in results.groupby('model'):
idx_sort = np.argsort(df_sub['pval'])
for name in results.columns:
df_sub[name] = df_sub[name].values[idx_sort]
convert = MCPConverter(pvals=df_sub['pval'].values)
df_pvals = convert.adjust_many()
df_sub['ps_corrected'] = df_pvals['bonferroni'].values
temp.append(df_sub)
    results = pd.concat(temp)
# -*- coding: utf-8 -*-
"""Provide unified interfaces for optimization solutions for concentrations.
Based on solution implementations in :mod:`cobra.core.solution`
"""
from cobra.util.solver import check_solver_status
from numpy import array, exp, nan
from optlang.interface import OPTIMAL
from pandas import DataFrame, Series, option_context
from mass.core.mass_configuration import MassConfiguration
from mass.util.util import (
_check_kwargs,
apply_decimal_precision,
get_public_attributes_and_methods,
)
MASSCONFIGURATION = MassConfiguration()
class ConcSolution:
"""A unified interface to a :class:`.ConcSolver` optimization solution.
Notes
-----
The :class:`.ConcSolution` is meant to be constructed by
:func:`get_concentration_solution` please look at that function to fully
understand the :class:`ConcSolution` class.
Attributes
----------
objective_value : float
The (optimal) value for the objective function.
status : str
The solver status related to the solution.
concentrations : pandas.Series
Contains the metabolite concentrations which are the primal values
of metabolite variables.
concentration_reduced_costs : pandas.Series
Contains metabolite reduced costs, which are the dual values of
metabolites variables.
Keqs : pandas.Series
Contains the reaction equilibrium constant values, which are primal
values of Keq variables.
Keq_reduced_costs : pandas.Series
Contains reaction equilibrium constant reduced costs, which are the
dual values of Keq variables.
shadow_prices : pandas.Series
Contains reaction shadow prices (dual values of constraints).
"""
def __init__(
self,
objective_value,
status,
concentrations,
Keqs,
concentration_reduced_costs=None,
Keq_reduced_costs=None,
shadow_prices=None,
):
"""Initialize the ConcSolution."""
super(ConcSolution, self).__init__()
# For solver objective value and status
self.objective_value = objective_value
self.status = status
# For solver variables
self.concentrations = concentrations
self.Keqs = Keqs
# For variable reduced costs
self.concentration_reduced_costs = concentration_reduced_costs
self.Keq_reduced_costs = Keq_reduced_costs
# For constraint shadow prices
self.shadow_prices = shadow_prices
def concentrations_to_frame(self):
"""Get a :class:`pandas.DataFrame` of concs. and reduced costs."""
return DataFrame(
{
"concentrations": self.concentrations,
"reduced_costs": self.concentration_reduced_costs,
}
)
def Keqs_to_frame(self):
"""Get a :class:`pandas.DataFrame` of Keqs and reduced costs."""
return DataFrame({"Keqs": self.Keqs, "reduced_costs": self.Keq_reduced_costs})
def to_frame(self):
"""Get a :class:`pandas.DataFrame` of variables and reduced costs."""
return DataFrame(
{
"variables": self.concentrations.append(self.Keqs),
"reduced_costs": self.concentration_reduced_costs.append(
self.Keq_reduced_costs
),
}
)
def _repr_html_(self):
"""HTML representation of the overview for the ConcSolution.
Warnings
--------
This method is intended for internal use only.
"""
if self.status == OPTIMAL:
with option_context("display.max_rows", 10):
html = (
"<strong><em>Optimal</em> solution with objective "
"value {:.3f}</strong><br>{}".format(
self.objective_value, self.to_frame()._repr_html_()
)
)
else:
html = "<strong><em>{}</em> solution</strong>".format(self.status)
return html
def __repr__(self):
"""Set string representation of the solution instance.
Warnings
--------
This method is intended for internal use only.
"""
if self.status != OPTIMAL:
return "<Solution {0:s} at 0x{1:x}>".format(self.status, id(self))
return "<Solution {0:.3f} at 0x{1:x}>".format(self.objective_value, id(self))
def __getitem__(self, variable):
"""Return the value of a metabolite concentration or reaction Keq.
Parameters
----------
variable : str
A variable ID for a variable in the solution.
Warnings
--------
This method is intended for internal use only.
"""
try:
return self.concentrations[str(variable)]
except KeyError:
pass
try:
return self.Keqs[str(variable)]
except KeyError as e:
raise ValueError(
"{0!r} is not a str ID of a ConcSolution variable.".format(str(e))
)
def __dir__(self):
"""Override default dir() implementation to list only public items.
Warnings
--------
This method is intended for internal use only.
"""
return get_public_attributes_and_methods(self)
get_primal_by_id = __getitem__
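    # Illustrative usage (the variable ids below are hypothetical): a ConcSolution can be
    # indexed by the string id of a metabolite or of a Keq variable,
    # >>> sol = get_concentration_solution(conc_solver)   # for an existing ConcSolver
    # >>> sol["atp_c"]        # metabolite concentration
    # >>> sol["Keq_HEX1"]     # reaction equilibrium constant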
def get_concentration_solution(
concentration_solver, metabolites=None, reactions=None, raise_error=False, **kwargs
):
"""Generate a solution representation of a :class:`.ConcSolver` state.
Parameters
---------
concentration_solver : ConcSolver
The :class:`.ConcSolver` containing the mathematical problem solved.
metabolites : list
An iterable of :class:`.MassMetabolite` objects.
Uses :attr:`.ConcSolver.included_metabolites` by default.
reactions : list
An iterable of :class:`.MassReaction` objects.
Uses :attr:`.ConcSolver.included_reactions` by default.
raise_error : bool
Whether to raise an OptimizationError if solver status is not optimal.
**kwargs
decimal_precision :
``bool`` indicating whether to apply the
:attr:`~.MassBaseConfiguration.decimal_precision` attribute of
the :class:`.MassConfiguration` to the solution values.
Default is ``False``.
Returns
-------
ConcSolution
The solution of the optimization as a :class:`ConcSolution` object.
"""
kwargs = _check_kwargs(
{
"decimal_precision": False,
},
kwargs,
)
check_solver_status(concentration_solver.solver.status, raise_error=raise_error)
# Get included metabolites and reactions
metabolites = concentration_solver._get_included_metabolites(metabolites)
reactions = concentration_solver._get_included_reactions(reactions)
# Get variable IDs, metabolites, and reactions for Keqs and constraints
metabolites = [m.id for m in metabolites if m.id in concentration_solver.variables]
Keq_ids = [
r.Keq_str for r in reactions if r.Keq_str in concentration_solver.variables
]
reactions = [r.id for r in reactions if r.id in concentration_solver.constraints]
# Get metabolite and Keq primal values
concs = array([concentration_solver.solver.primal_values[m] for m in metabolites])
Keqs = array([concentration_solver.solver.primal_values[Keq] for Keq in Keq_ids])
if concentration_solver.solver.is_integer:
# Fill irrelevant arrays with nan
reduced_concs = array([nan] * len(metabolites))
reduced_Keqs = array([nan] * len(Keq_ids))
shadow = array([nan] * len(reactions))
else:
# Get reduced cost values and shadow prices
reduced_concs = array(
[concentration_solver.solver.reduced_costs[m] for m in metabolites]
)
reduced_Keqs = array(
[concentration_solver.solver.reduced_costs[Keq] for Keq in Keq_ids]
)
shadow = array(
[concentration_solver.solver.shadow_prices[r] for r in reactions]
)
def transform_values(arr, **kwargs):
"""Transform array from logs to linear space and round if desired."""
if kwargs.get("decimal_precision"):
arr = apply_decimal_precision(arr, MASSCONFIGURATION.decimal_precision)
return arr
objective_value = transform_values(
exp(concentration_solver.solver.objective.value), **kwargs
)
concs = transform_values(exp(concs), **kwargs)
Keqs = transform_values(exp(Keqs), **kwargs)
reduced_concs = transform_values(reduced_concs, **kwargs)
reduced_Keqs = transform_values(reduced_Keqs, **kwargs)
shadow = transform_values(shadow, **kwargs)
return ConcSolution(
objective_value,
concentration_solver.solver.status,
Series(concs, metabolites, name="concentrations"),
Series(Keqs, Keq_ids, name="Keqs"),
Series(reduced_concs, metabolites, name="concentration_reduced_costs"),
Series(reduced_Keqs, Keq_ids, name="Keq_reduced_costs"),
        Series(shadow, reactions, name="shadow_prices"),
    )
# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains algorithms based on linear programming techniques, including mixed-integer linear programming
"""
from __future__ import print_function
import logging
import warnings
import numpy
from IProgress.progressbar import ProgressBar
from IProgress.widgets import Bar, Percentage
from pandas import DataFrame
from sympy import Add
import cobra
from cobra.util import fix_objective_as_constraint
from cobra.exceptions import OptimizationError
from cobra.flux_analysis import find_essential_reactions
from cameo import config
from cameo import ui
from cameo.core.model_dual import convert_to_dual
from cameo.core.strain_design import StrainDesignMethodResult, StrainDesignMethod, StrainDesign
from cameo.core.target import ReactionKnockoutTarget
from cameo.core.utils import get_reaction_for
from cameo.flux_analysis.analysis import phenotypic_phase_plane, flux_variability_analysis
from cameo.flux_analysis.simulation import fba
from cameo.flux_analysis.structural import find_coupled_reactions_nullspace
from cameo.util import reduce_reaction_set, decompose_reaction_groups
from cameo.visualization.plotting import plotter
logger = logging.getLogger(__name__)
__all__ = ["OptKnock"]
class OptKnock(StrainDesignMethod):
"""
OptKnock.
OptKnock solves a bi-level optimization problem, finding the set of knockouts that allows maximal
target production under optimal growth.
Parameters
----------
model : cobra.Model
A model to be used for finding optimal knockouts. Always set a non-zero lower bound on
biomass reaction before using OptKnock.
exclude_reactions : iterable of str or Reaction objects
Reactions that will not be knocked out. Excluding reactions can give more realistic results
and decrease running time. Essential reactions and exchanges are always excluded.
remove_blocked : boolean (default True)
If True, reactions that cannot carry flux (determined by FVA) will be removed from the model.
This reduces running time significantly.
fraction_of_optimum : If not None, this value will be used to constrain the inner objective (e.g. growth) to
a fraction of the optimal inner objective value. If inner objective is not constrained manually
this argument should be used. (Default: None)
exclude_non_gene_reactions : If True (default), reactions that are not associated with genes will not be
knocked out. This results in more practically relevant solutions as well as shorter running times.
use_nullspace_simplification: Boolean (default True)
Use a basis for the nullspace to find groups of reactions whose fluxes are multiples of each other. From
each of these groups only 1 reaction will be included as a possible knockout
Examples
--------
>>> from cameo import models
>>> from cameo.strain_design.deterministic import OptKnock
>>> model = models.bigg.e_coli_core
>>> model.reactions.Biomass_Ecoli_core_w_GAM.lower_bound = 0.1
>>> model.solver = "gurobi" # Using gurobi or cplex is recommended
>>> optknock = OptKnock(model)
>>> result = optknock.run(k=2, target="EX_ac_e", max_results=3)
"""
def __init__(self, model, exclude_reactions=None, remove_blocked=True, fraction_of_optimum=0.1,
exclude_non_gene_reactions=True, use_nullspace_simplification=True, *args, **kwargs):
super(OptKnock, self).__init__(*args, **kwargs)
self._model = model.copy()
self._original_model = model
if "gurobi" in config.solvers:
logger.info("Changing solver to Gurobi and tweaking some parameters.")
if "gurobi_interface" not in model.solver.interface.__name__:
model.solver = "gurobi"
# The tolerances are set to the minimum value. This gives maximum precision.
problem = model.solver.problem
problem.params.NodeMethod = 1 # primal simplex node relaxation
problem.params.FeasibilityTol = 1e-9
problem.params.OptimalityTol = 1e-3
problem.params.IntFeasTol = 1e-9
problem.params.MIPgapAbs = 1e-9
problem.params.MIPgap = 1e-9
elif "cplex" in config.solvers:
logger.debug("Changing solver to cplex and tweaking some parameters.")
if "cplex_interface" not in self._model.solver.interface.__name__:
self._model.solver = "cplex"
problem = self._model.solver.problem
problem.parameters.mip.strategy.startalgorithm.set(1)
problem.parameters.simplex.tolerances.feasibility.set(1e-8)
problem.parameters.simplex.tolerances.optimality.set(1e-8)
problem.parameters.mip.tolerances.integrality.set(1e-8)
problem.parameters.mip.tolerances.absmipgap.set(1e-8)
problem.parameters.mip.tolerances.mipgap.set(1e-8)
else:
warnings.warn("You are trying to run OptKnock with %s. This might not end well." %
self._model.solver.interface.__name__.split(".")[-1])
if fraction_of_optimum is not None:
fix_objective_as_constraint(self._model, fraction=fraction_of_optimum)
if remove_blocked:
self._remove_blocked_reactions()
if exclude_reactions:
# Convert exclude_reactions to reaction ID's
exclude_reactions = [
r.id if isinstance(r, cobra.core.Reaction) else r for r in exclude_reactions
]
for r_id in exclude_reactions:
if r_id not in self._model.reactions:
raise ValueError("Excluded reaction {} is not in the model".format(r_id))
else:
exclude_reactions = []
if exclude_non_gene_reactions:
exclude_reactions += [r.id for r in self._model.reactions if not r.genes]
self._build_problem(exclude_reactions, use_nullspace_simplification)
def _remove_blocked_reactions(self):
fva_res = flux_variability_analysis(self._model, fraction_of_optimum=0)
# FIXME: Iterate over the index only (reaction identifiers).
blocked = [
self._model.reactions.get_by_id(reaction) for reaction, row in fva_res.data_frame.iterrows()
if (round(row["lower_bound"], config.ndecimals) == round(
row["upper_bound"], config.ndecimals) == 0)
]
self._model.remove_reactions(blocked)
def _reduce_to_nullspace(self, reactions):
self.reaction_groups = find_coupled_reactions_nullspace(self._model)
reaction_groups_keys = [set(group) for group in self.reaction_groups]
reduced_reactions = reduce_reaction_set(reactions, reaction_groups_keys)
return reduced_reactions
def _build_problem(self, exclude_reactions, use_nullspace_simplification):
logger.debug("Starting to formulate OptKnock problem")
self.essential_reactions = find_essential_reactions(self._model, processes=1).union(self._model.boundary)
if exclude_reactions:
self.exclude_reactions = set.union(
self.essential_reactions,
set(self._model.reactions.get_by_id(r) for r in exclude_reactions)
)
reactions = set(self._model.reactions) - self.exclude_reactions
if use_nullspace_simplification:
reactions = self._reduce_to_nullspace(reactions)
else:
self.reaction_groups = None
self._make_dual()
self._combine_primal_and_dual()
logger.debug("Primal and dual successfully combined")
y_vars = {}
constrained_dual_vars = set()
for reaction in reactions:
if reaction not in self.exclude_reactions and reaction.lower_bound <= 0 <= reaction.upper_bound:
y_var, constrained_vars = self._add_knockout_constraints(reaction)
y_vars[y_var] = reaction
constrained_dual_vars.update(constrained_vars)
self._y_vars = y_vars
primal_objective = self._model.solver.objective
dual_objective = self._model.solver.interface.Objective.clone(
self._dual_problem.objective, model=self._model.solver)
reduced_expression = Add(*((c * v) for v, c in dual_objective.expression.as_coefficients_dict().items()
if v not in constrained_dual_vars))
dual_objective = self._model.solver.interface.Objective(reduced_expression, direction=dual_objective.direction)
optimality_constraint = self._model.solver.interface.Constraint(
primal_objective.expression - dual_objective.expression,
lb=0, ub=0, name="inner_optimality")
self._model.solver.add(optimality_constraint)
logger.debug("Inner optimality constrained")
logger.debug("Adding constraint for number of knockouts")
knockout_number_constraint = self._model.solver.interface.Constraint(
Add(*y_vars), lb=len(y_vars), ub=len(y_vars)
)
self._model.solver.add(knockout_number_constraint)
self._number_of_knockouts_constraint = knockout_number_constraint
def _make_dual(self):
dual_problem = convert_to_dual(self._model.solver)
self._dual_problem = dual_problem
logger.debug("Dual problem successfully created")
def _combine_primal_and_dual(self):
primal_problem = self._model.solver
dual_problem = self._dual_problem
for var in dual_problem.variables:
var = primal_problem.interface.Variable.clone(var)
primal_problem.add(var)
for const in dual_problem.constraints:
const = primal_problem.interface.Constraint.clone(const, model=primal_problem)
primal_problem.add(const)
def _add_knockout_constraints(self, reaction):
interface = self._model.solver.interface
y_var = interface.Variable("y_" + reaction.id, type="binary")
self._model.solver.add(interface.Constraint(reaction.flux_expression - 1000 * y_var, ub=0))
self._model.solver.add(interface.Constraint(reaction.flux_expression + 1000 * y_var, lb=0))
constrained_vars = []
if reaction.upper_bound != 0:
dual_forward_ub = self._model.solver.variables["dual_" + reaction.forward_variable.name + "_ub"]
self._model.solver.add(interface.Constraint(dual_forward_ub - 1000 * (1 - y_var), ub=0))
constrained_vars.append(dual_forward_ub)
if reaction.lower_bound != 0:
dual_reverse_ub = self._model.solver.variables["dual_" + reaction.reverse_variable.name + "_ub"]
self._model.solver.add(interface.Constraint(dual_reverse_ub - 1000 * (1 - y_var), ub=0))
constrained_vars.append(dual_reverse_ub)
return y_var, constrained_vars
def run(self, max_knockouts=5, biomass=None, target=None, max_results=1, *args, **kwargs):
"""
Perform the OptKnock simulation
Parameters
----------
target: str, Metabolite or Reaction
The design target
biomass: str, Metabolite or Reaction
The biomass definition in the model
max_knockouts: int
Max number of knockouts allowed
max_results: int
Max number of different designs to return if found
Returns
-------
OptKnockResult
"""
# TODO: why not required arguments?
if biomass is None or target is None:
raise ValueError('missing biomass and/or target reaction')
target = get_reaction_for(self._model, target, add=False)
biomass = get_reaction_for(self._model, biomass, add=False)
knockout_list = []
fluxes_list = []
production_list = []
biomass_list = []
loader_id = ui.loading()
with self._model:
self._model.objective = target.id
self._number_of_knockouts_constraint.lb = self._number_of_knockouts_constraint.ub - max_knockouts
count = 0
while count < max_results:
try:
solution = self._model.optimize(raise_error=True)
except OptimizationError as e:
logger.debug("Problem could not be solved. Terminating and returning " + str(count) + " solutions")
logger.debug(str(e))
break
knockouts = tuple(reaction for y, reaction in self._y_vars.items() if round(y.primal, 3) == 0)
assert len(knockouts) <= max_knockouts
if self.reaction_groups:
combinations = decompose_reaction_groups(self.reaction_groups, knockouts)
for kos in combinations:
knockout_list.append({r.id for r in kos})
fluxes_list.append(solution.fluxes)
production_list.append(solution.objective_value)
biomass_list.append(solution.fluxes[biomass.id])
else:
knockout_list.append({r.id for r in knockouts})
fluxes_list.append(solution.fluxes)
production_list.append(solution.objective_value)
biomass_list.append(solution.fluxes[biomass.id])
# Add an integer cut
y_vars_to_cut = [y for y in self._y_vars if round(y.primal, 3) == 0]
integer_cut = self._model.solver.interface.Constraint(Add(*y_vars_to_cut),
lb=1,
name="integer_cut_" + str(count))
if len(knockouts) < max_knockouts:
self._number_of_knockouts_constraint.lb = self._number_of_knockouts_constraint.ub - len(knockouts)
self._model.add_cons_vars(integer_cut)
count += 1
ui.stop_loader(loader_id)
return OptKnockResult(self._original_model, knockout_list, fluxes_list,
production_list, biomass_list, target.id, biomass)
class RobustKnock(StrainDesignMethod):
pass
class OptKnockResult(StrainDesignMethodResult):
__method_name__ = "OptKnock"
def __init__(self, model, knockouts, fluxes, production_fluxes, biomass_fluxes, target, biomass, *args, **kwargs):
super(OptKnockResult, self).__init__(self._generate_designs(knockouts), *args, **kwargs)
self._model = model
self._knockouts = knockouts
self._fluxes = fluxes
self._production_fluxes = production_fluxes
self._biomass_fluxes = biomass_fluxes
self._target = target
self._biomass = biomass
self._processed_knockouts = None
@staticmethod
def _generate_designs(knockouts):
designs = []
for knockout_design in knockouts:
designs.append(StrainDesign([ReactionKnockoutTarget(ko for ko in knockout_design)]))
return designs
def _process_knockouts(self):
progress = ProgressBar(maxval=len(self._knockouts), widgets=["Processing solutions: ", Bar(), Percentage()])
self._processed_knockouts = DataFrame(columns=["reactions", "size", self._target,
"biomass", "fva_min", "fva_max"])
for i, knockouts in progress(enumerate(self._knockouts)):
try:
with self._model:
[self._model.reactions.get_by_id(ko).knock_out() for ko in knockouts]
fva = flux_variability_analysis(self._model, fraction_of_optimum=0.99, reactions=[self.target])
self._processed_knockouts.loc[i] = [knockouts, len(knockouts), self.production[i], self.biomass[i],
fva.lower_bound(self.target), fva.upper_bound(self.target)]
except OptimizationError:
self._processed_knockouts.loc[i] = [numpy.nan for _ in self._processed_knockouts.columns]
@property
def knockouts(self):
return self._knockouts
@property
def fluxes(self):
return self._fluxes
@property
def production(self):
return self._production_fluxes
@property
def biomass(self):
return self._biomass_fluxes
@property
def target(self):
return self._target
def display_on_map(self, index=0, map_name=None, palette="YlGnBu"):
with self._model:
for ko in self.data_frame.loc[index, "reactions"]:
self._model.reactions.get_by_id(ko).knock_out()
fluxes = fba(self._model)
fluxes.display_on_map(map_name=map_name, palette=palette)
def plot(self, index=0, grid=None, width=None, height=None, title=None, palette=None, **kwargs):
wt_production = phenotypic_phase_plane(self._model, objective=self._target, variables=[self._biomass.id])
with self._model:
for ko in self.data_frame.loc[index, "reactions"]:
self._model.reactions.get_by_id(ko).knock_out()
mt_production = phenotypic_phase_plane(self._model, objective=self._target, variables=[self._biomass.id])
if title is None:
title = "Production Envelope"
        dataframe = DataFrame(columns=["ub", "lb", "value", "strain"])
""" Virtual File System
File system abstraction with implementations for HDFS and local files
"""
from abc import ABC, abstractmethod
import io
import json
import logging
import os
import pickle
import re
import tempfile
from typing import Any, Generator, Dict, List, Tuple
import urllib
from functools import total_ordering
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import yaml
try:
import pyorc
except ModuleNotFoundError:
pyorc = None
from lvfs.stat import Stat
@total_ordering
class URL(ABC):
#
# Create URLs
#
""" Parsing and transformation of string-encoded URLs """
def __init__(self, raw: str):
""" Create a new URL from a string.
Prefer URL.to unless you know what you're doing
Accepts
-------
raw : str
The raw string representation of the URL, to be parsed
"""
self.raw: str = raw
def __hash__(self):
""" Support __hash__ based on the raw
Returns
-------
int
A uniformly-distributed but deterministic integral coimage corresponding to this
URL's raw string representation
"""
# Needed to sensibly use it as a dict key
return hash(self.raw)
def __eq__(self, value):
""" Support __eq__ based on the raw string
Accepts
-------
value : URL or str
The URL to compare self to
"""
# __ne__ delegates to __eq__ so we don't need __ne__
return self.raw == value
def __lt__(self, value):
""" Comparison operators work on the raw string
Accepts
-------
value : URL or str
The URL to compare self to
"""
# "@total_ordering class URL" handles the other operators
return self.raw < value
def __repr__(self) -> str:
""" Show a URL in a user-readable format """
return f"URL.to({repr(self.raw)})"
@staticmethod
def to(path):
""" Placeholder for the actual implementation in lvfs.__init__
This implemention exists only to satisfy type checking.
Accepts
-------
path : URL or str
URL passed through, or string to be parsed into a URL
Returns
-------
URL
A new object of a subclass of URL, dependent on protocol
"""
# Totally bogus, merely to use the parameter and satisfy flake8
return URL(path)
#
# Parse and modify URLs
#
def parse(self) -> urllib.parse.ParseResult:
""" Parse the raw URL into parts using urllib
Returns a ParseResult.
Example:
>>> urllib.parse.urlparse("derk://admin@uhhuh/local/thing;xyz?key=value&key2=value2#4")
ParseResult(
scheme='derk', netloc='admin@uhhuh', path='/local/thing', params='xyz',
query='key=value&key2=value2', fragment='4'
)
"""
return urllib.parse.urlparse(self.raw)
@property
def protocol(self) -> str:
""" Get the protocol of this url, or "file" if none """
return self.parse().scheme or "file"
@property
def dirname(self) -> str:
""" Get the path part of the parent of this URL, if it exists """
return os.path.dirname(self.parse().path)
@property
def basename(self) -> str:
""" Get the terminus of the path """
return os.path.basename(self.parse().path)
@property
def host(self) -> str:
""" Return the hostname, or bucket name in the case of GCS """
return self.parse().netloc.split("@", 1)[-1]
@property
def user(self) -> str:
""" Return the username to use for authentication. Useful for HDFS. """
fields = self.parse().netloc.split("@", 1)
return fields[0] if len(fields) == 2 else None
def with_user(self, user: str):
""" Change the username used for authentication. Useful for HDFS.
Accepts
-------
user : str
The username to associate with this URL
Returns
-------
URL
A new URL with the specified username and everything else the same
"""
p = self.parse()
fields = p.netloc.split("@", 1)
p = p._replace(netloc=f"{user}@{fields[-1]}" if user else fields[-1])
return URL.to(p.geturl())
@property
def path(self) -> str:
""" Get just the path of this URL """
return self.parse().path
@property
def parent(self):
""" Get the parent of this URL, as a URL (as opposed to dirname, which is a str)
Returns
-------
URL
A new URL made from the parsed components.
"""
# Accessing a private method -- but it seems pretty straightforward...
parts = self.parse()
return URL.to(parts._replace(path=os.path.dirname(parts.path)).geturl())
def join(self, suffix):
""" Return a new URL with another segment appended to the path
Accepts
-------
suffix : str
The segment to append to this URL.
This does not accept URLs, only strings.
Returns
-------
URL
A new URL with the appended segment
"""
if isinstance(suffix, URL):
raise TypeError(
"Can't join URLs. URLs are always absolute. Join a URL and a string instead."
)
return URL.to(self.raw.rstrip("/") + "/" + suffix)
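    # Example of the path helpers above (scheme and bucket names are arbitrary):
    # URL.to("gs://bucket/data/").join("part-0.csv") yields a URL whose raw string is
    # "gs://bucket/data/part-0.csv", and .parent of that result points back to
    # "gs://bucket/data".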
#
# Abstract methods, *to be implemented by subclasses*
#
@abstractmethod
async def read_binary(self) -> bytes:
""" Read a file to a string of bytes """
raise NotImplementedError
@abstractmethod
async def write_binary(self, content: bytes, overwrite: bool = True):
""" Create or replace a file with a string of bytes
Accepts
-------
content : bytes
What to fill the target file with
overwrite : bool
Allow overwriting an existing file.
            Keep in mind that not every file system supports this concept,
            so check the specific implementations you plan to use.
"""
raise NotImplementedError
@abstractmethod
async def ls(self, recursive: bool = False):
""" Get the list of files in this directory, if it is one
Returns a list of URL objects. Results are always absolute.
*DO NOT `root.join(file)`*
Returns
-------
List[URL]
All the direct children, or all the recursive children
"""
raise NotImplementedError
@abstractmethod
async def stat(self) -> Stat:
""" Get basic stat results for this file """
raise NotImplementedError
@abstractmethod
async def mkdir(self, ignore_if_exists: bool = False):
""" Create an empty directory and parent directories recursively
Accepts
-------
ignore_if_exists: boolean: DEPRECATED
Included for backward compatibility. Existing directories are always ignored.
"""
raise NotImplementedError
def supports_permissions(self) -> bool:
""" Some implementations, like blobs, do not always support permissions,
If this method returns true, the file system supports permissions
"""
return True
async def chmod(self, mode: int):
""" Modify permissions of the file so it has the desired mode """
raise NotImplementedError
@abstractmethod
async def unlink(self, ignore_if_missing: bool = False):
""" A lower-level method for removing one file or directory, to be overridden by specific
URL implementations. The recursive rm may or may not be built on this.
"""
raise NotImplementedError
def supports_properties(self) -> bool:
""" Return whether this URL supports setting key-value properties.
Most filesystems do not, but this allows you to handle it programmatically,
in most cases without any IO.
"""
return False
async def properties(self) -> Dict[str, List[str]]:
""" Return the key-value properties associated with this URL.
This is mostly for version control style filesystems.
Most filesystems do not support this.
"""
        raise NotImplementedError
async def add_properties(self, **properties):
""" Set a key-value property associated with this URL
This is mostly for version control style filesystems.
Most filesystems do not support this.
"""
raise NotImplementedError
async def delete_properties(self, names):
""" Delete key-value properties from a URL.
This is mostly for version control style filesystems.
Most filesystems do not support this.
"""
raise NotImplementedError
#
# Default serde implementations
#
async def read_pickle(self) -> Any:
""" Read a pickle from a file """
return pickle.loads(await self.read_binary())
async def write_pickle(self, obj: Any):
""" Write a pickle to a file """
await self.write_binary(pickle.dumps(obj))
async def read_yaml(self, **load_args) -> Any:
""" Read a YAML from a file """
return yaml.safe_load(await self.read_binary(), **load_args)
async def write_yaml(self, obj: Any, **dump_args):
""" Write a YAML to a file """
await self.write_binary(yaml.dump(obj, **dump_args).encode())
async def read_json(self, **load_args) -> Any:
""" Read a JSON from a file """
return json.loads(await self.read_binary(), **load_args)
async def write_json(self, obj: Any, **dump_args):
""" Write a JSON to a file """
await self.write_binary(json.dumps(obj, **dump_args).encode())
async def read_text(self) -> str:
""" Decode the binary data as UTF8 """
return (await self.read_binary()).decode()
async def write_text(self, text: str):
""" Encode the binary data as UTF8 """
await self.write_binary(text.encode())
async def _read_file(self, parser, *, recursive: bool = False):
""" Read a file using the given parser. """
shards = []
partition_parser = re.compile(r"([^/=]+=[^/=]+)")
if await self.isdir():
targets = sorted(await self.ls(recursive=recursive))
else:
targets = [self]
for child_url in targets:
try:
child_bytes = await child_url.read_binary()
except IsADirectoryError:
# That's fine, move on
continue
if child_bytes:
# Some files are just empty sentinels
shards.append(
parser(io.BytesIO(child_bytes)).assign(
**dict(
s.split("=")
for s in partition_parser.findall(child_url.dirname)
)
)
)
return pd.concat(shards)
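    # Example of the partition parsing above (the path is arbitrary): for a shard stored
    # at ".../sales/year=2021/month=05/part-0.parquet", partition_parser.findall() on the
    # child's dirname returns ["year=2021", "month=05"], which .assign() turns into
    # constant string-valued columns year="2021" and month="05" on that shard.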
@staticmethod
def _as_numpy_column(num_rows, pa_schema, pa_col, decimal_as="float"):
""" Convert a pyarrow column to a numpy column
lossily downcasting Decimals to float64 or int64
Parameters
----------
pa_schema: a Pyarrow schema, as taken from a Pyarrow RecordBatch
pa_col: the corresponding Pyarrow column, taken from the same RecordBatch
decimal_as: one of "float", "int":
if "float": apply the decimal scale to get the closest possible float
if "int": return an int64 array in multiples of the scale.
For example: for scale=4, the float 5.1234 is 51234 as int.
Returns
-------
a numpy array with the resulting data
Notes
-----
* Decimal arrays can be NULL, but NULLs will be replaced with 0
for integers, and with NaN for floats.
"""
if isinstance(pa_schema.type, pa.Decimal128Type):
# High precisions are not supported
# Pyarrow ORC files are stored as two streams, a bit-packed PRESENT stream,
# and a decimal128 stream of only the elements where PRESENT=True
# We will read the buffer on 128-bit signed ints directly, and since numpy
# only supports 64 bit ints we will truncate them to that.
# Somehow the pa_col.buffers() could contain None value.
valid_buffer = ([x for x in pa_col.buffers() if x] or [None])[0]
present = np.frombuffer(valid_buffer, dtype=np.uint8)
present = np.unpackbits(present, count=num_rows).astype(bool)
present_ints = np.frombuffer(
pa_col.buffers()[1], dtype=np.int64
)[::2][:np.count_nonzero(present)]
if decimal_as == "int":
ints = np.zeros(num_rows, dtype=np.int64)
ints[present] = present_ints
return np.ma.masked_array(ints, mask=~present)
elif decimal_as == "float":
floats = np.full(num_rows, np.nan)
floats[present] = present_ints * 10 ** -pa_schema.type.scale
return np.ma.masked_array(floats, mask=~present)
elif decimal_as == "decimal":
raise NotImplementedError(
"Decimal passthrough is not supported in this version of LVFS"
)
else:
raise NotImplementedError(
"Decimals must be returned as either float or int"
)
elif pa_schema.type == "date32[day]":
# PyArrow has a bug where it reads 32 bit date types as 64 bit date types.
# As a result, reading to a numpy array will fail because it isn't a multiple of the
# element size. And when using pandas, it will have either half as many elements, with
# the wrong values, or one less than half. In order to work around this error, we need
# to request the buffers and the reread them with the correct format.
valid_buffer = ([x for x in pa_col.buffers() if x] or [None])[0]
present = np.frombuffer(valid_buffer, dtype=np.uint8)
present = np.unpackbits(present, count=num_rows).astype(bool)
present_ints = np.frombuffer(
pa_col.buffers()[1], dtype=np.int32
).astype('datetime64[D]')[:np.count_nonzero(present)]
dates = np.zeros(num_rows, dtype='datetime64[D]')
dates[present] = present_ints[:num_rows]
return np.ma.masked_array(dates, mask=~present)
else:
try:
return pa_col.to_numpy()
# pyarrow.Array.to_numpy() doesn't support non-primitive types
# until v0.17 (zero_copy_only=False), so use to_pandas() as a temp
# workaround now, but need to check the content and consistency?
# If we don't need the orc support on pyarrow, maybe we don't have
# to stick with v0.13 anymore?
except NotImplementedError:
                # to_pandas() will sometimes return numpy arrays already, which don't have to_numpy()
pandas_obj = pa_col.to_pandas()
if hasattr(pandas_obj, "to_numpy"):
return pandas_obj.to_numpy()
else:
return pandas_obj
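    # Worked example for the decimal handling above (values are arbitrary): a decimal(10, 4)
    # cell stored as the 128-bit integer 51234 represents 5.1234. With decimal_as="float"
    # the element comes back as 51234 * 10 ** -4 == 5.1234; with decimal_as="int" it comes
    # back as 51234 and the caller must track the scale. NULL cells become NaN or 0.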
async def read_csv(self, *, recursive: bool = False) -> pd.DataFrame:
""" Read one or many csv files
- If this is a directory, read all the csv files within it.
- If recursive, read all csv descended from it ad infinitum
"""
return await self._read_file(pd.read_csv, recursive=recursive)
async def read_parquet(self, *, recursive: bool = False) -> pd.DataFrame:
""" Read one or many parquet files
- If this is a directory, read all the parquet files within it.
- If recursive, read all parquets descended from it ad infinitum
"""
return await self._read_file(pd.read_parquet, recursive=recursive)
async def write_parquet(self, parq: pd.DataFrame, **_opts):
""" Write the given Pandas dataframe to a parquet file """
table = pa.Table.from_pandas(parq)
bytefile = io.BytesIO()
pq.write_table(table, bytefile)
await self.write_binary(bytefile.getvalue(), overwrite=True)
async def read_orc(self, *, keep_columns: List[str] = None, recursive: bool = False,
int_cols: str = 'Int'
) -> pd.DataFrame:
""" Read a ORC file, or if this is a directory, read all the ORC files within.
Accepts:
keep_columns (keyword-only): List: list of columns to read
recursive (keyword-only): bool: load all ORC files recursively
int_cols (keyword-only): "Int" or "int" or "float": Whether to cast int columns
as Pandas Int or int or float.
Returns:
a single dataframe concatenated from all ORC files
Notes:
- Files are visited in lexicographic order (handy for ACID tables)
- Integer columns are returned as Pandas nullable Int types
by default to account for missing values. Since neither python
nor numpy support NaN in integer types, this is an option to
have a more clear representation of missing values in the
output. If np.nan representation is preferred, these columns can
be cast to float types by setting the int_cols option. If you
are sure no missing values exist in the data, you can set
int_cols to 'int' and gain more memory efficiency.
- It is uncommon to have decimal columns that do not contain
decimals (i.e., they only have whole numbers). Because of this
and also to be memory efficient, we return decimal types as float64.
"""
frames = []
async for frame in self.read_orc_stripes(keep_columns=keep_columns,
recursive=recursive, int_cols=int_cols):
frames.append(frame)
return pd.concat(frames, ignore_index=True)
async def read_orc_stripes(self, *, keep_columns=None, recursive=False, int_cols='Int'
) -> Generator[pd.DataFrame, None, None]:
""" Read the stripes from all the ORC files in a folder, one stripe at a time.
Accepts:
recursive (keyword-only): bool: load all ORC files recursively
keep_columns (keyword-only): List[str]: Only read these columns
int_cols (keyword-only): "Int" or "int" or "float": Whether to cast int columns
as Pandas Int or int or float.
Yields:
dataframes, one from each ORC stripe.
Notes:
- Files are visited in lexicographic order (handy for ACID tables)
- It reads the whole *binary* file at once, like all URL methods.
- It only decompresses the file one stripe at a time (reducing memory a lot)
- It *does not* deserialize the stripe automatically
- This is because in situations where you need this method,
you probably also have a different deserialization in mind.
- *If you want dataframes, just call .to_pandas() on the results*
- It's a generator, so it reads but doesn't decompress until you use it
- Consider using this for:
- oversized ORCs made from concatenating a table
- Hive ACID tables, which need oddball parsing and explode in Pandas
"""
if pyorc is None:
raise RuntimeError("PyORC is required to read ORC files.")
files = sorted(await self.ls(recursive=recursive)) if await self.isdir() else [self]
# if given as input, select specified columns to read
if keep_columns:
column_names = keep_columns
else:
column_names = None
# Define a mapping from ORC types to Numpy types but map ints to floats
# so we can handle NaN in Numpy arrays. Later on we will convert these
# float columns back to int types but we will take advantage of Pandas
# nullable int type (kudos to them for saving the day where Python and
# Numpy both failed!)
types_map = {
'tinyint': '<f4',
'smallint': '<f4',
'int': '<f4',
'bigint': '<f8',
'float': '<f4',
'double': '<f8',
'decimal': '<f8',
'date': '<M8[D]',
'timestamp': '<M8[ns]',
'boolean': '?'
}
# if the user knows they have no missing values and wants 'int' then cast
# to int
if int_cols == 'int':
types_map = {
'tinyint': '<i1',
'smallint': '<i2',
'int': '<i4',
'bigint': '<i8',
'float': '<f4',
'double': '<f8',
'decimal': '<f8',
'date': '<M8[D]',
'timestamp': '<M8[ns]',
'boolean': '?'
}
# Now build a mapping for data types of int columns mapping them to
# nullable int type of Pandas. Notice that all these data types have to
# start with a capital 'I' (i.e., Int64 as opposed to int64)
ints_map = {
'tinyint': 'Int8',
'smallint': 'Int16',
'int': 'Int32',
'bigint': 'Int64'
}
# Keep track of runs for reading column names and dtypes since we only need
# to do this once. While in theory it is possible to write files with
# different schemas and columns into the same HDFS directory, in
# practice this won't happen and all files have the same schema and
# columns because they belong to the same table. So we need not repeat
# this part.
run_count = 0
# read ORC files in the directory one at a time
for orc_url in files:
try:
orc_bytes = await orc_url.read_binary()
except IsADirectoryError:
# That's fine, move on
continue
if orc_bytes:
# read the file into an ORC Reader object
orc = pyorc.Reader(fileo=io.BytesIO(orc_bytes), column_names=column_names)
if not run_count:
# get the selected column names from schema
cols = orc.selected_schema.fields
# make sure columns are in the original order
cols = [y for x, y in sorted([(orc.schema.find_column_id(c), c) for c in cols])]
# get the orc types of selected columns
orc_types = [f[1].name for f in orc.selected_schema.fields.items()]
# Build the equivalent numpy types with ints cast to float
# or int (Not Int)
np_types_flts = []
for _ in orc_types:
# if dtype is defined in the mapping then use it
if _ in types_map.keys():
np_types_flts.append(types_map[_])
# otherwise define it as object type
else:
                            np_types_flts.append(object)
# pack cols and np_types_flts into Numpy dtypes form
np_dtypes_flts = list(zip(cols, np_types_flts))
# Find the columns with int types and build a dictionary of
# their names and types
ints_types = dict()
# if the int_cols is set to Int otherwise we don't need it
# for float or int.
if int_cols == 'Int':
for col, orc_type in zip(cols, orc_types):
if 'int' in orc_type:
ints_types.update({col: ints_map[orc_type]})
# Update the run_count so we won't do this again for other
# files of the same table
run_count += 1
for stripe_i in orc.iter_stripes():
# Read the stripe using Numpy dtypes (these are the ones
# that map ints to floats)
np_cols = np.array(stripe_i.read(), dtype=np_dtypes_flts)
# Convert to Pandas DataFrame but before returning, convert
# those ORC int columns from float type to Pandas nullable
# int type so we get NA for missing int values
if int_cols == 'Int':
                        yield pd.DataFrame(np_cols).astype(ints_types)
                    else:
                        yield pd.DataFrame(np_cols)
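# Minimal usage sketch of the serde helpers above. The path and the int_cols choice are
# illustrative placeholders, and URL.to in this module is only a typing placeholder, so
# real code would go through the concrete subclass returned by the lvfs package's URL.to.
async def _example_read_table(path: str) -> pd.DataFrame:
    url = URL.to(path)
    # read_orc concatenates every ORC shard found under the path into one dataframe,
    # keeping integer columns as Pandas nullable Int types by default
    return await url.read_orc(int_cols="Int")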
import os
import requests
import json
import pandas as pd
import time
from datetime import datetime
# Make sure the environment variable is already set up
bearer_token = os.environ.get("BEARER_TOKEN")
def extract_public_metrics(df_tweets):
'''
Pulls out the `public_metrics` object and appends this to the Pandas dataframe as separate columns.
'''
if 'public_metrics' in df_tweets.columns:
public_metric_columns = ['retweet_count', 'reply_count', 'like_count', 'quote_count']
for public_metric in public_metric_columns:
df_tweets[public_metric] = df_tweets['public_metrics'].apply(lambda x: x[public_metric])
df_tweets = df_tweets.drop(columns=['public_metrics'])
return df_tweets
def extract_referenced_tweets(df_tweets):
'''
Pulls out the `referenced_tweets` object and appends this to the Pandas dataframe as separate columns.
'''
if 'referenced_tweets' in df_tweets.columns:
df_tweets['type'] = df_tweets['referenced_tweets'].apply(lambda x: x[0]['type'] if isinstance(x, list) else None)
df_tweets['referenced_tweet_id'] = df_tweets['referenced_tweets'].apply(lambda x: x[0]['id'] if isinstance(x, list) else None)
df_tweets = df_tweets.drop(columns=['referenced_tweets'])
return df_tweets
def clean_tweets_dataframe(df_tweets):
'''
Clean up dataframe object obtained from REST API JSON
'''
df_tweets = extract_public_metrics(df_tweets)
df_tweets = extract_referenced_tweets(df_tweets)
return df_tweets
def tweets_url(ids:list):
tweet_fields = 'id,author_id,public_metrics,conversation_id,created_at' #,in_reply_to_user_id #,entities
user_fields = 'name,username,profile_image_url'
expansions = 'author_id,referenced_tweets.id,in_reply_to_user_id,referenced_tweets.id.author_id'
url = f"https://api.twitter.com/2/tweets?ids={','.join(ids)}"+\
f"&tweet.fields={tweet_fields}"+\
f"&user.fields={user_fields}"+\
f"&expansions={expansions}"
return url
def tweet_url(tweet_id:str):
'''
    Pulls data for an individual tweet. You can adjust ids to include a single Tweet
    or up to 100 comma-separated IDs
'''
tweet_fields = "tweet.fields=lang,author_id"
ids = "ids="+tweet_id
url = "https://api.twitter.com/2/tweets?{}&{}".format(ids, tweet_fields)
return url
def search_url(query:str, max_results:int=100, start_time=None, end_time=None) -> str:
'''
Generates endpoint for Twitter REST API: GET /2/tweets/search/recent
Time format must be in RFC 3339 UTC timestamp eg `2022-01-04T00:00:00.000Z`
'''
tweet_fields = 'id,author_id,public_metrics,conversation_id,created_at' #,in_reply_to_user_id #,entities
user_fields = 'name,username,profile_image_url'
expansions = 'author_id,referenced_tweets.id,in_reply_to_user_id,referenced_tweets.id.author_id'
url = f"https://api.twitter.com/2/tweets/search/recent"+\
f"?query={query} -is:reply -is:quote"+\
f"&max_results={max_results}"+\
f"&tweet.fields={tweet_fields}"+\
f"&user.fields={user_fields}"+\
f"&expansions={expansions}"
if start_time is not None:
url+=f"&start_time={start_time}"
    if end_time is not None:
url+=f"&end_time={end_time}"
return url
def replies_to_user_url(user_id:str, max_results:int=100) -> str:
'''
Generates endpoint for Twitter REST API: GET /2/tweets/search/recent
Gets all replies to an individual user_id
'''
tweet_fields = 'id,author_id,public_metrics'
user_fields = 'name,username,profile_image_url'
expansions = 'author_id,referenced_tweets.id,in_reply_to_user_id,referenced_tweets.id.author_id'
url = f"https://api.twitter.com/2/tweets/search/recent?query=to%3A{user_id}%20OR%20retweets_of%3A{user_id}"+\
f"&max_results={max_results}"+\
f"&tweet.fields={tweet_fields}"+\
f"&user.fields={user_fields}"+\
f"&expansions={expansions}"
return url
def liking_users_url(tweet_id):
    '''
    Generates endpoint for Twitter REST API: GET /2/tweets/:id/liking_users
    '''
url = f"https://api.twitter.com/2/tweets/{tweet_id}/liking_users"
return url
def retweeting_users_url(tweet_id):
url = f"https://api.twitter.com/2/tweets/{tweet_id}/retweeted_by"
return url
def user_url(user_id):
url = f"https://api.twitter.com/2/users/{user_id}"
return url
def get_conversation_url(conversation_id, max_results=100):
'''
Get all comments and replies related to this tweet
'''
tweet_fields = 'id,author_id,public_metrics,conversation_id,created_at' #,in_reply_to_user_id #,entities
user_fields = 'name,username,profile_image_url'
expansions = 'author_id,referenced_tweets.id,in_reply_to_user_id,referenced_tweets.id.author_id'
url = f"https://api.twitter.com/2/tweets/search/recent"+\
f"?query=conversation_id:{conversation_id}"+\
f"&max_results={max_results}"+\
f"&tweet.fields={tweet_fields}"+\
f"&user.fields={user_fields}"+\
f"&expansions={expansions}"
return url
def bearer_oauth(r):
'''
Method required by bearer token authentication.
'''
r.headers['Authorization'] = f"Bearer {bearer_token}"
return r
def connect_to_endpoint(url, wait_on_timeout=True):
response = requests.request("GET", url, auth=bearer_oauth)
epochtime = response.headers['x-rate-limit-reset']
rate_limit_reset_time = datetime.fromtimestamp(int(epochtime))
rate_limit_remaining = response.headers['x-rate-limit-remaining']
print(f"{response.status_code}\tx-rate-limit-remaining: {rate_limit_remaining}\tx-rate-limit-reset: {rate_limit_reset_time}")
# If the REST API limit is reached, we can sleep until the limit is reset and then continue
if response.status_code == 429 and wait_on_timeout == True:
rate_limit_reset_time = datetime.fromtimestamp(int(epochtime))
time_difference = rate_limit_reset_time-datetime.now()
print(f"Rate limit resets at {rate_limit_reset_time}. Sleeping for {time_difference.seconds} seconds...")
time.sleep(time_difference.seconds+10)
print(datetime.now())
response = requests.request("GET", url, auth=bearer_oauth)
if response.status_code != 200:
raise Exception(
"Request returned an error: {} {}".format(
response.status_code, response.text
)
)
return response.json()
def day_to_time(day:str) -> str:
'''
Convert `dd-mon-yyyy` format to UTC timestamp. Used to generate Twitter API url.
'''
if day is not None:
time = datetime.strptime(day,'%d-%b-%Y').isoformat() + "Z"
else:
time = None
return time
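# Small illustrative check of the conversion above: "04-Jan-2022" becomes the RFC 3339
# UTC timestamp "2022-01-04T00:00:00Z" expected by the search endpoint.
def _example_day_to_time():
    assert day_to_time("04-Jan-2022") == "2022-01-04T00:00:00Z"
    assert day_to_time(None) is None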
def search_and_paginate(query:str, num_results:int=100, wait_on_timeout=True, start_day=None, end_day=None):
'''
Calls the Twitter REST API /2/tweets/search/recent and paginates results
:return: Tweets as a Pandas dataframe.
'''
results_each_call = 100
# Must be RFC 3339 UTC timestamp
start_time = day_to_time(start_day)
end_time = day_to_time(end_day)
url = search_url(query, results_each_call,start_time,end_time)
json_response = connect_to_endpoint(url, wait_on_timeout)
df_tweets = pd.DataFrame(json_response['data'])
df_users = pd.DataFrame(json_response['includes']['users'])
# As maximum results for each call is 100, more REST API calls may need to be made to collect
# more results.
while 'next_token' in json_response['meta'] and len(df_tweets) < num_results:
pagination_token = json_response['meta']['next_token']
json_response = connect_to_endpoint(f'{url}&next_token={pagination_token}', wait_on_timeout)
df_tweets = df_tweets.append(pd.DataFrame(json_response['data']),ignore_index=True)
df_users = df_users.append(pd.DataFrame(json_response['includes']['users']),ignore_index=True)
df_tweets = clean_tweets_dataframe(df_tweets)
return df_tweets, df_users
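# Minimal usage sketch: the query and date range below are placeholders, and a valid
# BEARER_TOKEN environment variable is assumed to be set.
def _example_search():
    df_tweets, df_users = search_and_paginate(
        "python", num_results=200, start_day="01-Jan-2022", end_day="04-Jan-2022"
    )
    print(df_tweets[["id", "author_id", "retweet_count", "like_count"]].head())
    return df_tweets, df_users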
def get_original_tweets(df_tweets, df_users):
'''
If the retweeted tweet is not in the original list, grab this as well and append it to the Pandas dataframe.
Can probably do one call for multiple `conversation_id`s
'''
df_referenced_tweets = pd.DataFrame()
df_referenced_users = pd.DataFrame()
# Get tweets that reference tweets not in the list
ids=df_tweets[(~df_tweets['referenced_tweet_id'].isin(df_tweets['id'])) & (df_tweets['type']=='retweeted')]['referenced_tweet_id'].tolist()
#drop duplicates
ids = list(dict.fromkeys(ids))
# As a maximum of 100 tweets can be called, list needs to be split into chunks
chunks = [ids[x:x+100] for x in range(0, len(ids), 100)]
for chunk in chunks:
url = tweets_url(chunk)
json_response = connect_to_endpoint(url)
        df_referenced_tweets = df_referenced_tweets.append(pd.DataFrame(json_response['data']), ignore_index=True)
# This file contains the functions used to fit the PV (pressure-volume) curves
# plus other utility functions
############################################################### LIBRARIES:
import numpy as np # math and working with matrices
import pandas as pd # building DataFrames (database-style tables)
from pathlib import Path # working with directories and files
import pickle # saving and loading data
import matplotlib.pyplot as plt # plotting
import seaborn as sns # plotting with DataFrames
from scipy.optimize import curve_fit # fitting the model curves
import math # for erf()
from scipy.interpolate import interp1d # interpolating the PV points
############################################################### MODELS:
# function used to fit the sigmoidal (diseased) PV model:
#
#                        b                             b
#   V(x) = a + --------------------- = a + --------------------------
#               1 + exp(-(x - c)/d)         1 + exp(-x/d) * exp(c/d)
#
# lim (x -> inf) V(x) = a + b
def sigmoidvenegas1(x, a, b, c, d):
return a + b/(1 + np.exp(-(x-c)/d))
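# Quick illustrative check of the model above (parameter values are arbitrary): with
# a=0.2, b=3.0, c=15 and d=5, V equals a + b/2 at the inflection point x = c and
# approaches the asymptote a + b at high pressure.
def _example_sigmoidvenegas1():
    a, b, c, d = 0.2, 3.0, 15.0, 5.0
    assert abs(sigmoidvenegas1(c, a, b, c, d) - (a + b / 2)) < 1e-9
    assert abs(sigmoidvenegas1(1e6, a, b, c, d) - (a + b)) < 1e-6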
########## paiva
def sigmoidpaiva(x,TLC,k1,k2):
return TLC/(1+(k1*np.exp(-k2*x)))
# our modification: adding an offset
def sigmoidpaivaoffset1(x,TLC,k1,k2,offset):
return TLC/(1+(k1*np.exp(-k2*x))) + offset
# based on the original paiva1975 paper, and adding an offset:
def sigmoidpaivaoffset(x,TLC,k1,k2,offset):
return TLC/(1+(k1*TLC*np.exp(-k2*x))) + offset
######### venegas2
def sigmoidvenegas2(x,TLC,B,k,c,d):
return (TLC-(B*np.exp(-k*x)))/(1 + np.exp(-(x-c)/d))
# our modification: adding an offset
def sigmoidvenegas2offset(x,TLC,B,k,c,d,offset):
return (TLC-(B*np.exp(-k*x)))/(1 + np.exp(-(x-c)/d)) + offset
# original sign: incorrect, because here when P -> c, V -> infinity
def sigmoidvenegas2original(x,TLC,B,k,c,d):
return (TLC-(B*np.exp(-k*x)))/(1 - np.exp(-(x-c)/d))
######### murphy and engel
def sigmoidmurphy(x,VM,Vm,k1,k2,k3): ### CAUTION: P = f(V) !!!
return ( k1/(VM-x) ) + ( k2/(Vm-x) ) + k3
# our modification: adding an offset
######### murphy and engel
def sigmoidmurphyoffset(x,TLC,offset,k1,k2,k3): ### CAUTION: P = f(V) !!!
return ( k1/((TLC+offset)-x) ) + ( k2/(offset-x) ) + k3
######### recruit_unit
# Simple exponential model of the pulmonary PV curve (Salazar 1964)
# Volume = Vmax*(1-e^(-K*Paw))
# Paw = airway pressure
# K = 'time constant' of the exponential
def expsalazar(x,Vo,K):
return Vo*(1-np.exp(-K*x))
# recruited-units model using erf()
# wrapping the function for an array input (for curve_fit)
def meu_erf_vec(Paw,mi,sigma):
saida_lst = []
for x_in in Paw:
x = (x_in-mi)/(sigma*1.5)
merf = math.erf(x)
saida_lst.append((merf/2)+0.5)
return np.array(saida_lst)
# model proposed by our group
def sigmoid_recruit_units(Paw,K,Vmax,mi,sigma,offset):
Vmax_recrutado = Vmax*meu_erf_vec(Paw,mi,sigma)
V = Vmax_recrutado*(1-np.exp(-K*Paw)) + offset
return V
############################################################### FUNCTIONS:
'''
Loads the .pickle files from the subfolders of the './porquinhos/' folder
and returns a DataFrame with the data.
The C maneuvers contain only 4 steps, and the D maneuvers only 5 steps.
'''
def carrega_pickles(folder = 'porquinhos'):
    dataframes_lst = [] # list of dataframes: each element corresponds to one pig/maneuver/PV dataset
for file_name in Path(folder).rglob('*.pickle'):
print(f"\rLendo {file_name.name}\t\t\t")
with open(file_name, "rb") as file: # abre o arquivo.pickle
porquinho = pickle.load(file)
            for manobra in porquinho: # for each maneuver
                if manobra == "D": # can use 3, 4 or 5 steps
                    n_steps = 5
                elif manobra == "C": # can use 3 or 4 steps
                    n_steps = 4
                elif manobra == "B": # can use 3 steps
                    n_steps = 3
                # Format the input data
format_data = []
for pi, pe, wi, we in zip(porquinho[manobra]["p_i"], porquinho[manobra]["p_e"],
porquinho[manobra]["w_i"], porquinho[manobra]["w_e"]):
format_data.extend([pi,wi,pe,we])
                format_data = np.array(format_data).reshape(-1,2) # build an N-row, 2-column matrix
##########################################################
caso = []
caso.append(porquinho.name)
caso.append(manobra)
caso.append(format_data)
caso.append(n_steps)
casodf = pd.DataFrame(caso, index = ['Animal', 'Manobra', 'Dados', 'n_steps']).T
dataframes_lst.append(casodf)
    # Join all dataframes in the list into a single DataFrame:
    dadosdf = pd.concat(dataframes_lst, ignore_index=True)
    # Extract the pressure and volume data from the raw data in the pickle files:
pv_lst = []
for idx,caso in dadosdf.iterrows():
pv = []
ps,vs = Data2PV(caso.Dados)
pv.append(ps)
pv.append(vs)
pvdf = pd.DataFrame([pv], columns = ['Pressoes', 'Volumes'])
pv_lst.append(pvdf)
pvdf_all = pd.concat(pv_lst, ignore_index=True)
dadosdf_completo = pd.concat((dadosdf,pvdf_all),axis=1)
    # add a column for the expected volume...
dadosdf_completo["volume_esperado"] = 0
return dadosdf_completo
'''
Returns the pressure and volume vectors from the raw data available in the pickles
'''
def Data2PV(data):
data2 = data[0::2, :]
pressures = data2[:,0]
volumes = data2[:,1]
return pressures,volumes
def encontra_volumes_limites_Murphy(parameters, modelo=sigmoidmurphy, pmax=100, pmin=0): ### in the Murphy model, P = f(V)
v_max = 1000
v_min = 0
    # find the upper limit:
for v in range(1,10000):
p = modelo(v,*parameters)
if p > pmax:
v_max = v
break
    # find the lower limit:
for v in range(1,-10000,-1):
p = sigmoidmurphy(v,*parameters)
if p < pmin:
v_min = v
break
return int(v_min),int(v_max)
# methods: lm, dogbox, trf
def testa_modelo(df, modelo, meu_p0 = [], metodo = 'lm', n_colunas = 4, texto = '', TLC_index = 0, meus_bounds = [], n_points_interp=0, debug=True, invert_PV = False):
numero_de_casos = len(df)
fig = plt.figure(figsize=(25,5*numero_de_casos/n_colunas))
erro_vec = []
n_fitted = 0
for caso_teste in range(numero_de_casos):
p_in = df.iloc[caso_teste].Pressoes
v_in = df.iloc[caso_teste].Volumes
        # interpolate points (if n_points_interp==0, the function does not interpolate)
p, v = interpola_PV(p_in,v_in,n_points_interp)
plt.subplot(int(numero_de_casos/n_colunas)+1,n_colunas,caso_teste+1)
fig.tight_layout()
if (n_points_interp > 0):
plt.scatter(p,v,label='interp',c='k',marker='x')
plt.scatter(p_in,v_in,label='raw')
try:
            if (invert_PV == False): ################################### V = f(P)
                if (meu_p0 == []): # no p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, bounds=meus_bounds)
                else: # with p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, p0 = meu_p0)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, p0 = meu_p0, bounds=meus_bounds)
            else: ###################################################### P = f(V)
                if (meu_p0 == []): # no p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, bounds=meus_bounds)
                else: # with p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, p0 = meu_p0)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, p0 = meu_p0, bounds=meus_bounds)
if debug:
textop = ""
for p in parameters:
if ( np.abs(p) > 1 ):
textop = textop + f'{p:7.1f}' + ' '
else:
textop = textop + f'{p:.3f}' + ' '
                print(f'Testing case {caso_teste}: {df.iloc[caso_teste].Animal}: [{textop}]')
if (invert_PV == False): ################################### V = f(P)
meu_p = range(1,100)
meu_v = modelo(meu_p,*parameters)
else: ###################################################### P = f(V)
v_min,v_max = encontra_volumes_limites_Murphy(parameters,modelo=modelo)
meu_v = np.asarray(range(v_min,v_max))
meu_p = modelo(meu_v,*parameters)
plt.plot(meu_p,meu_v,'r',label='fit')
n_fitted = n_fitted + 1
if ( df.iloc[caso_teste]["volume_esperado"] == 0 ):
plt.title(f'Case: {df.iloc[caso_teste].Animal}. TLC = {parameters[TLC_index]:.0f} mL')
else:
v_esperado = df.iloc[caso_teste]["volume_esperado"]
if (modelo.__name__ == 'sigmoidmurphy'):
TLC = parameters[0] - parameters[1]
else:
TLC = parameters[TLC_index]
erro = 100*(TLC-v_esperado)/v_esperado
erro_vec.append(erro)
plt.title(f'Case: {df.iloc[caso_teste].Animal}. TLC = {TLC:.0f} mL. Error: {erro:.1f}%')
except Exception as e:
            print(f'\tCase {caso_teste} ({df.iloc[caso_teste].Animal}) raised an error...')
plt.title(f'Case: {df.iloc[caso_teste].Animal}. Error fitting.')
#except:
# print('erro')
# pass
plt.xlabel('Pressure [cmH2O]')
plt.ylabel('Volume [mL]')
plt.legend()
fig.suptitle(f'PV Graph. Model: {modelo.__name__}. {texto}', fontsize=16, y=1.05)
plt.show()
if ( len(erro_vec) > 0 ):
erro_medio = np.mean(np.abs(erro_vec))
erro_norm = np.linalg.norm(erro_vec)
else:
erro_medio = -1
erro_norm = -1
if debug:
        print(f'Error norm: {erro_norm:.1f}. Mean error: {erro_medio:.2f}%. Fitted: {n_fitted}.')
return erro_norm, erro_medio, n_fitted
# same as the previous function, but shows no plots or messages... for use inside loops...
# methods: lm, dogbox, trf
def testa_modelo_loop(df, modelo, meu_p0 = [], metodo = 'lm', TLC_index = 0, meus_bounds = [], n_points_interp=0, invert_PV = False):
numero_de_casos = len(df)
erro_vec = []
n_fitted = 0
for caso_teste in range(numero_de_casos):
p_in = df.iloc[caso_teste].Pressoes
v_in = df.iloc[caso_teste].Volumes
        # interpolate points (if n_points_interp==0, the function does not interpolate)
p, v = interpola_PV(p_in,v_in,n_points_interp)
try:
            if (invert_PV == False): ################################### V = f(P)
                if (meu_p0 == []): # no p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, bounds=meus_bounds)
                else: # with p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, p0 = meu_p0)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, p0 = meu_p0, bounds=meus_bounds)
            else: ###################################################### P = f(V)
                if (meu_p0 == []): # no p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, bounds=meus_bounds)
                else: # with p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, p0 = meu_p0)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, p0 = meu_p0, bounds=meus_bounds)
if ( df.iloc[caso_teste]["volume_esperado"] == 0 ):
pass
else:
v_esperado = df.iloc[caso_teste]["volume_esperado"]
if (modelo.__name__ == 'sigmoidmurphy'):
TLC = parameters[0] - parameters[1]
else:
TLC = parameters[TLC_index]
erro = 100*(TLC-v_esperado)/v_esperado
erro_vec.append(erro)
n_fitted = n_fitted + 1
if ( (metodo=='lm') & (parameters[TLC_index] > 6000) ): # não fitou...
n_fitted = n_fitted - 1
except Exception as e:
pass
if ( len(erro_vec) > 0 ):
erro_medio = np.mean(np.abs(erro_vec))
erro_norm = np.linalg.norm(erro_vec)
else:
erro_medio = -1
erro_norm = -1
return erro_norm, erro_medio, n_fitted
# same as the previous function, but returns results for each case individually
# methods: lm, dogbox, trf
def testa_modelo_indiv(df, modelo, meu_p0 = [], metodo = 'lm', TLC_index = 0, meus_bounds = [],
n_points_interp=0, limite_vol_max = 6000, limite_vol_min = 100,
erro_factor_limit = 70, invert_PV = False):
numero_de_casos = len(df)
dfresult_lst = []
for caso_teste in range(numero_de_casos):
p_in = df.iloc[caso_teste].Pressoes
v_in = df.iloc[caso_teste].Volumes
        # interpolate points (if n_points_interp==0, the function does not interpolate)
p, v = interpola_PV(p_in,v_in,n_points_interp)
flag_fitted = False
erro = 0
parameters = []
erro_fit = 0
erro_factor = 0
tlc_eit = 0
try:
            if (invert_PV == False): ################################### V = f(P)
                if (meu_p0 == []): # no p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, bounds=meus_bounds)
                else: # with p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, p0 = meu_p0)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, p0 = meu_p0, bounds=meus_bounds)
            else: ###################################################### P = f(V)
                if (meu_p0 == []): # no p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, bounds=meus_bounds)
                else: # with p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, p0 = meu_p0)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, p0 = meu_p0, bounds=meus_bounds)
if (modelo.__name__ == 'sigmoidmurphy'):
TLC = parameters[0] - parameters[1]
else:
TLC = parameters[TLC_index]
tlc_eit = TLC
            # Compute the error
if ( df.iloc[caso_teste]["volume_esperado"] == 0 ):
pass
else:
v_esperado = df.iloc[caso_teste]["volume_esperado"]
erro = 100*(TLC-v_esperado)/v_esperado
            # Compute the fit error
if (invert_PV == False): ################################### V = f(P)
v_fit = modelo(p_in,*parameters)
erro_fit = np.linalg.norm(v_fit-v_in)
erro_factor = erro_fit/np.power(len(v_in),0.5)
else: ###################################################### P = f(V)
p_fit = modelo(v_in,*parameters)
erro_fit = np.linalg.norm(p_fit-p_in)
erro_factor = (erro_fit/np.power(len(v_in),0.5))*(( max(v_in)-min(v_in) )/( max(p_in)-min(p_in) ))
            # Check whether the fit is bad -> treat as not fitted
            if ( limite_vol_min <= TLC <= limite_vol_max ): # fitted something coherent...
flag_fitted = True
if ( erro_factor > erro_factor_limit ):
flag_fitted = False
except Exception as e:
pass
index = []
caso = []
index.append('Animal')
caso.append(df.iloc[caso_teste].Animal)
index.append('Maneuver')
caso.append(df.iloc[caso_teste].Manobra)
index.append('n_steps')
caso.append(df.iloc[caso_teste].n_steps)
index.append('Pressures')
caso.append(df.iloc[caso_teste].Pressoes)
index.append('Volumes')
caso.append(df.iloc[caso_teste].Volumes)
index.append('Model')
caso.append(modelo.__name__)
index.append('Method')
caso.append(metodo)
index.append('TLC_index')
caso.append(TLC_index)
index.append('TLC_eit')
caso.append(tlc_eit)
index.append('N_points_interp')
caso.append(n_points_interp)
index.append('p0')
caso.append(meu_p0)
index.append('bounds')
caso.append(meus_bounds)
index.append('fitted')
caso.append(flag_fitted)
index.append('parameters')
caso.append(parameters)
index.append('Vol_CT')
caso.append(df.iloc[caso_teste]["volume_esperado"])
index.append('error')
caso.append(erro)
index.append('fit error')
caso.append(erro_fit)
index.append('error factor')
caso.append(erro_factor)
index.append('Raw data')
caso.append(df.iloc[caso_teste].Dados)
casodf = pd.DataFrame(caso, index).T
dfresult_lst.append(casodf)
dfresult = pd.concat(dfresult_lst, ignore_index=True)
    # make sure some columns are treated as float
dfresult[['Vol_CT', 'error']] = dfresult[['Vol_CT', 'error']].astype(float)
return dfresult
# interpolate PV vectors
# n_points = number of intermediate points
def interpola_PV(pressoes,volumes,n_points=0):
if len(pressoes)<3:
kind = "linear"
elif len(pressoes)==3:
kind = "quadratic"
else:
kind = "cubic"
interp_pressures = np.linspace(pressoes[0], pressoes[-1], (len(pressoes)*(n_points+1))-n_points, endpoint=True)
interp_func = interp1d(pressoes, volumes, kind=kind)
interp_volumes = interp_func(interp_pressures)
return interp_pressures, interp_volumes
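# Illustrative example of the interpolation above: with 4 PV points and n_points=2 the
# output has len(p)*(n_points+1) - n_points = 4*3 - 2 = 10 pressure points, and the
# interpolated curve still passes through the original endpoints.
def _example_interpola_PV():
    p = np.array([5.0, 10.0, 15.0, 20.0])
    v = np.array([200.0, 500.0, 900.0, 1100.0])
    pi, vi = interpola_PV(p, v, n_points=2)
    assert len(pi) == 10
    assert np.isclose(vi[0], v[0]) and np.isclose(vi[-1], v[-1])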
# Class used to hold the model settings used by the testa_varios functions
class dados_modelos:
model_function = ''
TLC_index = ''
p0 = ''
bounds = ''
invert_PV = False
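# Minimal usage sketch of the settings class above (the initial guess and bounds below
# are illustrative placeholders, not tuned values): each dados_modelos instance bundles
# a model, the index of its TLC parameter, an optional p0 and bounds, and a list of
# instances is passed to testa_varios_indiv.
def _example_testa_varios(dadosdf):
    mod = dados_modelos()
    mod.model_function = sigmoidpaivaoffset
    mod.TLC_index = 0            # TLC is the first fitted parameter of sigmoidpaivaoffset
    mod.p0 = [2000, 1, 0.1, 0]   # illustrative guess for TLC, k1, k2, offset
    mod.bounds = ([0, 0, 0, -1000], [6000, 100, 10, 1000])  # illustrative bounds
    mod.invert_PV = False
    return testa_varios_indiv(dadosdf, [mod], metodos=('trf',), vec_interp=[0])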
def testa_varios_indiv(dadosdf, modelos, metodos = ('lm','trf','dogbox'), vec_interp = [0, 1, 2, 10, 20]):
df_lst = []
for mod in modelos:
        print(f'Running {mod.model_function.__name__}')
for n_points_interp in vec_interp:
for metodo in metodos:
                if (metodo == 'lm'): # 'lm' does not accept bounds
dfresult = testa_modelo_indiv(dadosdf, mod.model_function, metodo = metodo, meu_p0 = mod.p0,
TLC_index=mod.TLC_index, n_points_interp=n_points_interp, invert_PV=mod.invert_PV)
else:
dfresult = testa_modelo_indiv(dadosdf, mod.model_function, metodo = metodo, meu_p0 = mod.p0,
TLC_index=mod.TLC_index, meus_bounds=mod.bounds,
n_points_interp=n_points_interp, invert_PV=mod.invert_PV)
df_lst.append(dfresult)
    dadosdf = pd.concat(df_lst, ignore_index=True)
    return dadosdf
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary})
)
""" Functions used to query the PVlive api """
import logging
from concurrent import futures
from datetime import datetime, timedelta
from typing import Optional
import pandas as pd
import pytz
from pvlive_api import PVLive
from tqdm import tqdm
from nowcasting_dataset.data_sources.gsp.eso import get_list_of_gsp_ids
logger = logging.getLogger(__name__)
CHUNK_DURATION = timedelta(days=30)
def load_pv_gsp_raw_data_from_pvlive(
start: datetime, end: datetime, number_of_gsp: int = None, normalize_data: bool = True
) -> pd.DataFrame:
"""
Load raw pv gsp data from pvlive.
Note that each gsp is loaded separately. Also the data is loaded in 30 day chunks.
Args:
start: the start date for gsp data to load
end: the end date for gsp data to load
number_of_gsp: The number of gsp to load. Note that on 2021-09-01 there were 338 to load.
normalize_data: Option to normalize the generation according to installed capacity
Returns: Data frame of time series of gsp data. Shows PV data for each GSP from {start} to {end}
"""
# get a list of gsp ids
gsp_ids = get_list_of_gsp_ids(maximum_number_of_gsp=number_of_gsp)
# setup pv Live class, although here we are getting historic data
pvl = PVLive()
# set the first chunk of data, note that 30 day chunks are used except if the end time is
# smaller than that
first_start_chunk = start
first_end_chunk = min([first_start_chunk + CHUNK_DURATION, end])
gsp_data_df = []
logger.debug(f"Will be getting data for {len(gsp_ids)} gsp ids")
# loop over gsp ids
# limit the total number of concurrent tasks to be 4, so that we don't hit the pvlive api
# too much
future_tasks = []
with futures.ThreadPoolExecutor(max_workers=4) as executor:
for gsp_id in gsp_ids:
# set the first chunk start and end times
start_chunk = first_start_chunk
end_chunk = first_end_chunk
# loop over 30 days chunks (nice to see progress instead of waiting a long time for
# one command - this might not be the fastest)
while start_chunk <= end:
logger.debug(f"Getting data for gsp id {gsp_id} from {start_chunk} to {end_chunk}")
task = executor.submit(
pvl.between,
start=start_chunk,
end=end_chunk,
entity_type="gsp",
entity_id=gsp_id,
extra_fields="installedcapacity_mwp",
dataframe=True,
)
future_tasks.append(task)
# add 30 days to the chunk, to get the next chunk
start_chunk = start_chunk + CHUNK_DURATION
end_chunk = end_chunk + CHUNK_DURATION
if end_chunk > end:
end_chunk = end
logger.debug("Getting results")
# Collect results from each thread.
for task in tqdm(future_tasks):
one_chunk_one_gsp_gsp_data_df = task.result()
if normalize_data:
one_chunk_one_gsp_gsp_data_df["generation_mw"] = (
one_chunk_one_gsp_gsp_data_df["generation_mw"]
/ one_chunk_one_gsp_gsp_data_df["installedcapacity_mwp"]
)
# append to longer list
gsp_data_df.append(one_chunk_one_gsp_gsp_data_df)
# join together gsp data
gsp_data_df = pd.concat(gsp_data_df)
# return the combined GSP time series described in the docstring
return gsp_data_df
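# Illustrative usage (added; not part of the original module). It assumes network access
# to the PVLive API and timezone-aware datetimes, which pvlive_api generally expects:
# start = datetime(2021, 8, 1, tzinfo=pytz.utc)
# end = datetime(2021, 8, 2, tzinfo=pytz.utc)
# gsp_df = load_pv_gsp_raw_data_from_pvlive(start=start, end=end, number_of_gsp=2)
# gsp_df then holds capacity-normalized generation for two GSP ids over that day.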
import requests
from xls_functions import is_number, concat, format_date_time, evalfunc_str, is_in, now, uuid
import json
import time
import os
import ast
import pandas as pd
from copy import deepcopy
import re
from gen_funcs import *
#--------------------------------------------------------------------------------------------------------------------------------
# sim control functions
#--------------------------------------------------------------------------------------------------------------------------------
# form string into json query
def json_query(string):
dict1= {'query':string} # form dictionary
json_query = json.dumps(dict1) # convert dictionary into json
return json_query
# api requests
def api_query(string,api_key):
api_key = str(api_key)
json_qry = json_query(string) # convert string into json data
api_url ='https://new.simcontrol.co.za/graphql/'
headers = {'Content-Type': 'application/json', 'simcontrol-api-key': api_key}
api_qry = requests.post(url = api_url, headers = headers, data = json_qry) # api_qry
return api_qry.json()
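# Example (added for illustration): the two helpers above wrap a raw GraphQL string
# and post it to the SimControl endpoint with the API-key header.
# json_query('{ account { name balance } }') -> '{"query": "{ account { name balance } }"}'
# api_query('{ account { name balance } }', api_key) -> the decoded JSON response as a dict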
# update internal database
def append_json_db(filepath,data):
incentive_db = read_json_file(filepath) # read the internal db
incentive_db.append(data) # append current recharge data to internal db in json file
write_to_json(filepath, incentive_db) # write to the internal db
# metadata for a specific recharge
def recharge_data(api_key, reference):
# form query string
string = concat('{ adhocRecharges(first: 1, reference: \"',reference,'\") { edges { node {id msisdn network { name } productType product { label } price status succeededAt created failureType reference} } } }')
data = api_query(string,api_key)['data']['adhocRecharges']['edges']
if len(data) != 0:
data = data[0]['node']
data['network'] = data['network']['name']
return data
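# Example shape (added; field values are illustrative) of the dict returned for a known
# reference - the keys mirror the fields requested in the query string above:
# {'id': '...', 'msisdn': '+27821234567', 'network': 'MTN', 'productType': 'AIRTIME',
#  'product': {'label': '...'}, 'price': 5.0, 'status': 'SUCCESS', 'succeededAt': '...',
#  'created': '...', 'failureType': None, 'reference': '...'}
# When no recharge matches the reference, the untouched empty list is returned instead.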
# status of recharge
def recharge_status(api_key, reference):
status = recharge_data(api_key, reference)['status']
return status
# simcontrol balance
def sim_control_balance(api_key):
# check the remaining account balance
string = "{ account { name balance } }"
bal_json = api_query(string,api_key)
return bal_json['data']['account'][0]['balance']
# list of mobile network operators
def sim_control_networks(api_key):
string = "{ networks {id name } }"
api_resp = api_query(string,api_key)
mno = api_resp['data']['networks'] # API response
# create a dictionary of the available networks
networks = {}
for i in range(len(mno)):
networks[mno[i]['name'].lower()] = mno[i]['id']
return networks
# format contact for simcontrol
def simcontact(msisdn):
msisdn = str(msisdn)
#print('\nMSISDN: %s'%msisdn)
if is_number(msisdn):
msisdn = str(int(float(msisdn)))
#print('\nMSISDN: %s'%msisdn)
if len(msisdn)==11 and '.' in msisdn and msisdn.index('.')==9:
idx = msisdn.index('.')
sub_1 = msisdn[0:9]
msisdn = msisdn[-1] + sub_1
contact = remove_specialchar(msisdn) # remove special characters
# format contact
if contact[0] =='0' and len(contact) == 10:
return concat('+27',contact[1:len(contact)])
elif contact[0:2] == '27' and len(contact[2:len(contact)])==9:
return concat('+',contact)
elif len(contact)== 9 and contact[0]!= '0':
return concat('+27', contact)
else:
return concat(msisdn,' is not recognised as a South African mobile number.')
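# Examples (added; assume the xls_functions helpers behave as their names suggest):
# simcontact('0821234567') -> '+27821234567'
# simcontact('27821234567') -> '+27821234567'
# simcontact('821234567') -> '+27821234567'
# Anything that cannot be normalised falls through to the "not recognised" message.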
#count the number of recharges per contact
def recharge_count(contact,api_key, prodType = 'airtime', status = 'success', r_idx = 100, startDate = '', endDate=''):
# create query string
if startDate == '' or endDate == '':
q_string = concat('{ adhocRecharges( first: ',r_idx,', msisdn: \"',simcontact(contact),'\", productType: \"',prodType.upper(),'\", status: \"',status.upper(),'\") { edges { node {id msisdn network { name } productType product { label } price status succeededAt created failureType reference} } } }')
else:
# format startDate and endDate
fmt = '%Y-%m-%dT%H:%M:%S'
startDate = format_date_time(str(startDate),fmt)
endDate = format_date_time(str(endDate),fmt)
q_string = concat('{ adhocRecharges( first: ',r_idx,', msisdn: \"',simcontact(contact),'\", productType: \"',prodType.upper(),'\", status: \"',status.upper(),'\", created_Gte:\"',startDate,'\" created_Lt:\"',endDate,'\") { edges { node {id msisdn network { name } productType product { label } price status succeededAt created reference } } } }')
# request the first 100 recharges for the msisdn
q_res = api_query(q_string,api_key) # perform api_query
#print(q_res)
#count number of successful recharges
recharges = str(q_res).count(simcontact(contact))
return recharges
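# Example (added; assumes format_date_time accepts these date strings):
# recharge_count('0821234567', api_key, prodType='airtime', status='success',
#                startDate='2021-08-01 00:00:00', endDate='2021-09-01 00:00:00')
# counts the successful airtime recharges recorded for that number in August 2021,
# capped at the first r_idx=100 results returned by the API.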
# Define 'airtime' function to send airtime to msisidn and return metadata
def airtime(api_key, msisdn, network, amount, ref=None):
if ref == None:
ref = str(uuid())
if msisdn != 'nan' and network != 'nan' and amount != 'nan':
# a. Determine the network ID for a given network name
try:
mno_id = sim_control_networks(api_key)[network.lower()] #retrieve network ID
except:
mno_id = 'TmV0d29ya05vZGU6MTM='
# b. form query_string and query simcontrol API
string = concat('mutation { rechargeSim(msisdn: \"',simcontact(msisdn),'\", networkId: \"',mno_id,'\", airtimeAmount:',amount,', reference: \"',ref, '\") { rechargeId message}}')
recharge = api_query(string,api_key) # perform api_query
print('\nAIRTIME REQ: \n%s'%recharge)
# c. request recharge data
data_recharge = [recharge_data(reference=ref,api_key=api_key)] # get metadata data of recharge
return pd.DataFrame(data_recharge)
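# Illustrative usage (added; requires a live SimControl account and a valid API key):
# airtime(api_key, msisdn='0821234567', network='vodacom', amount=5)
# submits the recharge, generating a UUID reference when none is supplied, and
# returns a one-row pandas DataFrame with the recharge metadata.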
# recharge failed recharges
def recharge_failed(api_key, msisdn, ref=None, startDate = None, endDate= str(now()), recharge_limit = 1, project = None, prodType = 'airtime'):
# a. obtain history of recharges for the given msisdn
if endDate != None and startDate != None:
df_rec = msisdn_history(api_key, msisdn, startDate = startDate, prodType = prodType)
else:
df_rec = msisdn_history(api_key, msisdn, prodType = prodType)
if str(df_rec) != 'None':
print('prodType: ', prodType,'\t MSISDN:',msisdn, '\t HISTORY: ',len(df_rec))
# b. obtain successful and failed recharges
if project != None:
s_rec = df_rec[df_rec['reference'].str.contains(project) & df_rec['status'].str.contains('SUCCESS')] # records of successful recharges in the given project
f_rec = df_rec[df_rec['reference'].str.contains(project) & df_rec['status'].str.contains('FAILED')] # records of FAILED recharges in the given project
else:
s_rec = df_rec[df_rec['status'].str.contains('SUCCESS')] # records of successful recharges
f_rec = df_rec[df_rec['status'].str.contains('FAILED')] # records of failed recharges
if len(s_rec) < recharge_limit and len(f_rec) >0 and len(f_rec) <= 1:
# recharge msisdn
if f_rec.loc[0,'productType'] == 'AIRTIME':
recharge = airtime(api_key, msisdn, network = f_rec.loc[0,'network']['name'] , amount = f_rec.loc[0,'price'], ref= concat(f_rec.loc[0,'reference'],'_FINALTRIAL'))
return recharge
else:
recharge = buyProd(api_key, msisdn, network = f_rec.loc[0,'network']['name'] , prodID = f_rec.loc[0,'product']['id'], ref= concat(f_rec.loc[0,'reference'],'_FINALTRIAL'))
return recharge
else:
return None
# format dates to suit simcontrol
def sim_dateformat( startDate = None, endDate = None, fmt = '%Y-%m-%dT%H:%M:%S' ):
if startDate != None and endDate != None:
startDate = format_date_time(str(startDate),fmt)
endDate = format_date_time(str(endDate),fmt)
return {'startDate': startDate, 'endDate' : endDate}
else:
return None
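# Example (added; assumes format_date_time renders the fmt given above):
# sim_dateformat('2021-09-01 12:00:00', '2021-09-02 12:00:00')
# -> {'startDate': '2021-09-01T12:00:00', 'endDate': '2021-09-02T12:00:00'}
# With either bound missing, None is returned.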
# convert simcontrol's dictionary to DataFrame
def simdict_to_DataFrame(simdict):
rec_lst = []
for rec in simdict:
rec = rec['node']
rec_lst.append(rec)
return pd.DataFrame(rec_lst)
import requests
import pandas as pd
import json
def load_data():
stations = [{'name': '<NAME>' , 'id': 97280}, {'name': '<NAME>' , 'id': 97100}]
df = pd.DataFrame()
for station in stations:
station_url = "https://opendata-download-metobs.smhi.se/api/version/latest/parameter/14/station/{0}/period/latest-months/data.json".format(station['id'])
r = requests.get(url = station_url)
data = r.json()
station_df = pd.DataFrame(data['value'])
station_df['date'] = pd.to_datetime(station_df['date'], unit='ms')
station_df = station_df.set_index('date')
station_df.index = pd.to_datetime(station_df.index)
station_df.columns = pd.MultiIndex.from_product([[station['name']], station_df.columns], names=['station', 'data'])
# assumed completion: merge this station's frame into the combined table (still inside the station loop)
df = pd.concat([df, station_df], axis=1)
# assumed completion: after the loop, return the combined multi-station table
return df
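# Illustrative usage (added; relies on the assumed completion above): the combined frame
# is indexed by observation time with a (station, data) column MultiIndex, so
# weather = load_data()
# weather[station_name]             # all data columns for one station (station_name is a placeholder)
# weather[(station_name, 'value')]  # a single SMHI field; field names depend on the API response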
"""
After training with p1_training_inverse_model.py, run this for p1
Then run p1_make_vids.py to generate the videos
"""
import torch
import logging
import torch.nn as nn
from dataset import ObjPushDataset
from model_learners import InverseModel
from torch.utils.data import Dataset, DataLoader
from push_env import PushingEnv
import numpy as np
import pandas as pd
device = "cuda" if torch.cuda.is_available() else "cpu"
logger = logging.getLogger(__name__)
logging.basicConfig()
logger.setLevel(logging.INFO)
##### HYPERPARAMETERS ######
start_state_dims = 2
next_state_dims = 2
action_dims = 4
nn_layer_1_size = 64
nn_layer_2_size = 32
criterion = nn.MSELoss()
lr = 8e-4
seed = 0
num_epochs = 140
bsize = 512
num_pushes = 10
############################
def main():
logger.info("Instantiating model and importing weights")
# instantiate forward model and import pretrained weights
inv_model = InverseModel(start_state_dims=start_state_dims,
next_state_dims=next_state_dims,
action_dims=action_dims,
latent_var_1=nn_layer_1_size,
latent_var_2=nn_layer_2_size,
criterion=criterion,
lr=lr,
seed=seed)
inv_model.load_state_dict(torch.load("invmodel_learned_params.pt"))
# Load in data
logger.info("Importing test data")
test_dir = 'push_dataset/test'
# only want 1 push each time, so set batch_size to 1
test_loader = DataLoader(ObjPushDataset(test_dir), batch_size=1, shuffle=True)
env = PushingEnv()
errors = []
true_pushes = []
pred_pushes = []
logger.info("Running loop")
for i, (start_state, goal_state, true_action) in enumerate(test_loader):
logger.info(f'Iteration #{i}')
# Convert inputs to floats
start_state = start_state.float()
goal_state = goal_state.float()
true_action = true_action.float()
# Use inverse model to predict action given the start and goal states
combined_input = torch.cat((start_state, goal_state), dim=1)
pred_action = inv_model(combined_input)
# Switch output from tensors to numpy for easy use later
start_state = start_state.data.numpy()[0]
goal_state = goal_state.data.numpy()[0]
true_action = true_action.data.numpy()[0]
pred_action = pred_action.data.numpy()[0]
start_x, start_y, end_x, end_y = pred_action
_, end_state = env.execute_push(start_x, start_y, end_x, end_y)
end_state = np.array(end_state)
# Calculate errors
action_error = np.linalg.norm(true_action - pred_action)
state_error = np.linalg.norm(goal_state - end_state)
# Keep the results
errors.append(dict(action_error=action_error, state_error=state_error))
true_pushes.append(dict(obj_x=start_state[0], obj_y=start_state[1], start_push_x=true_action[0],
start_push_y=true_action[1], end_push_x=true_action[2], end_push_y=true_action[3]))
pred_pushes.append(dict(obj_x=start_state[0], obj_y=start_state[1], start_push_x=pred_action[0],
start_push_y=pred_action[1], end_push_x=pred_action[2], end_push_y=pred_action[3]))
if i > num_pushes - 1:
break
pd.DataFrame(errors).to_csv("results/P1/inverse_model_errors.csv")
pd.DataFrame(true_pushes).to_csv("results/P1/true_pushes.csv")  # assumed path, mirroring the errors CSV above
pd.DataFrame(pred_pushes).to_csv("results/P1/pred_pushes.csv")  # assumed path for the collected predictions
if __name__ == "__main__":
    main()
# coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
from eva.expression.abstract_expression import AbstractExpression, \
ExpressionType, \
ExpressionReturnType
from eva.models.storage.batch import Batch
class ComparisonExpression(AbstractExpression):
def __init__(self, exp_type: ExpressionType, left: AbstractExpression,
right: AbstractExpression):
children = []
if left is not None:
children.append(left)
if right is not None:
children.append(right)
super().__init__(exp_type, rtype=ExpressionReturnType.BOOLEAN,
children=children)
def evaluate(self, *args, **kwargs):
# cast in to numpy array
lvalues = self.get_child(0).evaluate(*args, **kwargs).frames.values
rvalues = self.get_child(1).evaluate(*args, **kwargs).frames.values
if len(lvalues) != len(rvalues):
if len(lvalues) == 1:
lvalues = np.repeat(lvalues, len(rvalues), axis=0)
elif len(rvalues) == 1:
rvalues = np.repeat(rvalues, len(lvalues), axis=0)
else:
raise Exception(
"Left and Right batch does not have equal elements")
if self.etype == ExpressionType.COMPARE_EQUAL:
return Batch(pd.DataFrame(lvalues == rvalues))
elif self.etype == ExpressionType.COMPARE_GREATER:
return Batch(pd.DataFrame(lvalues > rvalues))
elif self.etype == ExpressionType.COMPARE_LESSER:
return Batch(pd.DataFrame(lvalues < rvalues))
elif self.etype == ExpressionType.COMPARE_GEQ:
return Batch(pd.DataFrame(lvalues >= rvalues))
elif self.etype == ExpressionType.COMPARE_LEQ:
return Batch(pd.DataFrame(lvalues <= rvalues))
elif self.etype == ExpressionType.COMPARE_NEQ:
return Batch(pd.DataFrame(lvalues != rvalues))
elif self.etype == ExpressionType.COMPARE_CONTAINS:
res = [[all(x in p for x in q)
for p, q in zip(left, right)]
for left, right in zip(lvalues, rvalues)]
return Batch(pd.DataFrame(res))
elif self.etype == ExpressionType.COMPARE_IS_CONTAINED:
res = [[all(x in q for x in p)
for p, q in zip(left, right)]
for left, right in zip(lvalues, rvalues)]
return Batch(pd.DataFrame(res))