| prompt | completion | api |
|---|---|---|
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
months = ['january', 'february', 'march', 'april', 'may', 'june','all']
days_of_week = ['Sunday','Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','All']
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# user input for city (chicago, new york city, washington).
citylist = list(CITY_DATA.keys())
print("would you like to see data for",citylist)
while True:
city = input().lower()
if city not in CITY_DATA:
print("please try again")
continue
else:
break
# user input for month (all, january, february, ... , june)
print("which month?",months)
while True:
month = input().lower()
if month not in months:
print("please try again")
continue
else:
break
# user input for day of week (all, monday, tuesday, ... sunday)
print("which day?",days_of_week)
while True:
day = input().title()
if day not in days_of_week:
print("please try again")
continue
else:
break
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
df = pd.read_csv(CITY_DATA[city])
# convert the Start Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
# extract month and day of week from Start Time to create new columns
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.weekday_name
# filter by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
month = months.index(month) + 1
# filter by month to create the new dataframe
df = df[df['month'] == month]
# filter by day of week if applicable
if day != 'All':
# filter by day of week to create the new dataframe
df = df[df['day_of_week'] == day.title()]
return df
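# Minimal usage sketch of the two helpers above (values are hypothetical but valid
# members of CITY_DATA, months and days_of_week):
#   city, month, day = get_filters()
#   df = load_data('chicago', 'march', 'Friday')
# The returned DataFrame keeps only March trips that started on a Friday.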
def time_stats(df):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
# the most common month
popular_month = df['month'].mode()[0]
# the most common day of week
popular_day = df['day_of_week'].mode()[0]
# the most common start hour
# convert the Start Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
## Basics
import os
import sys
import random
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
## To save a trained ML Models
import pickle
from sklearn.cluster import KMeans
## To calculate possible combinations of a histogram
from scipy.special import comb
## To round a vector without changing its sum
import iteround
## Our Library
#sys.path.append("../")
from bandipy import environment
from bandipy import experiment
from bandipy import prior
from bandipy import policy
from bandipy import plotter
from bandipy import datasets
## To reduce categorical product features into a unique id.
from category_encoders.hashing import HashingEncoder
## To encode pairs of integers as single integer values using the Cantor pairing algorithm
import pairing as pf
## For synthetic data generation
import keras
from keras.models import Sequential
from keras.layers import Dense
import tensorflow as tf
from keras import backend as K
## Class instances
plotter = plotter.Plotter()
mldata = datasets.MultiLabelDatasets()
criteodata = datasets.CriteoDatasets()
## To show where exactly a warning has happened
# import warnings
# warnings.filterwarnings('error')
## For the sake of reproducibility
random.seed(0)
np.random.seed(0)
# tf.set_random_seed(0)
# session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
# K.set_session(sess)
tf.random.set_seed(0)
os.environ['PYTHONHASHSEED']=str(0)
class Simulation():
def __init__(self, data_type, bandit_algorithm, privacy_model, sim_sig):
self.data_type = data_type
self.bandit_algorithm = bandit_algorithm
self.privacy_model = privacy_model
self.sim_sig = sim_sig
def run_a_simulation(self, users, n_samples, n_actions, context_size,
contexts, responses, rec_policy, alpha,
given_agent, cb_sampling_rate, cb_neg_rew_sam_rate):
print("\n___________________________________________________\n")
history = dict()
shared_data = dict()
for u in users:
if u%500 == 0:
print((int)(((u-users[0])/len(users))*100), end=" - ")
if u%5000 == 0:
print("*")
env = environment.ContextualBanditEnv(context_s=contexts[u],
context_ns=None,
rewards=responses[u],
#rnd_seed=np.random.randint(len(users)),
rand_gen=np.random)
if rec_policy == "LinUCB":
agent = policy.LinUCB(n_actions, context_size)
elif rec_policy == "given":
agent = given_agent
exp = experiment.ExperimentWithSampling(env, agent)
exp.run_bandit(n_samples, alpha, cb_sampling_rate, cb_neg_rew_sam_rate)
history[u] = np.array(exp.con_act_rew_hist)
shared_data[u] = exp.shared_data
return history, shared_data
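# Note on the returned structures (inferred from the attribute names of the bandipy
# experiment object, so treat this as an assumption): history[u] holds one row per
# round with the context, chosen action and observed reward for user u, while
# shared_data[u] holds only the rounds that the sampling policy decided to share.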
def update_on_data(self, shared_contexts, shared_actions, shared_responses,
n_actions, context_size, rec_policy):
contexts = np.zeros((1, context_size))
responses = np.zeros((1, n_actions))
env = environment.ContextualBanditEnv(context_s = contexts,
context_ns = None,
rewards = responses,
rand_gen = np.random)
if rec_policy == "LinUCB":
agent = policy.LinUCB(n_actions, context_size)
exp = experiment.ExperimentWithSampling(env, agent)
exp.run_bandit_on_data(shared_contexts, shared_actions, shared_responses)
shared_model= agent.get_model()
return shared_model
def prepare_shared_data(self, shared_data, context_size, n_actions):
shared_contexts = np.zeros((0, context_size))
shared_actions = np.zeros(0)
shared_responses = np.zeros(0)
for d in shared_data:
tmp = np.array(shared_data[d])
if len(tmp)!=0:
s, e = 0, context_size
shared_contexts = np.append(shared_contexts, tmp[:, s:e], axis=0)
shared_actions = np.append(shared_actions, tmp[:, -2], axis=0)
shared_responses = np.append(shared_responses, tmp[:, -1], axis=0)
return shared_contexts, shared_actions ,shared_responses
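# Each row of shared_data[d] is assumed to be laid out as
# [context_0, ..., context_{context_size-1}, ..., action, reward], which is why the
# slices above take tmp[:, 0:context_size] for contexts, tmp[:, -2] for actions and
# tmp[:, -1] for rewards.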
def thresholding(self, shared_data, priv_sc_threshold, bin_size, n_actions):
freqs_contexts = np.zeros(2**bin_size)
th_shared_data = {k: v for k, v in shared_data.items() if len(v)!=0}
for u in th_shared_data:
for d in th_shared_data[u]:
freqs_contexts[int(d[0])] +=1
n_removed = 0
for u in th_shared_data:
for i, d in enumerate(th_shared_data[u]):
if freqs_contexts[int(d[0])] < priv_sc_threshold:
th_shared_data[u].pop(i)
n_removed+=1
print("Number of Removed on Shuffler: ", n_removed)
return th_shared_data
def normalize_and_bound(self, data, dec_digits=1):
data = data.copy()
for i in range(len(data)):
nd = None
if data[i].sum() != 0:
nd = data[i]/data[i].sum()
else:
nd = np.array([1/len(data[i])]*len(data[i]))
data[i] = iteround.saferound(nd, dec_digits)
return data
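# Hypothetical worked example for normalize_and_bound: a row [2, 1, 1] is first
# normalized to [0.5, 0.25, 0.25]; iteround.saferound then rounds each entry to
# dec_digits while keeping the row sum at exactly 1.0. An all-zero row is replaced by
# the uniform distribution before rounding.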
"""
data_type:
- 'syn' for synthetic data,
- 'mlc' for multi-label classification data,
- 'ads' for ad recommendations data.
"""
def run_simulation(self, n_users, early_frac,
n_samples, n_actions, context_size, with_data=False, **kwargs):
if with_data == False:
if self.data_type == 'syn':
ctr_scaling_factor = kwargs['ctr_scaling_factor']
resp_noise_level = kwargs['resp_noise_level']
mapping_function = kwargs['mapping_function']
data_builder = datasets.Synthetic(mapping_function)
contexts , responses = data_builder.generate_data(n_users, n_samples,
n_actions, context_size,
ctr_scaling_factor, resp_noise_level)
elif self.data_type == 'mlc':
if kwargs['mlc_dataset'] == "mediamill":
if n_samples*n_users > 43000:
print("This dataset does not include that much data. Pelase choose 'n_samples'*'n_users' < 43000")
return None
if n_actions > 40 or context_size > 40:
print("Please choose 'n_actions' and 'context_size' <= 40! (Or contribute by modifying the code :))")
return None
contexts_mm1, contexts_mm2, responses = mldata.splitted_mediamill(N=n_users,
red_K= n_actions,
shuffle=False,
verbose=False,
focus="context")
contexts = list()
for u in range(len(contexts_mm1)):
if context_size > 10:
contexts.append(np.concatenate((contexts_mm1[u],
contexts_mm2[u][:,:context_size-10]),
axis=1))
else:
contexts.append(np.array(contexts_mm1[u][:,:context_size]))
for i in range(len(contexts)):
contexts[i] = self.normalize_and_bound(contexts[i])
elif kwargs['mlc_dataset'] == "tmc":
if n_samples*n_users > 28000:
print("This dataset does not include that much data. Pelase choose 'n_samples'*'n_users' < 28000")
return None
if n_actions > 22:
print("Please choose 'n_actions' <= 22 (The dataset does not support more than this)")
return None
contexts, _, responses = mldata.splitted_tmc(N=n_users, Km= context_size,
Ksm=1,
shuffle=False,
verbose=False,
focus="context")
if n_actions < 22:
for u in range(len(responses)):
responses[u] = responses[u][:,:n_actions]
for i in range(len(contexts)):
contexts[i] = self.normalize_and_bound(contexts[i])
elif self.data_type == 'ads':
if kwargs['ads_dataset'] == "criteo_kaggle":
if context_size != 10:
print("For this dataset you can only have 'context_size'==10.")
return None
cr_f_name = kwargs['ads_f_name']
if kwargs['ads_build'] == True:
prc = n_actions
data_file = pd.read_csv("Criteo/"+cr_f_name+".csv")
#! /usr/bin/python3
import sys
import argparse
from os import listdir
import string
import pandas as pd
from xml.dom.minidom import parse
class Scratcher():
def __init__(self):
args = self.parse_arguments()
self.datadir = args["datadir"]
self.outfile_name = args["outfile"]
self.f = open(self.outfile_name, "w+")
print("Starting the process of directory " + self.datadir + " saved in " + self.outfile_name)
def parse_arguments(self):
# construct the argument parser
parser = argparse.ArgumentParser()
parser.add_argument('-datadir', '--datadir', type=str, default="data/train/", help='Directory with XML files to process')
parser.add_argument('-outfile', '--outfile', type=str, default="info.out", help='Name for the output file')
args = vars(parser.parse_args())
return args
def createMap(self, token, type):
return {'name': token[0], 'offset': str(token[1])+"-" +str(token[2]), 'type': type}
def scratchInfo(self):
self.info = []
# process each file in directory
for f in listdir(self.datadir):
# parse XML file , obtaining a DOM tree
tree = parse(self.datadir + "/" + f)
# process each sentence in the file
sentences = tree.getElementsByTagName("sentence")
for s in sentences:
info = s.getElementsByTagName("entity")
for i in info:
sid = i.attributes["id"].value # get sentence id
stext = i.attributes["text"].value # get sentence text
itype = i.attributes["type"].value
self.info.append( {'name': stext, 'type': itype})
# print sentence entities in format requested for evaluation
print(sid + "|" + stext + "|" + itype, file = self.f)
self.f.close()
# create statistics
self.searchSuffixes()
def searchSuffixes(self):
self.suffixes = {}
for info in self.info:
if info["type"] not in self.suffixes:
self.suffixes[info["type"]] = {}
if info["name"][-3:] not in self.suffixes[info["type"]]:
self.suffixes[info["type"]][info["name"][-3:]] = 0
self.suffixes[info["type"]][info["name"][-3:]] +=1
a = pd.DataFrame.from_dict(self.suffixes)
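# Minimal usage sketch (assumes XML files are available under the default
# data/train/ directory declared in parse_arguments):
#   if __name__ == "__main__":
#       Scratcher().scratchInfo()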
from datetime import datetime
import numpy as np
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index
from pandas.tseries.tools import parse_time_string
import pandas.tseries.frequencies as _freq_mod
import pandas.core.common as com
import pandas.core.datetools as datetools
from pandas._tseries import Timestamp
import pandas._tseries as lib
#---------------
# Period logic
def to_period(arg, freq=None):
""" Attempts to convert arg to timestamp """
if arg is None:
return arg
if type(arg) == float:
raise TypeError("Cannot convert a float to period")
return Period(arg, freq=freq)
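# Example of the conversion above (hedged sketch): to_period('4Q2005', freq='Q')
# yields a quarterly Period for the fourth quarter of 2005, to_period(None) simply
# returns None, and passing a float raises TypeError.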
class Period(object):
def __init__(self, value=None, freq=None,
year=None, month=1, quarter=None, day=1,
hour=0, minute=0, second=0):
"""
Represents a period of time
Parameters
----------
value : Period or basestring, default None
The time period represented (e.g., '4Q2005')
freq : str, default None
e.g., 'B' for businessday, ('T', 5) or '5T' for 5 minutes
year : int, default None
month : int, default 1
quarter : int, default None
day : int, default 1
hour : int, default 0
minute : int, default 0
second : int, default 0
"""
# freq points to a tuple (base, mult); base is one of the defined
# periods such as A, Q, etc. Every five minutes would be, e.g.,
# ('T', 5) but may be passed in as a string like '5T'
self.freq = None
# ordinal is the period offset from the gregorian proleptic epoch
self.ordinal = None
if value is None:
if freq is None:
raise ValueError("If value is None, freq cannot be None")
if year is None:
raise ValueError("If value is None, year cannot be None")
if quarter is not None:
month = (quarter - 1) * 3 + 1
base, mult = _gfc(freq)
self.ordinal = lib.period_ordinal(year, month, day, hour, minute,
second, base, mult)
elif isinstance(value, Period):
other = value
if freq is None or _gfc(freq) == _gfc(other.freq):
self.ordinal = other.ordinal
freq = other.freq
else:
converted = other.asfreq(freq)
self.ordinal = converted.ordinal
elif isinstance(value, basestring):
value = value.upper()
dt, parsed, reso = parse_time_string(value)
if freq is None:
if reso == 'year':
freq = 'A'
elif reso == 'quarter':
freq = 'Q'
elif reso == 'month':
freq = 'M'
elif reso == 'day':
freq = 'D'
elif reso == 'hour':
freq = 'H'
elif reso == 'minute':
freq = 'T'
elif reso == 'second':
freq = 'S'
else:
raise ValueError("Could not infer frequency for period")
elif isinstance(value, datetime):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
elif isinstance(value, (int, long)):
if value <= 0:
raise ValueError("Value must be positive")
self.ordinal = value
if freq is None:
raise ValueError('Must supply freq for ordinal value')
else:
msg = "Value must be Period, string, integer, or datetime"
raise ValueError(msg)
base, mult = _gfc(freq)
if self.ordinal is None:
self.ordinal = lib.period_ordinal(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second, base, mult)
self.freq = _freq_mod._get_freq_str(base, mult)
def __eq__(self, other):
if isinstance(other, Period):
return (self.ordinal == other.ordinal
and _gfc(self.freq) == _gfc(other.freq))
return False
def __add__(self, other):
if isinstance(other, (int, long)):
return Period(self.ordinal + other, self.freq)
raise ValueError("Cannot add with non-integer value")
def __sub__(self, other):
if isinstance(other, (int, long)):
return Period(self.ordinal - other, self.freq)
if isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot do arithmetic with "
"non-conforming periods")
return self.ordinal - other.ordinal
raise ValueError("Cannot sub with non-integer value")
def asfreq(self, freq=None, how='E'):
"""
Parameters
----------
freq :
how :
Returns
-------
resampled : Period
"""
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
new_ordinal = lib.period_asfreq(self.ordinal, base1, mult1,
base2, mult2, how)
return Period(new_ordinal, (base2, mult2))
def start_time(self):
return self.to_timestamp(which_end='S')
def end_time(self):
return self.to_timestamp(which_end='E')
def to_timestamp(self, which_end='S'):
"""
Return the Timestamp at the start/end of the period
Parameters
----------
which_end: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
'Start', 'Finish', 'Begin', 'End'
Returns
-------
Timestamp
"""
which_end = _validate_end_alias(which_end)
new_val = self.asfreq('S', which_end)
base, mult = _gfc(new_val.freq)
return Timestamp(lib.period_ordinal_to_dt64(new_val.ordinal, base, mult))
@property
def year(self):
base, mult = _gfc(self.freq)
return lib.get_period_year(self.ordinal, base, mult)
@property
def month(self):
base, mult = _gfc(self.freq)
return lib.get_period_month(self.ordinal, base, mult)
@property
def qyear(self):
base, mult = _gfc(self.freq)
return lib.get_period_qyear(self.ordinal, base, mult)
@property
def quarter(self):
base, mult = _gfc(self.freq)
return lib.get_period_quarter(self.ordinal, base, mult)
@property
def day(self):
base, mult = _gfc(self.freq)
return lib.get_period_day(self.ordinal, base, mult)
@property
def week(self):
base, mult = _gfc(self.freq)
return lib.get_period_week(self.ordinal, base, mult)
@property
def weekday(self):
base, mult = _gfc(self.freq)
return lib.get_period_weekday(self.ordinal, base, mult)
@property
def day_of_week(self):
base, mult = _gfc(self.freq)
return lib.get_period_dow(self.ordinal, base, mult)
@property
def day_of_year(self):
base, mult = _gfc(self.freq)
return lib.get_period_doy(self.ordinal, base, mult)
@property
def hour(self):
base, mult = _gfc(self.freq)
return lib.get_period_hour(self.ordinal, base, mult)
@property
def minute(self):
base, mult = _gfc(self.freq)
return lib.get_period_minute(self.ordinal, base, mult)
@property
def second(self):
base, mult = _gfc(self.freq)
return lib.get_period_second(self.ordinal, base, mult)
@classmethod
def now(cls, freq=None):
return Period(datetime.now(), freq=freq)
def __repr__(self):
base, mult = _gfc(self.freq)
formatted = lib.period_ordinal_to_string(self.ordinal, base, mult)
freqstr = _freq_mod._reverse_period_code_map[base]
if mult == 1:
return "Period('%s', '%s')" % (formatted, freqstr)
return ("Period('%s', '%d%s')" % (formatted, mult, freqstr))
def __str__(self):
base, mult = _gfc(self.freq)
formatted = lib.period_ordinal_to_string(self.ordinal, base, mult)
return ("%s" % formatted)
def strftime(self, fmt):
"""
Returns the string representation of the :class:`Period`, depending
on the selected :keyword:`format`. :keyword:`format` must be a string
containing one or several directives. The method recognizes the same
directives as the :func:`time.strftime` function of the standard Python
distribution, as well as the specific additional directives ``%f``,
``%F``, ``%q``. (formatting & docs originally from scikits.timeseries)
+-----------+--------------------------------+-------+
| Directive | Meaning | Notes |
+===========+================================+=======+
| ``%a`` | Locale's abbreviated weekday | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%A`` | Locale's full weekday name. | |
+-----------+--------------------------------+-------+
| ``%b`` | Locale's abbreviated month | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%B`` | Locale's full month name. | |
+-----------+--------------------------------+-------+
| ``%c`` | Locale's appropriate date and | |
| | time representation. | |
+-----------+--------------------------------+-------+
| ``%d`` | Day of the month as a decimal | |
| | number [01,31]. | |
+-----------+--------------------------------+-------+
| ``%f`` | 'Fiscal' year without a | \(1) |
| | century as a decimal number | |
| | [00,99] | |
+-----------+--------------------------------+-------+
| ``%F`` | 'Fiscal' year with a century | \(2) |
| | as a decimal number | |
+-----------+--------------------------------+-------+
| ``%H`` | Hour (24-hour clock) as a | |
| | decimal number [00,23]. | |
+-----------+--------------------------------+-------+
| ``%I`` | Hour (12-hour clock) as a | |
| | decimal number [01,12]. | |
+-----------+--------------------------------+-------+
| ``%j`` | Day of the year as a decimal | |
| | number [001,366]. | |
+-----------+--------------------------------+-------+
| ``%m`` | Month as a decimal number | |
| | [01,12]. | |
+-----------+--------------------------------+-------+
| ``%M`` | Minute as a decimal number | |
| | [00,59]. | |
+-----------+--------------------------------+-------+
| ``%p`` | Locale's equivalent of either | \(3) |
| | AM or PM. | |
+-----------+--------------------------------+-------+
| ``%q`` | Quarter as a decimal number | |
| | [01,04] | |
+-----------+--------------------------------+-------+
| ``%S`` | Second as a decimal number | \(4) |
| | [00,61]. | |
+-----------+--------------------------------+-------+
| ``%U`` | Week number of the year | \(5) |
| | (Sunday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Sunday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%w`` | Weekday as a decimal number | |
| | [0(Sunday),6]. | |
+-----------+--------------------------------+-------+
| ``%W`` | Week number of the year | \(5) |
| | (Monday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Monday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%x`` | Locale's appropriate date | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%X`` | Locale's appropriate time | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%y`` | Year without century as a | |
| | decimal number [00,99]. | |
+-----------+--------------------------------+-------+
| ``%Y`` | Year with century as a decimal | |
| | number. | |
+-----------+--------------------------------+-------+
| ``%Z`` | Time zone name (no characters | |
| | if no time zone exists). | |
+-----------+--------------------------------+-------+
| ``%%`` | A literal ``'%'`` character. | |
+-----------+--------------------------------+-------+
.. note::
(1)
The ``%f`` directive is the same as ``%y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(2)
The ``%F`` directive is the same as ``%Y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(3)
The ``%p`` directive only affects the output hour field
if the ``%I`` directive is used to parse the hour.
(4)
The range really is ``0`` to ``61``; this accounts for leap
seconds and the (very rare) double leap seconds.
(5)
The ``%U`` and ``%W`` directives are only used in calculations
when the day of the week and the year are specified.
.. rubric:: Examples
>>> a = Period(freq='Q@JUL', year=2006, quarter=1)
>>> a.strftime('%F-Q%q')
'2006-Q1'
>>> # Output the last month in the quarter of this date
>>> a.strftime('%b-%Y')
'Oct-2005'
>>>
>>> a = Period(freq='D', year=2001, month=1, day=1)
>>> a.strftime('%d-%b-%Y')
'01-Jan-2001'
>>> a.strftime('%b. %d, %Y was a %A')
'Jan. 01, 2001 was a Monday'
"""
base, mult = _gfc(self.freq)
if fmt is not None:
return lib.period_strftime(self.ordinal, base, mult, fmt)
else:
return lib.period_ordinal_to_string(self.ordinal, base, mult)
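# Hedged usage sketch of the Period API defined above (method names are taken from
# the class itself; return values are not verified here):
#   p = Period('2005Q1', freq='Q')
#   p.asfreq('M', how='E')      # convert to the last month of the quarter
#   p.to_timestamp('S')         # Timestamp at the start of the period
#   p + 1                       # the following quarter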
def _period_unbox(key, check=None):
'''
Period-like => int64
'''
if not isinstance(key, Period):
key = Period(key, freq=check)
elif check is not None:
if key.freq != check:
raise ValueError("%s is wrong freq" % key)
return np.int64(key.ordinal)
def _period_unbox_array(arr, check=None):
if arr is None:
return arr
unboxer = np.frompyfunc(lambda x: _period_unbox(x, check=check), 1, 1)
return unboxer(arr)
def _period_box(val, freq):
return Period(val, freq=freq)
def _period_box_array(arr, freq):
if arr is None:
return arr
if not isinstance(arr, np.ndarray):
return arr
boxfunc = lambda x: _period_box(x, freq)
boxer = np.frompyfunc(boxfunc, 1, 1)
return boxer(arr)
def dt64arr_to_periodarr(data, freq):
if data is None:
return data
if isinstance(freq, basestring):
base, mult = _gfc(freq)
else:
base, mult = freq
return lib.dt64arr_to_periodarr(data.view('i8'), base, mult)
# --- Period index sketch
class PeriodIndex(Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
Index keys are boxed to Period objects which carries the metadata (eg,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
dtype : NumPy dtype (default: i8)
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
"""
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None):
if isinstance(freq, Period):
freq = freq.freq
else:
freq = datetools.get_standard_freq(freq)
if data is None:
if start is None and end is None:
raise ValueError('Must specify start, end, or data')
start = to_period(start, freq)
end = to_period(end, freq)
is_start_intv = isinstance(start, Period)
is_end_intv = isinstance(end, Period)
if (start is not None and not is_start_intv):
raise ValueError('Failed to convert %s to period' % start)
if (end is not None and not is_end_intv):
raise ValueError('Failed to convert %s to period' % end)
if is_start_intv and is_end_intv and (start.freq != end.freq):
raise ValueError('Start and end must have same freq')
if freq is None:
if is_start_intv:
freq = start.freq
elif is_end_intv:
freq = end.freq
else:
raise ValueError('Could not infer freq from start/end')
if periods is not None:
if start is None:
data = np.arange(end.ordinal - periods + 1,
end.ordinal + 1,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods,
dtype=np.int64)
else:
if start is None or end is None:
msg = 'Must specify both start and end if periods is None'
raise ValueError(msg)
data = np.arange(start.ordinal, end.ordinal+1, dtype=np.int64)
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
if isinstance(data, Period):
data = [data]
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = np.array(data, dtype='i8')
except:
data = np.array(data, dtype='O')
if freq is None:
raise ValueError('freq cannot be none')
data = _period_unbox_array(data, check=freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, mult1 = _gfc(data.freq)
base2, mult2 = _gfc(freq)
data = lib.period_asfreq_arr(data.values, base1, mult1,
base2, mult2, 'E')
else:
if freq is None:
raise ValueError('freq cannot be none')
if data.dtype == np.datetime64:
data = dt64arr_to_periodarr(data, freq)
elif data.dtype == np.int64:
pass
else:
data = data.astype('i8')
data = np.array(data, dtype=np.int64, copy=False)
if (data <= 0).any():
raise ValueError("Found illegal (<= 0) values in data")
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
@property
def is_all_dates(self):
return True
def asfreq(self, freq=None, how='E'):
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
if isinstance(freq, basestring):
base2, mult2 = _gfc(freq)
else:
base2, mult2 = freq
new_data = lib.period_asfreq_arr(self.values,
base1, mult1,
base2, mult2, how)
return PeriodIndex(new_data, freq=freq)
@property
def year(self):
base, mult = _gfc(self.freq)
return lib.get_period_year_arr(self.values, base, mult)
@property
def month(self):
base, mult = _gfc(self.freq)
return lib.get_period_month_arr(self.values, base, mult)
import rba
import copy
import pandas
import time
import numpy
import seaborn
import matplotlib.pyplot as plt
from .rba_Session import RBA_Session
from sklearn.linear_model import LinearRegression
# import matplotlib.pyplot as plt
def find_ribosomal_proteins(rba_session, model_processes=['TranslationC', 'TranslationM'], external_annotations=None):
out = []
for i in model_processes:
out += [rba_session.ModelStructure.ProteinInfo.Elements[j]['ProtoID']
for j in list(rba_session.ModelStructure.ProcessInfo.Elements[i]['Composition'].keys()) if j in rba_session.ModelStructure.ProteinInfo.Elements.keys()]
if external_annotations is not None:
out += list(external_annotations['ID'])
return(list(set(out)))
def build_model_compartment_map(rba_session):
out = {rba_session.ModelStructure.ProteinInfo.Elements[i]['ProtoID']: rba_session.ModelStructure.ProteinInfo.Elements[i]['Compartment'] for i in list(
rba_session.ModelStructure.ProteinInfo.Elements.keys())}
return(out)
def build_compartment_annotations(Compartment_Annotations_external, model_protein_compartment_map):
for i in Compartment_Annotations_external.index:
if Compartment_Annotations_external.loc[i, 'ID'] in list(model_protein_compartment_map.keys()):
Compartment_Annotations_external.loc[i, 'modelproteinannotation'] = 1
else:
Compartment_Annotations_external.loc[i, 'modelproteinannotation'] = 0
Compartment_Annotations_internal = pandas.DataFrame()
Compartment_Annotations_internal['ID'] = list(model_protein_compartment_map.keys())
Compartment_Annotations_internal['ModelComp'] = list(model_protein_compartment_map.values())
Compartment_Annotations = pandas.concat(
[Compartment_Annotations_internal, Compartment_Annotations_external.loc[Compartment_Annotations_external['modelproteinannotation'] == 0, ['ID', 'ModelComp']]], axis=0)
return(Compartment_Annotations)
def build_dataset_annotations(input, ID_column, Uniprot, Compartment_Annotations, model_protein_compartment_map, ribosomal_proteins):
print('riboprots-----------------')
print(ribosomal_proteins)
out = pandas.DataFrame()
for g in list(input[ID_column]):
out.loc[g, 'ID'] = g
matches = [i for i in list(Uniprot.loc[pandas.isna(
Uniprot['Gene names']) == False, 'Gene names']) if g in i]
mass_prot = numpy.nan
if len(matches) > 0:
mass_prot = len(Uniprot.loc[Uniprot['Gene names'] == matches[0], 'Sequence'].values[0])
out.loc[g, 'AA_residues'] = mass_prot
if g in list(Compartment_Annotations['ID']):
out.loc[g, 'Location'] = Compartment_Annotations.loc[Compartment_Annotations['ID']
== g, 'ModelComp'].values[0]
in_model = 0
if g in model_protein_compartment_map.keys():
in_model = 1
is_ribosomal = 0
if g in ribosomal_proteins:
is_ribosomal = 1
out.loc[g, 'InModel'] = in_model
out.loc[g, 'IsRibosomal'] = is_ribosomal
return(out)
def build_full_annotations_from_dataset_annotations(annotations_list):
out = pandas.concat(annotations_list, axis=0)
index = out.index
is_duplicate = index.duplicated(keep="first")
not_duplicate = ~is_duplicate
out = out[not_duplicate]
return(out)
def infer_copy_numbers_from_reference_copy_numbers(fold_changes, absolute_data, matching_column_in_fold_change_data, matching_column_in_absolute_data, conditions_in_fold_change_data_to_restore):
out = pandas.DataFrame()
for i in list(absolute_data['Gene']):
if i in list(fold_changes['Gene']):
FoldChange_match = fold_changes.loc[fold_changes['Gene']
== i, matching_column_in_fold_change_data].values[0]
CopyNumber_match = absolute_data.loc[absolute_data['Gene']
== i, matching_column_in_absolute_data].values[0]
if not pandas.isna(FoldChange_match):
if not pandas.isna(CopyNumber_match):
out.loc[i, 'ID'] = i
out.loc[i, 'Absolute_Reference'] = CopyNumber_match/(2**FoldChange_match)
for gene in list(out['ID']):
Abs_Ref = out.loc[gene, 'Absolute_Reference']
for condition in conditions_in_fold_change_data_to_restore:
out.loc[gene, condition] = Abs_Ref * \
(2**fold_changes.loc[fold_changes['Gene'] == gene, condition].values[0])
return(out)
def add_annotations_to_proteome(input, ID_column, annotations):
for i in input.index:
if input.loc[i, ID_column] in annotations.index:
input.loc[i, 'AA_residues'] = annotations.loc[input.loc[i, ID_column], 'AA_residues']
input.loc[i, 'Location'] = annotations.loc[input.loc[i, ID_column], 'Location']
input.loc[i, 'InModel'] = annotations.loc[input.loc[i, ID_column], 'InModel']
input.loc[i, 'IsRibosomal'] = annotations.loc[input.loc[i, ID_column], 'IsRibosomal']
return(input)
def determine_compartment_occupation(Data, Condition, mass_col='AA_residues', only_in_model=False, compartments_to_ignore=['DEF'], compartments_no_original_PG=[], ribosomal_proteins_as_extra_compartment=True):
for i in compartments_to_ignore:
Data = Data.loc[Data['Location'] != i]
for i in compartments_no_original_PG:
Data = Data.loc[(Data['Location'] != i) | (Data['InModel'] == 1)]
if only_in_model:
Data = Data.loc[Data['InModel'] >= 1]
if ribosomal_proteins_as_extra_compartment:
Data_R = Data.loc[Data['IsRibosomal'] == 1].copy()
Data = Data.loc[Data['IsRibosomal'] == 0]
Data_R_df = Data_R.loc[:, [Condition, mass_col, 'Location']]
Data_R_df[Condition] = Data_R_df[Condition]*Data_R_df[mass_col]
Ribosomal_sum = Data_R_df[Condition].sum()
df = Data.loc[:, [Condition, mass_col, 'Location']]
df[Condition] = df[Condition]*df[mass_col]
out = pandas.DataFrame(df.groupby('Location').sum())
if ribosomal_proteins_as_extra_compartment:
out.loc['Ribosomes', Condition] = Ribosomal_sum
out.loc['Total', Condition] = out[Condition].sum()
out.loc[:, 'original_protein_fraction'] = out[Condition]/out.loc['Total', Condition]
out.rename(columns={Condition: 'original_amino_acid_occupation'}, inplace=True)
out.drop(columns=['AA_residues'], inplace=True)
return(out)
def build_proteome_overview(input, condition, compartments_to_ignore=['DEF', 'DEFA', 'Def'], compartments_no_original_PG=['n', 'Secreted'], ribosomal_proteins_as_extra_compartment=True):
out = determine_compartment_occupation(Data=input, Condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=ribosomal_proteins_as_extra_compartment, only_in_model=False)
out_in_model = determine_compartment_occupation(Data=input, Condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=ribosomal_proteins_as_extra_compartment, only_in_model=True)
out['original_PG_fraction'] = 1-out_in_model['original_amino_acid_occupation'] / \
out['original_amino_acid_occupation']
return(out)
def determine_correction_factor_A(fractions_entirely_replaced_with_expected_value):
expected_fraction_sum = 0
for i in fractions_entirely_replaced_with_expected_value.keys():
expected_fraction_sum += fractions_entirely_replaced_with_expected_value[i]
factor = 1/(1-expected_fraction_sum)
return(factor)
def determine_correction_factor_B(imposed_compartment_fractions):
expected_fractions = 0
for i in imposed_compartment_fractions.keys():
expected_fractions += imposed_compartment_fractions[i]
factor = 1-expected_fractions
return(factor)
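# Hypothetical worked example for the two factors above: if the ribosomal fraction of
# 0.2 is entirely replaced with its expected value and a further fraction of 0.05 is
# imposed for another compartment, then factor A = 1/(1 - 0.2) = 1.25 and
# factor B = 1 - (0.2 + 0.05) = 0.75.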
def determine_correction_factor_C(input, condition, reference_condition):
return(input.loc[input['ID'] == 'Total_protein', condition].values[0]/input.loc[input['ID'] == 'Total_protein', reference_condition].values[0])
def correct_protein_fractions(input, factors, directly_corrected_compartments, imposed_compartment_fractions):
out = input.copy()
for c in out.index:
if c in directly_corrected_compartments:
out.loc[c, 'new_protein_fraction'] = out.loc[c,
'original_protein_fraction']*factors['A']*factors['B']
elif c in imposed_compartment_fractions.keys():
out.loc[c, 'new_protein_fraction'] = imposed_compartment_fractions[c]
return(out)
def correct_PG_fraction(input, factors, compartments_no_original_PG, merged_compartments):
out = input.copy()
for c in out.index:
if c == 'Total':
continue
else:
if c in compartments_no_original_PG:
original_fraction = out.loc[c, 'original_protein_fraction']
out.loc[c, 'new_PG_fraction'] = 1 - ((factors['A']*factors['B']*original_fraction) /
out.loc[c, 'new_protein_fraction'])
elif c in merged_compartments.keys():
out.loc[c, 'new_PG_fraction'] = out.loc[c, 'original_PG_fraction']*out.loc[c, 'original_protein_fraction']/(
out.loc[c, 'original_protein_fraction']+out.loc[merged_compartments[c], 'original_protein_fraction'])
else:
out.loc[c, 'new_PG_fraction'] = out.loc[c, 'original_PG_fraction']
return(out)
def merge_compartments(input, merged_compartments):
out = input.copy()
for c in merged_compartments.keys():
out.loc[c, 'new_protein_fraction'] = out.loc[c, 'new_protein_fraction'] + \
out.loc[merged_compartments[c], 'new_protein_fraction']
return(out)
def calculate_new_total_PG_fraction(input):
out = input.copy()
fraction = 0
for c in out.index:
if c not in ['Total', 'Ribosomes']:
fraction += out.loc[c, 'new_protein_fraction']*out.loc[c, 'new_PG_fraction']
out.loc['Total', 'new_PG_fraction'] = fraction
out.loc['Total', 'new_protein_fraction'] = 1
return(out)
def determine_apparent_process_efficiencies(growth_rate, input, rba_session, proteome_summary, protein_data, condition, gene_id_col):
process_efficiencies = pandas.DataFrame()
for i in input.index:
process_ID = input.loc[i, 'Process_ID']
process_name = input.loc[i, 'Process_Name']
process_client_compartments = input.loc[i, 'Client_Compartments'].split(' , ')
constituting_proteins = {rba_session.ModelStructure.ProteinInfo.Elements[i]['ProtoID']: rba_session.ModelStructure.ProteinInfo.Elements[
i]['AAnumber'] for i in rba_session.ModelStructure.ProcessInfo.Elements[process_name]['Composition'].keys()}
Total_client_fraction = sum([proteome_summary.loc[i, 'new_protein_fraction']
for i in process_client_compartments])
n_AAs_in_machinery = 0
machinery_size = 0
for i in constituting_proteins.keys():
if i in protein_data['ID']:
protein_data.loc[protein_data['ID'] == i, ]
n_AAs_in_machinery += protein_data.loc[protein_data['ID'] == i, condition].values[0] * \
protein_data.loc[protein_data['ID'] == i, 'AA_residues'].values[0]
machinery_size += constituting_proteins[i]
# right reference amount?
if n_AAs_in_machinery > 0:
relative_Protein_fraction_of_machinery = n_AAs_in_machinery / \
proteome_summary.loc['Total', 'original_amino_acid_occupation']
specific_capacity = growth_rate*Total_client_fraction/relative_Protein_fraction_of_machinery
apparent_capacity = specific_capacity*machinery_size
# process_ID[process_name] = apparent_capacity
process_efficiencies.loc[process_name, 'Process'] = process_ID
process_efficiencies.loc[process_name, 'Parameter'] = str(
process_ID+'_apparent_efficiency')
process_efficiencies.loc[process_name, 'Value'] = apparent_capacity
return(process_efficiencies)
def correction_pipeline(input, condition, compartments_to_ignore, compartments_no_original_PG, fractions_entirely_replaced_with_expected_value, imposed_compartment_fractions, directly_corrected_compartments, merged_compartments):
out = build_proteome_overview(input=input, condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=True)
factor_A = determine_correction_factor_A(fractions_entirely_replaced_with_expected_value={
i: imposed_compartment_fractions[i] for i in fractions_entirely_replaced_with_expected_value})
factor_B = determine_correction_factor_B(
imposed_compartment_fractions=imposed_compartment_fractions)
out = correct_protein_fractions(input=out, factors={
'A': factor_A, 'B': factor_B}, directly_corrected_compartments=directly_corrected_compartments, imposed_compartment_fractions=imposed_compartment_fractions)
out = correct_PG_fraction(input=out, factors={
'A': factor_A, 'B': factor_B}, compartments_no_original_PG=compartments_no_original_PG, merged_compartments=merged_compartments)
out = merge_compartments(input=out, merged_compartments=merged_compartments)
out = calculate_new_total_PG_fraction(input=out)
out.to_csv(str('Correction_overview_'+condition+'.csv'))
return({'Summary': out, 'Correction_factors': {'A': factor_A, 'B': factor_B}})
def build_input_for_default_kapp_estimation(input):
out = pandas.DataFrame(columns=['Compartment_ID', 'Density', 'PG_fraction'])
for i in input['Summary'].index:
if i not in ['Total', 'Ribosomes']:
out.loc[i, 'Compartment_ID'] = i
out.loc[i, 'Density'] = input['Summary'].loc[i, 'new_protein_fraction']
out.loc[i, 'PG_fraction'] = input['Summary'].loc[i, 'new_PG_fraction']
return(out)
def flux_bounds_from_input(input, condition, specific_exchanges=None):
flux_mean_df = input.loc[input['Type'] == 'ExchangeFlux_Mean', :]
flux_mean_SE = input.loc[input['Type'] == 'ExchangeFlux_StandardError', :]
out = pandas.DataFrame(columns=['Reaction_ID', 'LB', 'UB'])
if specific_exchanges is None:
exchanges_to_set = list(flux_mean_df['ID'])
else:
exchanges_to_set = specific_exchanges
for rx in exchanges_to_set:
mean_val = flux_mean_df.loc[flux_mean_df['ID'] == rx, condition].values[0]
if not pandas.isna(mean_val):
SE_val = flux_mean_SE.loc[flux_mean_SE['ID'] == str(rx+'_SE'), condition].values[0]
out.loc[rx, 'Reaction_ID'] = rx
if not pandas.isna(SE_val):
lb = mean_val-SE_val
ub = mean_val+SE_val
if mean_val < 0:
out.loc[rx, 'LB'] = lb
if ub > 0:
out.loc[rx, 'UB'] = 0
else:
out.loc[rx, 'UB'] = ub
elif mean_val > 0:
out.loc[rx, 'UB'] = ub
if lb < 0:
out.loc[rx, 'LB'] = 0
else:
out.loc[rx, 'LB'] = lb
else:
out.loc[rx, 'LB'] = lb
out.loc[rx, 'UB'] = ub
else:
out.loc[rx, 'LB'] = mean_val
out.loc[rx, 'UB'] = mean_val
flux_dir_df = input.loc[input['Type'] == 'Flux_Direction', :]
if specific_exchanges is None:
exchanges_to_set = list(flux_dir_df['ID'])
else:
exchanges_to_set = specific_exchanges
for rx in exchanges_to_set:
out.loc[rx, 'Reaction_ID'] = rx
if flux_dir_df.loc[flux_dir_df['ID'] == rx, condition].values[0] == 1:
out.loc[rx, 'LB'] = 0
elif flux_dir_df.loc[flux_dir_df['ID'] == rx, condition].values[0] == -1:
out.loc[rx, 'UB'] = 0
elif flux_dir_df.loc[flux_dir_df['ID'] == rx, condition].values[0] == 0:
out.loc[rx, 'LB'] = 0
out.loc[rx, 'UB'] = 0
flux_upper_df = input.loc[input['Type'] == 'Flux_Upper_Bound', :]
for rx in list(flux_upper_df['ID']):
out.loc[rx, 'Reaction_ID'] = rx
out.loc[rx, 'UB'] = flux_upper_df.loc[flux_upper_df['ID'] == rx, condition].values[0]
flux_lower_df = input.loc[input['Type'] == 'Flux_Lower_Bound', :]
for rx in list(flux_lower_df['ID']):
out.loc[rx, 'Reaction_ID'] = rx
out.loc[rx, 'LB'] = flux_lower_df.loc[flux_lower_df['ID'] == rx, condition].values[0]
return(out)
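# Worked example of the sign handling above (hypothetical numbers): an exchange flux
# with mean -1.2 and standard error 1.5 gives lb = -2.7 and ub = 0.3; since the mean
# is negative the bounds become LB = -2.7 and UB = 0 (the upper bound is clamped to
# zero because mean + SE crosses zero).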
def growth_Rate_from_input(input, condition):
return(input.loc[input['Type'] == 'Growth_Rate', condition].values[0])
def proteome_fractions_from_input(input, condition):
df = input.loc[input['Type'] == 'Expected_ProteomeFraction', :]
return(dict(zip(list(df['ID']), list(df[condition]))))
def medium_concentrations_from_input(input, condition):
df = input.loc[input['Type'] == 'Medium_Concentration', :]
return(dict(zip(list(df['ID']), list(df[condition]))))
def build_input_proteome_for_specific_kapp_estimation(proteomics_data, condition):
out = pandas.DataFrame()
out['ID'] = proteomics_data['ID']
out['copy_number'] = proteomics_data[condition]
return(out)
def inject_estimated_efficiencies_into_model(rba_session, specific_kapps=None, default_kapps=None, process_efficiencies=None, round_to_digits=0):
"""
Parameters
----------
specific_kapps : pandas.DataFrame(columns=['Enzyme_ID','Kapp'])
default_kapps : {'default_kapp':value,'default_transporter_kapp':value}
process_efficiencies : pandas.DataFrame(columns=['Process','Parameter','Value'])
"""
if specific_kapps is not None:
parameterized = []
for enz in list(specific_kapps['Enzyme_ID']):
if not pandas.isna(specific_kapps.loc[specific_kapps['Enzyme_ID'] == enz, 'Kapp'].values[0]):
if enz not in parameterized:
all_enzs = rba_session.ModelStructure.EnzymeInfo.Elements[enz]['Isozymes']
all_enzs.append(enz)
parameterized += all_enzs
if len(all_enzs) == 1:
proto_enz = all_enzs[0]
else:
proto_enz = [i for i in all_enzs if not '_duplicate_' in i][0]
val = round(specific_kapps.loc[specific_kapps['Enzyme_ID']
== enz, 'Kapp'].values[0], round_to_digits)
const = rba.xml.parameters.Function(
str(proto_enz + '_kapp__constant'), 'constant', parameters={'CONSTANT': val}, variable=None)
if str(proto_enz + '_kapp__constant') not in rba_session.model.parameters.functions._elements_by_id.keys():
rba_session.model.parameters.functions.append(const)
else:
rba_session.model.parameters.functions._elements_by_id[const.id] = const
count = 0
for e in rba_session.model.enzymes.enzymes:
if e.id in all_enzs:
count += 1
e.forward_efficiency = str(proto_enz + '_kapp__constant')
e.backward_efficiency = str(proto_enz + '_kapp__constant')
if count == len(all_enzs):
break
if default_kapps is not None:
if type(default_kapps) is dict:
rba_session.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value = default_kapps['default_kapp']
rba_session.model.parameters.functions._elements_by_id['default_transporter_efficiency'].parameters._elements_by_id[
'CONSTANT'].value = default_kapps['default_transporter_kapp']
if process_efficiencies is not None:
for i in process_efficiencies.index:
if process_efficiencies.loc[i, 'Process'] in rba_session.model.processes.processes._elements_by_id.keys():
if not pandas.isna(process_efficiencies.loc[i, 'Value']):
rba_session.model.processes.processes._elements_by_id[process_efficiencies.loc[i,
'Process']].machinery.capacity.value = process_efficiencies.loc[i, 'Parameter']
const = rba.xml.parameters.Function(process_efficiencies.loc[i, 'Parameter'], 'constant', parameters={
'CONSTANT': process_efficiencies.loc[i, 'Value']}, variable=None)
if process_efficiencies.loc[i, 'Parameter'] not in rba_session.model.parameters.functions._elements_by_id.keys():
rba_session.model.parameters.functions.append(const)
else:
rba_session.model.parameters.functions._elements_by_id[const.id] = const
rba_session.rebuild_from_model()
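# Minimal usage sketch of the function above (kapp values are hypothetical):
#   inject_estimated_efficiencies_into_model(
#       rba_session,
#       default_kapps={'default_kapp': 1e5, 'default_transporter_kapp': 1e6})
# Specific kapps and process efficiencies can be passed analogously as the DataFrames
# described in the docstring.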
def calibration_workflow(proteome,
condition,
reference_condition,
gene_ID_column,
definition_file,
rba_session,
process_efficiency_estimation_input=None,
default_kapps_provided=None):
t0 = time.time()
correction_results = correction_pipeline(input=proteome,
condition=condition,
compartments_to_ignore=['DEF', 'DEFA', 'Def'],
compartments_no_original_PG=['n', 'Secreted'],
fractions_entirely_replaced_with_expected_value=[
'Ribosomes'],
imposed_compartment_fractions=proteome_fractions_from_input(
input=definition_file, condition=condition),
directly_corrected_compartments=[
'c', 'cM', 'erM', 'gM', 'm', 'mIM', 'mIMS', 'mOM', 'vM', 'x'],
merged_compartments={'c': 'Ribosomes'})
# mumax0 = rba_session.findMaxGrowthRate()
rba_session.setMedium(medium_concentrations_from_input(
input=definition_file, condition=condition))
# mumax1 = rba_session.findMaxGrowthRate()
if process_efficiency_estimation_input is not None:
process_efficiencies = determine_apparent_process_efficiencies(growth_rate=growth_Rate_from_input(
input=definition_file, condition=condition), input=process_efficiency_estimation_input, rba_session=rba_session, protein_data=proteome, proteome_summary=correction_results['Summary'], condition=condition, gene_id_col=gene_ID_column)
inject_estimated_efficiencies_into_model(
rba_session, specific_kapps=None, default_kapps=None, process_efficiencies=process_efficiencies)
else:
process_efficiencies = None
protein_scaling_coefficient = 1000 * determine_correction_factor_C(input=definition_file, condition=condition, reference_condition=reference_condition) * \
correction_results['Correction_factors']['A'] * \
correction_results['Correction_factors']['B']/6.022e23
# protein_scaling_coefficient = 1000 * correction_results['Correction_factors']['A'] * correction_results['Correction_factors']['B']/6.022e23
proteome[condition] *= protein_scaling_coefficient
Specific_Kapps = rba_session.estimate_specific_Kapps(proteomicsData=build_input_proteome_for_specific_kapp_estimation(proteome, condition),
flux_bounds=flux_bounds_from_input(
input=definition_file, condition=condition, specific_exchanges=None),
mu=growth_Rate_from_input(
input=definition_file, condition=condition),
biomass_function=None,
target_biomass_function=True)
# Specific_Kapps.loc[(Specific_Kapps['Kapp'] <= 1000000) &
# (Specific_Kapps['Kapp'] >= 1), 'Kapp'].hist()
# plt.show()
# mumax2 = rba_session.findMaxGrowthRate()
if default_kapps_provided is None:
Default_Kapps = rba_session.estimate_default_Kapps(target_mu=growth_Rate_from_input(input=definition_file, condition=condition), compartment_densities_and_PGs=build_input_for_default_kapp_estimation(
correction_results), flux_bounds=flux_bounds_from_input(input=definition_file, condition=condition, specific_exchanges=None), mu_approximation_precision=0.01)
inject_estimated_efficiencies_into_model(rba_session, specific_kapps=None, default_kapps={
'default_kapp': Default_Kapps.iloc[-1, 2], 'default_transporter_kapp': Default_Kapps.iloc[-1, 3]}, process_efficiencies=None)
else:
inject_estimated_efficiencies_into_model(
rba_session, specific_kapps=None, default_kapps=default_kapps_provided, process_efficiencies=None)
Default_Kapps = default_kapps_provided
inject_estimated_efficiencies_into_model(
rba_session, specific_kapps=Specific_Kapps, default_kapps=None, process_efficiencies=None)
# mumax3 = rba_session.findMaxGrowthRate()
compartment_densities_and_PGs = build_input_for_default_kapp_estimation(correction_results)
for comp in list(compartment_densities_and_PGs['Compartment_ID']):
rba_session.model.parameters.functions._elements_by_id[str(
'fraction_protein_'+comp)].parameters._elements_by_id['CONSTANT'].value = compartment_densities_and_PGs.loc[compartment_densities_and_PGs['Compartment_ID'] == comp, 'Density']
rba_session.model.parameters.functions._elements_by_id[str(
'fraction_non_enzymatic_protein_'+comp)].parameters._elements_by_id['CONSTANT'].value = compartment_densities_and_PGs.loc[compartment_densities_and_PGs['Compartment_ID'] == comp, 'PG_fraction']
rba_session.rebuild_from_model()
rba_session.addExchangeReactions()
rba_session.setMedium(medium_concentrations_from_input(
input=definition_file, condition=condition))
# FBs = flux_bounds_from_input(
# input=definition_file, condition=condition, specific_exchanges=None)
#rba_session.Problem.setLB(dict(zip(list(FBs['Reaction_ID']), list(FBs['LB']))))
# rba_session.Problem.setLB({FBs.loc[i, 'Reaction_ID']: FBs.loc[i, 'LB']
# for i in FBs.index if not pandas.isna(FBs.loc[i, 'LB'])})
# rba_session.Problem.setLB({FBs.loc[i, 'Reaction_ID']: FBs.loc[i, 'UB']
# for i in FBs.index if not pandas.isna(FBs.loc[i, 'UB'])})
#rba_session.Problem.setUB(dict(zip(list(FBs['Reaction_ID']), list(FBs['UB']))))
rba_session.Problem.setLB({'R_EX_cys__L_e': 0, 'R_EX_met__L_e': 0})
rba_session.Problem.setUB({'R_EX_cys__L_e': 0, 'R_EX_met__L_e': 0})
mumax4 = rba_session.findMaxGrowthRate()
rba_session.recordResults('Prokaryotic')
prok_results = copy.deepcopy(rba_session.Results)
rba_session2 = copy.copy(rba_session)
rba_session2.eukaryoticDensities4(CompartmentRelationships=False)
mumax5 = rba_session2.findMaxGrowthRate()
rba_session2.recordResults('Eukaryotic')
# print([Default_Kapps.iloc[-1, 2], Default_Kapps.iloc[-1, 3]])
# print([growth_Rate_from_input(input=definition_file,
# condition=condition), mumax0, mumax1, mumax2, mumax3, mumax4, mumax5])
print(time.time() - t0)
return({'Simulation_Results': prok_results, 'Simulation_Results_Euk': copy.deepcopy(rba_session2.Results), 'Proteome': build_input_proteome_for_specific_kapp_estimation(proteome, condition), 'Correction_Results': correction_results, 'Default_Kapps': Default_Kapps, 'Specific_Kapps': Specific_Kapps, 'Process_Efficiencies': process_efficiencies})
# seaborn.violinplot(x=Specific_Kapps.loc[Specific_Kapps['Kapp'] <= 400000, 'Kapp'])
# Specific_Kapps.loc[(Specific_Kapps['Kapp'] <= 1000000) &
# (Specific_Kapps['Kapp'] >= 1), 'Kapp']).hist()
# plt.show()
# Test predictions
# Given medium predict Mu, Exchanges and Proteome
# Prokaryotic
# Eukaryotic
# 1. import model and uniprot-file and compartment-annotation
## external_annotations for ribosomal-proteins!!! ##
## process-efficiency estimation input ##
## parse input-data properly and add Lahtvee information ##
print('---------------------START----------------------')
Input_Data = pandas.read_csv(
'DataSetsYeastRBACalibration/Calibration_InputDefinition.csv', sep=';', decimal=',', index_col=0)
Process_Efficiency_Estimation_Input = pandas.read_csv(
'DataSetsYeastRBACalibration/Process_Efficiency_Estimation_Input.csv', sep=';', decimal=',')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Uniprot = pandas.read_csv('Yeast_iMM904_RBA_model/uniprot.csv', sep='\t')
Compartment_Annotations_external = pandas.read_csv(
'DataSetsYeastRBACalibration/Manually_curated_Protein_Locations_for_Calibration.csv', index_col=None, sep=';')
Ribosomal_Proteins_Uniprot = pandas.read_csv(
'DataSetsYeastRBACalibration/uniprot_ribosomal_proteins.csv', index_col=None, sep=';')
Hackett_Clim_FCs = pandas.read_csv('DataSetsYeastRBACalibration/Hacket_Clim_ProteinFCs.csv')
Lahtvee_REF = pandas.read_csv('DataSetsYeastRBACalibration/LahtveeRefProteomicsData.csv')
picogram_togram_coefficient = 1e12
Lahtvee_REF['Lahtvee_REF'] *= picogram_togram_coefficient
Lahtvee_REF = Lahtvee_REF.loc[pandas.isna(Lahtvee_REF['Lahtvee_REF']) == False]
ribosomal_proteins = find_ribosomal_proteins(rba_session=Simulation, model_processes=[
'TranslationC', 'TranslationM'], external_annotations=Ribosomal_Proteins_Uniprot)
model_protein_compartment_map = build_model_compartment_map(rba_session=Simulation)
Compartment_Annotations = build_compartment_annotations(
Compartment_Annotations_external=Compartment_Annotations_external, model_protein_compartment_map=model_protein_compartment_map)
print('Annotations to data')
annotations_Lahtvee = build_dataset_annotations(input=Lahtvee_REF, ID_column='Gene', Uniprot=Uniprot,
Compartment_Annotations=Compartment_Annotations, model_protein_compartment_map=model_protein_compartment_map, ribosomal_proteins=ribosomal_proteins)
annotations_Hackett = build_dataset_annotations(input=Hackett_Clim_FCs, ID_column='Gene', Uniprot=Uniprot,
Compartment_Annotations=Compartment_Annotations, model_protein_compartment_map=model_protein_compartment_map, ribosomal_proteins=ribosomal_proteins)
full_annotations = build_full_annotations_from_dataset_annotations(
annotations_list=[annotations_Lahtvee, annotations_Hackett])
####### Bootstrapping-loop starts here #######
restored_Hackett_Data = infer_copy_numbers_from_reference_copy_numbers(fold_changes=Hackett_Clim_FCs, absolute_data=Lahtvee_REF, matching_column_in_fold_change_data='Hackett_C01',
matching_column_in_absolute_data='Lahtvee_REF', conditions_in_fold_change_data_to_restore=['Hackett_C005', 'Hackett_C01', 'Hackett_C016', 'Hackett_C022', 'Hackett_C03'])
restored_Hackett_Data = add_annotations_to_proteome(
input=restored_Hackett_Data, ID_column='ID', annotations=full_annotations)
Lahtvee_REF = add_annotations_to_proteome(
input=Lahtvee_REF, ID_column='Gene', annotations=full_annotations)
# default_kapps_provided={'default_kapp':39673 , 'default_transporter_kapp':396730 }
# default_kapps_provided={'default_kapp':85449 , 'default_transporter_kapp':854490 }
# default_kapps_provided={'default_kapp':128174 , 'default_transporter_kapp':1281740 }
# default_kapps_provided={'default_kapp':280762 , 'default_transporter_kapp':2807620 }
# default_kapps_provided = {'default_kapp': 268555, 'default_transporter_kapp': 2685550}
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C005 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C005', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 39673, 'default_transporter_kapp': 396730})
print('0.05')
print('')
print('')
print('')
print('')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C01 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C01', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 85449, 'default_transporter_kapp': 854490})
print('0.1')
print('')
print('')
print('')
print('')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C016 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C016', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 128174, 'default_transporter_kapp': 1281740})
print('0.16')
print('')
print('')
print('')
print('')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C022 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C022', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 280762, 'default_transporter_kapp': 2807620})
print('0.22')
print('')
print('')
print('')
print('')
Simulation = RBA_Session('Yeast_iMM904_RBA_model')
Calibration_Hackett_C03 = calibration_workflow(proteome=restored_Hackett_Data, condition='Hackett_C03', reference_condition='Lahtvee_REF', gene_ID_column='Gene',
definition_file=Input_Data, rba_session=Simulation, process_efficiency_estimation_input=Process_Efficiency_Estimation_Input, default_kapps_provided={'default_kapp': 280762, 'default_transporter_kapp': 2807620})
print('0.3')
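# Collect the condition-specific enzyme kapps estimated by each calibration into
# one table (rows: enzymes, columns: conditions) and export it as CSV.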
specKapps_005 = pandas.DataFrame(index=list(
Calibration_Hackett_C005['Specific_Kapps']['Enzyme_ID']))
specKapps_005['Hackett_C005'] = list(Calibration_Hackett_C005['Specific_Kapps']['Kapp'])
specKapps_01 = pandas.DataFrame(index=list(Calibration_Hackett_C01['Specific_Kapps']['Enzyme_ID']))
specKapps_01['Hackett_C01'] = list(Calibration_Hackett_C01['Specific_Kapps']['Kapp'])
specKapps_016 = pandas.DataFrame(index=list(
Calibration_Hackett_C016['Specific_Kapps']['Enzyme_ID']))
specKapps_016['Hackett_C016'] = list(Calibration_Hackett_C016['Specific_Kapps']['Kapp'])
specKapps_022 = pandas.DataFrame(index=list(
Calibration_Hackett_C022['Specific_Kapps']['Enzyme_ID']))
specKapps_022['Hackett_C022'] = list(Calibration_Hackett_C022['Specific_Kapps']['Kapp'])
specKapps_03 = pandas.DataFrame(index=list(Calibration_Hackett_C03['Specific_Kapps']['Enzyme_ID']))
specKapps_03['Hackett_C03'] = list(Calibration_Hackett_C03['Specific_Kapps']['Kapp'])
all_spec_Kapps = pandas.concat(
[specKapps_005, specKapps_01, specKapps_016, specKapps_022, specKapps_03], axis=1)
all_spec_Kapps['ID'] = all_spec_Kapps.index
all_spec_Kapps.to_csv('Specific_Kapps_out.csv', sep=';', decimal=',')
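# Same for the estimated process efficiencies (rows: processes, columns: conditions).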
process_efficiencies_005 = pandas.DataFrame(index=list(
Calibration_Hackett_C005['Process_Efficiencies']['Process']))
process_efficiencies_005['Hackett_C005'] = list(
Calibration_Hackett_C005['Process_Efficiencies']['Value'])
process_efficiencies_01 = pandas.DataFrame(index=list(
Calibration_Hackett_C01['Process_Efficiencies']['Process']))
process_efficiencies_01['Hackett_C01'] = list(
Calibration_Hackett_C01['Process_Efficiencies']['Value'])
process_efficiencies_016 = pandas.DataFrame(index=list(
Calibration_Hackett_C016['Process_Efficiencies']['Process']))
process_efficiencies_016['Hackett_C016'] = list(
Calibration_Hackett_C016['Process_Efficiencies']['Value'])
process_efficiencies_022 = pandas.DataFrame(index=list(
Calibration_Hackett_C022['Process_Efficiencies']['Process']))
process_efficiencies_022['Hackett_C022'] = list(
Calibration_Hackett_C022['Process_Efficiencies']['Value'])
process_efficiencies_03 = pandas.DataFrame(index=list(
Calibration_Hackett_C03['Process_Efficiencies']['Process']))
process_efficiencies_03['Hackett_C03'] = list(
Calibration_Hackett_C03['Process_Efficiencies']['Value'])
all_process_efficiencies = pandas.concat(
[process_efficiencies_005, process_efficiencies_01, process_efficiencies_016, process_efficiencies_022, process_efficiencies_03], axis=1)
all_process_efficiencies['ID'] = all_process_efficiencies.index
all_process_efficiencies.to_csv('Process_efficiencies_out.csv', sep=';', decimal=',')
########
########
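# Measured exchange fluxes versus growth rate used for comparison below: the
# reference dataset plotted as 'van Hoek' and the Hackett et al. conditions
# (units as in the plots: mmol gDW^-1 h^-1).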
Mus_o2 = [0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.28, 0.3, 0.35, 0.4]
O2_J = [0.8, 1.3, 2.5, 3.9, 5.3, 7, 7.4, 6.1, 5.1, 3.7]
Glc_J = [0.3, 0.6, 1.1, 1.7, 2.3, 2.8, 3.4, 4.5, 8.6, 11.1]
CO2_J = [0.8, 1.4, 2.7, 4.2, 5.7, 7.5, 8, 8.8, 14.9, 18.9]
EtOH_J = [0, 0, 0, 0, 0, 0, 0.11, 2.3, 9.5, 13.9]
Ac_J = [0, 0, 0, 0, 0, 0, 0.08, 0.41, 0.62, 0.6]
Glyc_J = [0, 0, 0, 0, 0, 0, 0, 0, 0.05, 0.15]
## Hackett#
Mu_Hackett = [0.0498630244, 0.1054314572, 0.154377453333333, 0.2126503108, 0.293841410333333]
Glc_Hackett = [0.7367, 1.5462, 2.1722, 5.1571, 9.5962]
EtOH_Hackett = [0.0127, 0.0529, 0.1084, 4.6066, 14.0672]
Ac_Hackett = [0.0017, 0.0031, 0.0052, 0.4433, 0.8851]
Glyc_Hackett = [0.0035, 0.0077, 0.0065, 0.0579, 0.1699]
conditions = ['Hackett_C005', 'Hackett_C01', 'Hackett_C016', 'Hackett_C022', 'Hackett_C03']
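# Collect predicted growth rates and absolute exchange fluxes per condition from
# the prokaryotic and eukaryotic simulation results of each calibration.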
calibration_results = [Calibration_Hackett_C005, Calibration_Hackett_C01,
                       Calibration_Hackett_C016, Calibration_Hackett_C022, Calibration_Hackett_C03]
Mus_predicted = [calib['Simulation_Results']['Mu'].loc['Mu', 'Prokaryotic']
                 for calib in calibration_results]
Mus_predicted_euk = [calib['Simulation_Results_Euk']['Mu'].loc['Mu', 'Eukaryotic']
                     for calib in calibration_results]
Glc_Exchange_predicted = [abs(calib['Simulation_Results']['ExchangeFluxes'].loc['M_glc__D', 'Prokaryotic'])
                          for calib in calibration_results]
EtOH_Exchange_predicted = [abs(calib['Simulation_Results']['ExchangeFluxes'].loc['M_etoh', 'Prokaryotic'])
                           for calib in calibration_results]
Ac_Exchange_predicted = [abs(calib['Simulation_Results']['ExchangeFluxes'].loc['M_ac', 'Prokaryotic'])
                         for calib in calibration_results]
O2_Exchange_predicted = [abs(calib['Simulation_Results']['ExchangeFluxes'].loc['M_o2', 'Prokaryotic'])
                         for calib in calibration_results]
Glycerol_Exchange_predicted = [abs(calib['Simulation_Results']['ExchangeFluxes'].loc['M_glyc', 'Prokaryotic'])
                               for calib in calibration_results]
###
Glc_Exchange_predicted_euk = [abs(calib['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_glc__D', 'Eukaryotic'])
                              for calib in calibration_results]
EtOH_Exchange_predicted_euk = [abs(calib['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_etoh', 'Eukaryotic'])
                               for calib in calibration_results]
Ac_Exchange_predicted_euk = [abs(calib['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_ac', 'Eukaryotic'])
                             for calib in calibration_results]
O2_Exchange_predicted_euk = [abs(calib['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_o2', 'Eukaryotic'])
                             for calib in calibration_results]
Glycerol_Exchange_predicted_euk = [abs(calib['Simulation_Results_Euk']['ExchangeFluxes'].loc['M_glyc', 'Eukaryotic'])
                                   for calib in calibration_results]
###
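# 2x3 figure: predicted vs. measured growth rate and exchange fluxes
# (glucose, oxygen, ethanol, acetate, glycerol).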
fig, axs = plt.subplots(2, 3, figsize=(28, 7), sharex=True)
# plt.figure()
axs[0, 0].plot(Mu_Hackett, Mu_Hackett, color='lightgreen')
axs[0, 0].scatter(Mu_Hackett, Mus_predicted, color='black')
axs[0, 0].scatter(Mu_Hackett, Mus_predicted_euk, color='red')
axs[0, 0].legend(['Hackett', 'Prok.', 'Euk.'])
axs[0, 0].set_title('Predicted vs measured growth-rate')
axs[0, 0].set_ylabel(r'$\mu$ [$h^{-1}$]')
axs[0, 0].set_xlabel(r'$\mu$ [$h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
# plt.figure()
axs[0, 1].plot(Mus_o2, Glc_J, color='lightblue')
axs[0, 1].plot(Mu_Hackett, Glc_Hackett, color='lightgreen')
axs[0, 1].scatter(Mus_predicted, Glc_Exchange_predicted, color='black', alpha=0.8)
axs[0, 1].scatter(Mus_predicted, Glc_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_glc__D',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_glc__D',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[0, 1].legend(['van Hoek', 'Hackett', 'Prok.', 'Euk.'])
axs[0, 1].set_title('Glucose-uptake rate')
axs[0, 1].set_xlabel(r'$\mu$ [$h^{-1}$]')
axs[0, 1].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
# plt.figure()
axs[0, 2].plot(Mus_o2, O2_J, color='lightblue')
# plt.plot(Mu_Hackett,Glc_Hackett,color='lightgreen')
axs[0, 2].scatter(Mus_predicted, O2_Exchange_predicted, color='black', alpha=0.8)
axs[0, 2].scatter(Mus_predicted, O2_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_glc__D',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_glc__D',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[0, 2].legend(['van Hoek', 'Prok.', 'Euk.'])
axs[0, 2].set_title('Oxygen-uptake rate')
axs[0, 2].set_xlabel(r'$\mu$ [$h^{-1}$]')
axs[0, 2].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
# plt.figure()
axs[1, 0].plot(Mus_o2, EtOH_J, color='lightblue')
axs[1, 0].plot(Mu_Hackett, EtOH_Hackett, color='lightgreen')
axs[1, 0].scatter(Mus_predicted, EtOH_Exchange_predicted, color='black', alpha=0.8)
axs[1, 0].scatter(Mus_predicted, EtOH_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_glc__D',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_glc__D',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[1, 0].legend(['van Hoek', 'Hackett', 'Prok.', 'Euk.'])
axs[1, 0].set_title('Ethanol-excretion rate')
axs[1, 0].set_xlabel(r'$\mu$ [$h^{-1}$]')
axs[1, 0].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
# plt.figure()
axs[1, 1].plot(Mus_o2, Ac_J, color='lightblue')
axs[1, 1].plot(Mu_Hackett, Ac_Hackett, color='lightgreen')
axs[1, 1].scatter(Mus_predicted, Ac_Exchange_predicted, color='black', alpha=0.8)
axs[1, 1].scatter(Mus_predicted, Ac_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_ac',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_ac',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[1, 1].legend(['van Hoek', 'Hackett', 'Prok.', 'Euk.'])
axs[1, 1].set_title('Acetate-excretion rate')
axs[1, 1].set_xlabel(r'$\mu$ [$h^{-1}$]')
axs[1, 1].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
# plt.show()
# plt.savefig(pp, format='pdf')
axs[1, 2].plot(Mus_o2, Glyc_J, color='lightblue')
axs[1, 2].plot(Mu_Hackett, Glyc_Hackett, color='lightgreen')
axs[1, 2].scatter(Mus_predicted, Glycerol_Exchange_predicted, color='black', alpha=0.8)
axs[1, 2].scatter(Mus_predicted, Glycerol_Exchange_predicted_euk, color='red', alpha=0.8)
# plt.scatter(Mus_Euk,[abs(i) for i in FluxFile_Euk.loc['M_ac',conditions].values.tolist()],color='red',alpha=0.8)
# plt.scatter(Mus_testcase,[abs(i) for i in FluxFile_testcase.loc['M_ac',conditions].values.tolist()],color='orange',alpha=0.8)
# Effect of Specific Growth Rate on Fermentative Capacity of Baker’s Yeast#
axs[1, 2].legend(['van Hoek', 'Hackett', 'Prok.', 'Euk.'])
axs[1, 2].set_title('Glycerol-excretion rate')
axs[1, 2].set_xlabel(r'$\mu$ [$h^{-1}$]')
axs[1, 2].set_ylabel('$J^{Ex}$ [$mmol * g^{-1}_{DW} * h^{-1}$]')
plt.show()
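# Compare predicted proto-protein copy numbers with the measured proteome per
# condition; the factor 6.023e20 presumably converts mmol to molecule counts
# (Avogadro's number per mmol).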
protein_comparison_005 = pandas.DataFrame()
for i in list(set(list(Calibration_Hackett_C005['Simulation_Results']['ProtoProteins'].index)+list(Calibration_Hackett_C005['Proteome']['ID']))):
protein_comparison_005.loc[i, 'ID'] = i
if i in list(Calibration_Hackett_C005['Simulation_Results']['ProtoProteins'].index):
protein_comparison_005.loc[i, 'Predicted'] = 6.023e20 * \
Calibration_Hackett_C005['Simulation_Results']['ProtoProteins'].loc[i].values[0]
if i in list(Calibration_Hackett_C005['Proteome']['ID']):
protein_comparison_005.loc[i, 'Measured'] = 6.023e20 * \
Calibration_Hackett_C005['Proteome'].loc[Calibration_Hackett_C005['Proteome']
['ID'] == i, 'copy_number'].values[0]
protein_comparison_01 = pandas.DataFrame()
# -*- coding: utf-8 -*-
import pytest
import os
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import numpy.testing as npt
from numpy.linalg import norm, lstsq
from numpy.random import randn
from flaky import flaky
from lifelines import CoxPHFitter, WeibullAFTFitter, KaplanMeierFitter, ExponentialFitter
from lifelines.datasets import load_regression_dataset, load_larynx, load_waltons, load_rossi
from lifelines import utils
from lifelines import exceptions
from lifelines.utils.sklearn_adapter import sklearn_adapter
from lifelines.utils.safe_exp import safe_exp
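# Unit tests for lifelines.utils: p-value formatting, ridge regression, survival
# tables, datetime-to-duration conversion, cross-validation helpers, the
# concordance index, and long-format (time-varying covariate) utilities.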
def test_format_p_values():
assert utils.format_p_value(2)(0.004) == "<0.005"
assert utils.format_p_value(3)(0.004) == "0.004"
assert utils.format_p_value(3)(0.000) == "<0.0005"
assert utils.format_p_value(3)(0.005) == "0.005"
assert utils.format_p_value(3)(0.2111) == "0.211"
assert utils.format_p_value(3)(0.2119) == "0.212"
def test_ridge_regression_with_penalty_is_less_than_without_penalty():
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1=2.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
assert norm(utils.ridge_regression(X, Y, c1=1.0, c2=1.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
def test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():
c1 = 10e8
c2 = 0.0
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0]) < 10e-4
def test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():
c1 = 0.0
c2 = 10e8
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0] - offset) < 10e-4
def test_lstsq_returns_similar_values_to_ridge_regression():
X = randn(2, 2)
Y = randn(2)
expected = lstsq(X, Y, rcond=None)[0]
assert norm(utils.ridge_regression(X, Y)[0] - expected) < 10e-4
def test_lstsq_returns_correct_values():
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
beta, V = utils.ridge_regression(X, y)
expected_beta = [-0.98684211, -0.07894737]
expected_v = [
[-0.03289474, -0.49342105, 0.06578947, 0.03289474, 0.49342105],
[-0.30263158, 0.46052632, -0.39473684, 0.30263158, -0.46052632],
]
assert norm(beta - expected_beta) < 10e-4
for V_row, e_v_row in zip(V, expected_v):
assert norm(V_row - e_v_row) < 1e-4
def test_unnormalize():
df = load_larynx()
m = df.mean(0)
s = df.std(0)
ndf = utils.normalize(df)
npt.assert_almost_equal(df.values, utils.unnormalize(ndf, m, s).values)
def test_normalize():
df = load_larynx()
n, d = df.shape
npt.assert_almost_equal(utils.normalize(df).mean(0).values, np.zeros(d))
npt.assert_almost_equal(utils.normalize(df).std(0).values, np.ones(d))
def test_median():
sv = pd.DataFrame(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_median_accepts_series():
sv = pd.Series(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_qth_survival_times_with_varying_datatype_inputs():
sf_list = [1.0, 0.75, 0.5, 0.25, 0.0]
sf_array = np.array([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_no_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0])
q = 0.5
assert utils.qth_survival_times(q, sf_list) == 2
assert utils.qth_survival_times(q, sf_array) == 2
assert utils.qth_survival_times(q, sf_df_no_index) == 2
assert utils.qth_survival_times(q, sf_df_index) == 30
assert utils.qth_survival_times(q, sf_series_index) == 30
assert utils.qth_survival_times(q, sf_series_no_index) == 2
def test_qth_survival_times_multi_dim_input():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
medians = utils.qth_survival_times(0.5, sf_multi_df)
assert medians["sf"].loc[0.5] == 25
assert medians["sf**2"].loc[0.5] == 15
def test_qth_survival_time_returns_inf():
sf = pd.Series([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.5, sf) == np.inf
def test_qth_survival_time_accepts_a_model():
kmf = KaplanMeierFitter().fit([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.8, kmf) > 0
def test_qth_survival_time_with_dataframe():
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_df_too_many_columns = pd.DataFrame([[1, 2], [3, 4]])
assert utils.qth_survival_time(0.5, sf_df_no_index) == 2
assert utils.qth_survival_time(0.5, sf_df_index) == 30
with pytest.raises(ValueError):
utils.qth_survival_time(0.5, sf_df_too_many_columns)
def test_qth_survival_times_with_multivariate_q():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df),
pd.DataFrame([[40, 28], [25, 15]], index=[0.2, 0.5], columns=["sf", "sf**2"]),
)
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df["sf"]), pd.DataFrame([40, 25], index=[0.2, 0.5], columns=["sf"])
)
assert_frame_equal(utils.qth_survival_times(0.5, sf_multi_df), pd.DataFrame([[25, 15]], index=[0.5], columns=["sf", "sf**2"]))
assert utils.qth_survival_times(0.5, sf_multi_df["sf"]) == 25
def test_qth_survival_times_with_duplicate_q_returns_valid_index_and_shape():
sf = pd.DataFrame(np.linspace(1, 0, 50))
q = pd.Series([0.5, 0.5, 0.2, 0.0, 0.0])
actual = utils.qth_survival_times(q, sf)
assert actual.shape[0] == len(q)
assert actual.index[0] == actual.index[1]
assert_series_equal(actual.iloc[0], actual.iloc[1])
npt.assert_almost_equal(actual.index.values, q.values)
def test_datetimes_to_durations_with_different_frequencies():
# days
start_date = ["2013-10-10 0:00:00", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10 0:00:00", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date)
npt.assert_almost_equal(T, np.array([3, 1, 5 + 365]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# years
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(T, np.array([0, 0, 1]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# hours
start_date = ["2013-10-10 17:00:00", "2013-10-09 0:00:00", "2013-10-10 23:00:00"]
end_date = ["2013-10-10 18:00:00", "2013-10-10 0:00:00", "2013-10-11 2:00:00"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="h")
npt.assert_almost_equal(T, np.array([1, 24, 3]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
def test_datetimes_to_durations_will_handle_dates_above_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", "2013-10-12", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date="2013-10-12")
npt.assert_almost_equal(C, np.array([1, 1, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 2]))
def test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, None]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_will_censor_explicit_end_dates_above_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, "2013-10-20"]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", None, ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_datetimes_to_durations_custom_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "NaT", ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y", na_values=["NaT", ""])
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_survival_events_from_table_no_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 0, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal(T, T_)
npt.assert_array_equal(C, C_)
npt.assert_array_equal(W_, np.ones_like(T))
def test_survival_events_from_table_with_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 1, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal([1, 2, 3, 4, 5], T_)
npt.assert_array_equal([1, 0, 1, 1, 1], C_)
npt.assert_array_equal([1, 1, 1, 2, 1], W_)
def test_survival_table_from_events_with_non_trivial_censorship_column():
T = np.random.exponential(5, size=50)
    malformed_C = np.random.binomial(2, p=0.8, size=50)  # set to 2 on purpose!
proper_C = malformed_C > 0 # (proper "boolean" array)
table1 = utils.survival_table_from_events(T, malformed_C, np.zeros_like(T))
table2 = utils.survival_table_from_events(T, proper_C, np.zeros_like(T))
assert_frame_equal(table1, table2)
def test_group_survival_table_from_events_on_waltons_data():
df = load_waltons()
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"])
assert len(g) == 2
assert all(removed.columns == ["removed:miR-137", "removed:control"])
assert all(removed.index == observed.index)
assert all(removed.index == censored.index)
def test_group_survival_table_with_weights():
df = load_waltons()
dfw = df.groupby(["T", "E", "group"]).size().reset_index().rename(columns={0: "weights"})
gw, removedw, observedw, censoredw = utils.group_survival_table_from_events(
dfw["group"], dfw["T"], dfw["E"], weights=dfw["weights"]
)
assert len(gw) == 2
assert all(removedw.columns == ["removed:miR-137", "removed:control"])
assert all(removedw.index == observedw.index)
assert all(removedw.index == censoredw.index)
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"])
assert_frame_equal(removedw, removed)
assert_frame_equal(observedw, observed)
assert_frame_equal(censoredw, censored)
def test_survival_table_from_events_binned_with_empty_bin():
df = load_waltons()
ix = df["group"] == "miR-137"
event_table = utils.survival_table_from_events(df.loc[ix]["T"], df.loc[ix]["E"], intervals=[0, 10, 20, 30, 40, 50])
assert not pd.isnull(event_table).any().any()
def test_survival_table_from_events_at_risk_column():
df = load_waltons()
# from R
expected = [
163.0,
162.0,
160.0,
157.0,
154.0,
152.0,
151.0,
148.0,
144.0,
139.0,
134.0,
133.0,
130.0,
128.0,
126.0,
119.0,
118.0,
108.0,
107.0,
99.0,
96.0,
89.0,
87.0,
69.0,
65.0,
49.0,
38.0,
36.0,
27.0,
24.0,
14.0,
1.0,
]
df = utils.survival_table_from_events(df["T"], df["E"])
assert list(df["at_risk"][1:]) == expected # skip the first event as that is the birth time, 0.
def test_survival_table_to_events_casts_to_float():
T, C = (np.array([1, 2, 3, 4, 4, 5]), np.array([True, False, True, True, True, True]))
d = utils.survival_table_from_events(T, C, np.zeros_like(T))
npt.assert_array_equal(d["censored"].values, np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0]))
npt.assert_array_equal(d["removed"].values, np.array([0.0, 1.0, 1.0, 1.0, 2.0, 1.0]))
def test_group_survival_table_from_events_works_with_series():
df = pd.DataFrame([[1, True, 3], [1, True, 3], [4, False, 2]], columns=["duration", "E", "G"])
ug, _, _, _ = utils.group_survival_table_from_events(df.G, df.duration, df.E, np.array([[0, 0, 0]]))
npt.assert_array_equal(ug, np.array([3, 2]))
def test_survival_table_from_events_will_collapse_if_asked():
T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])
table = utils.survival_table_from_events(T, C, collapse=True)
assert table.index.tolist() == [
pd.Interval(-0.001, 3.5089999999999999, closed="right"),
pd.Interval(3.5089999999999999, 7.0179999999999998, closed="right"),
]
def test_survival_table_from_events_will_collapse_to_desired_bins():
T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])
table = utils.survival_table_from_events(T, C, collapse=True, intervals=[0, 4, 8])
assert table.index.tolist() == [pd.Interval(-0.001, 4, closed="right"), pd.Interval(4, 8, closed="right")]
def test_cross_validator_returns_k_results():
cf = CoxPHFitter()
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 3
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=5)
assert len(results) == 5
def test_cross_validator_returns_fitters_k_results():
cf = CoxPHFitter()
fitters = [cf, cf]
results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 2
assert len(results[0]) == len(results[1]) == 3
results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=5)
assert len(results) == 2
assert len(results[0]) == len(results[1]) == 5
def test_cross_validator_with_predictor():
cf = CoxPHFitter()
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 3
def test_cross_validator_with_stratified_cox_model():
cf = CoxPHFitter(strata=["race"])
utils.k_fold_cross_validation(cf, load_rossi(), duration_col="week", event_col="arrest")
def test_cross_validator_with_specific_loss_function():
cf = CoxPHFitter()
results_sq = utils.k_fold_cross_validation(
cf, load_regression_dataset(), scoring_method="concordance_index", duration_col="T", event_col="E"
)
def test_concordance_index():
size = 1000
T = np.random.normal(size=size)
P = np.random.normal(size=size)
C = np.random.choice([0, 1], size=size)
Z = np.zeros_like(T)
# Zeros is exactly random
assert utils.concordance_index(T, Z) == 0.5
assert utils.concordance_index(T, Z, C) == 0.5
# Itself is 1
assert utils.concordance_index(T, T) == 1.0
assert utils.concordance_index(T, T, C) == 1.0
# Random is close to 0.5
assert abs(utils.concordance_index(T, P) - 0.5) < 0.05
assert abs(utils.concordance_index(T, P, C) - 0.5) < 0.05
def test_survival_table_from_events_with_non_negative_T_and_no_lagged_births():
n = 10
T = np.arange(n)
C = [True] * n
min_obs = [0] * n
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == n
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_no_lagged_births():
n = 10
T = np.arange(-n / 2, n / 2)
C = [True] * n
min_obs = None
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == n
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_non_negative_T_and_lagged_births():
n = 10
T = np.arange(n)
C = [True] * n
min_obs = np.linspace(0, 2, n)
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == 1
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_lagged_births():
n = 10
T = np.arange(-n / 2, n / 2)
C = [True] * n
min_obs = np.linspace(-n / 2, 2, n)
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == 1
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_raises_value_error_if_too_early_births():
n = 10
T = np.arange(0, n)
C = [True] * n
min_obs = T.copy()
min_obs[1] = min_obs[1] + 10
with pytest.raises(ValueError):
utils.survival_table_from_events(T, C, min_obs)
class TestLongDataFrameUtils(object):
@pytest.fixture
def seed_df(self):
df = pd.DataFrame.from_records([{"id": 1, "var1": 0.1, "T": 10, "E": 1}, {"id": 2, "var1": 0.5, "T": 12, "E": 0}])
return utils.to_long_format(df, "T")
@pytest.fixture
def cv1(self):
return pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var2": 1.4},
{"id": 1, "t": 4, "var2": 1.2},
{"id": 1, "t": 8, "var2": 1.5},
{"id": 2, "t": 0, "var2": 1.6},
]
)
@pytest.fixture
def cv2(self):
return pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": 6, "var3": 1}, {"id": 2, "t": 0, "var3": 0}]
)
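    # The fixtures above provide a base survival DataFrame in long format
    # (to_long_format converts the duration column "T" into start/stop columns)
    # plus two time-varying covariate tables keyed by subject id and time t.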
def test_order_of_adding_covariates_doesnt_matter(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E"
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E"
)
assert_frame_equal(df21, df12, check_like=True)
def test_order_of_adding_covariates_doesnt_matter_in_cumulative_sum(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True
)
assert_frame_equal(df21, df12, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_insert_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records([{"id": 1, "t": 1, "var1": 1.0}, {"id": 1, "t": 2, "var1": 2.0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
expected = pd.DataFrame.from_records(
[
{"E": False, "id": 1, "stop": 1.0, "start": 0, "var1": 0.1},
{"E": False, "id": 1, "stop": 2.0, "start": 1, "var1": 1.0},
{"E": True, "id": 1, "stop": 10.0, "start": 2, "var1": 2.0},
]
)
assert_frame_equal(df, expected, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_sum_update_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
new_value_at_time_0 = 1.0
old_value_at_time_0 = seed_df["var1"].iloc[0]
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var1": new_value_at_time_0, "var2": 2.0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", overwrite=False)
expected = pd.DataFrame.from_records(
[{"E": True, "id": 1, "stop": 10.0, "start": 0, "var1": new_value_at_time_0 + old_value_at_time_0, "var2": 2.0}]
)
assert_frame_equal(df, expected, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_overwrite_update_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
new_value_at_time_0 = 1.0
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var1": new_value_at_time_0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", overwrite=True)
expected = pd.DataFrame.from_records([{"E": True, "id": 1, "stop": 10.0, "start": 0, "var1": new_value_at_time_0}])
assert_frame_equal(df, expected, check_like=True)
def test_enum_flag(self, seed_df, cv1, cv2):
df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E", add_enum=True).pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E", add_enum=True
)
idx = df["id"] == 1
n = idx.sum()
try:
assert_series_equal(df["enum"].loc[idx], pd.Series(np.arange(1, n + 1)), check_names=False)
except AssertionError as e:
# Windows Numpy and Pandas sometimes have int32 or int64 as default dtype
if os.name == "nt" and "int32" in str(e) and "int64" in str(e):
assert_series_equal(
df["enum"].loc[idx], pd.Series(np.arange(1, n + 1), dtype=df["enum"].loc[idx].dtypes), check_names=False
)
else:
raise e
def test_event_col_is_properly_inserted(self, seed_df, cv2):
df = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E")
assert df.groupby("id").last()["E"].tolist() == [1, 0]
def test_redundant_cv_columns_are_dropped(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var3": 0, "var4": 1},
{"id": 1, "t": 1, "var3": 0, "var4": 1}, # redundant, as nothing changed during the interval
{"id": 1, "t": 3, "var3": 0, "var4": 1}, # redundant, as nothing changed during the interval
{"id": 1, "t": 6, "var3": 1, "var4": 1},
{"id": 1, "t": 9, "var3": 1, "var4": 1}, # redundant, as nothing changed during the interval
]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 2
def test_will_convert_event_column_to_bools(self, seed_df, cv1):
seed_df["E"] = seed_df["E"].astype(int)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E")
assert df.dtypes["E"] == bool
def test_if_cvs_include_a_start_time_after_the_final_time_it_is_excluded(self, seed_df):
max_T = seed_df["stop"].max()
cv = pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var3": 0},
{"id": 1, "t": max_T + 10, "var3": 1}, # will be excluded
{"id": 2, "t": 0, "var3": 0},
]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 2
def test_if_cvs_include_a_start_time_before_it_is_included(self, seed_df):
min_T = seed_df["start"].min()
cv = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": min_T - 1, "var3": 1}, {"id": 2, "t": 0, "var3": 0}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 3
def test_cvs_with_null_values_are_dropped(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records(
[{"id": None, "t": 0, "var3": 0}, {"id": 1, "t": None, "var3": 1}, {"id": 2, "t": 0, "var3": None}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 1
def test_a_new_row_is_not_created_if_start_times_are_the_same(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv1 = pd.DataFrame.from_records([{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": 5, "var3": 1}])
cv2 = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var4": 0}, {"id": 1, "t": 5, "var4": 1.5}, {"id": 1, "t": 6, "var4": 1.7}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E"
)
assert df.shape[0] == 3
def test_error_is_raised_if_columns_are_missing_in_seed_df(self, seed_df, cv1):
del seed_df["start"]
with pytest.raises(IndexError):
utils.add_covariate_to_timeline(seed_df, cv1, "id", "t", "E")
def test_cumulative_sum(self):
seed_df = pd.DataFrame.from_records([{"id": 1, "start": 0, "stop": 5, "E": 1}])
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var4": 1}, {"id": 1, "t": 1, "var4": 1}, {"id": 1, "t": 3, "var4": 1}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", cumulative_sum=True)
expected = pd.DataFrame.from_records(
[
{"id": 1, "start": 0, "stop": 1.0, "cumsum_var4": 1, "E": False},
{"id": 1, "start": 1, "stop": 3.0, "cumsum_var4": 2, "E": False},
{"id": 1, "start": 3, "stop": 5.0, "cumsum_var4": 3, "E": True},
]
)
assert_frame_equal(expected, df, check_like=True)
def test_delay(self, cv2):
seed_df = pd.DataFrame.from_records([{"id": 1, "start": 0, "stop": 50, "E": 1}])
cv3 = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "varA": 2}, {"id": 1, "t": 10, "varA": 4}, {"id": 1, "t": 20, "varA": 6}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv3, "id", "t", "E", delay=2).fillna(0)
expected = pd.DataFrame.from_records(
[
{"start": 0, "stop": 2.0, "varA": 0.0, "id": 1, "E": False},
{"start": 2, "stop": 12.0, "varA": 2.0, "id": 1, "E": False},
{"start": 12, "stop": 22.0, "varA": 4.0, "id": 1, "E": False},
{"start": 22, "stop": 50.0, "varA": 6.0, "id": 1, "E": True},
]
)
assert_frame_equal(expected, df, check_like=True)
def test_covariates_from_event_matrix_with_simple_addition(self):
base_df = pd.DataFrame([[1, 0, 5, 1], [2, 0, 4, 1], [3, 0, 8, 1], [4, 0, 4, 1]], columns=["id", "start", "stop", "e"])
event_df = pd.DataFrame([[1, 1], [2, 2], [3, 3], [4, None]], columns=["id", "poison"])
cv = utils.covariates_from_event_matrix(event_df, "id")
ldf = utils.add_covariate_to_timeline(base_df, cv, "id", "duration", "e", cumulative_sum=True)
assert pd.notnull(ldf).all().all()
expected = pd.DataFrame(
[
(0.0, 0.0, 1.0, 1, False),
(1.0, 1.0, 5.0, 1, True),
(0.0, 0.0, 2.0, 2, False),
(2.0, 1.0, 4.0, 2, True),
(0.0, 0.0, 3.0, 3, False),
(3.0, 1.0, 8.0, 3, True),
(0.0, 0.0, 4.0, 4, True),
],
columns=["start", "cumsum_poison", "stop", "id", "e"],
)
assert_frame_equal(expected, ldf, check_dtype=False, check_like=True)
def test_covariates_from_event_matrix(self):
base_df = pd.DataFrame([[1, 0, 5, 1], [2, 0, 4, 1], [3, 0, 8, 1], [4, 0, 4, 1]], columns=["id", "start", "stop", "e"])
event_df = pd.DataFrame(
[[1, 1, None, 2], [2, None, 5, None], [3, 3, 3, 7]], columns=["id", "promotion", "movement", "raise"]
)
cv = utils.covariates_from_event_matrix(event_df, "id")
ldf = utils.add_covariate_to_timeline(base_df, cv, "id", "duration", "e", cumulative_sum=True)
expected = pd.DataFrame.from_records(
[
{
"cumsum_movement": 0.0,
"cumsum_promotion": 0.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 1.0,
"start": 0.0,
"stop": 1.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 1.0,
"start": 1.0,
"stop": 2.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 1.0,
"e": 1.0,
"id": 1.0,
"start": 2.0,
"stop": 5.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 0.0,
"cumsum_raise": 0.0,
"e": 1.0,
"id": 2.0,
"start": 0.0,
"stop": 4.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 0.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 3.0,
"start": 0.0,
"stop": 3.0,
},
{
"cumsum_movement": 1.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 3.0,
"start": 3.0,
"stop": 7.0,
},
{
"cumsum_movement": 1.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 1.0,
"e": 1.0,
"id": 3.0,
"start": 7.0,
"stop": 8.0,
},
{
"cumsum_movement": None,
"cumsum_promotion": None,
"cumsum_raise": None,
"e": 1.0,
"id": 4.0,
"start": 0.0,
"stop": 4.0,
},
]
)
assert_frame_equal(expected, ldf, check_dtype=False, check_like=True)
def test_to_episodic_format_with_long_time_gap_is_identical(self):
rossi = load_rossi()
rossi["id"] = np.arange(rossi.shape[0])
long_rossi = utils.to_episodic_format(rossi, duration_col="week", event_col="arrest", id_col="id", time_gaps=1000.0)
# using astype(int) would fail on Windows because int32 and int64 are used as dtype
long_rossi["week"] = long_rossi["stop"].astype(rossi["week"].dtype)
del long_rossi["start"]
del long_rossi["stop"]
assert_frame_equal(long_rossi, rossi, check_like=True)
def test_to_episodic_format_preserves_outcome(self):
E = [1, 1, 0, 0]
df = pd.DataFrame({"T": [1, 3, 1, 3], "E": E, "id": [1, 2, 3, 4]})
long_df = utils.to_episodic_format(df, "T", "E", id_col="id").sort_values(["id", "stop"])
assert long_df.shape[0] == 1 + 3 + 1 + 3
assert long_df.groupby("id").last()["E"].tolist() == E
def test_to_episodic_format_handles_floating_durations(self):
df = pd.DataFrame({"T": [0.1, 3.5], "E": [1, 1], "id": [1, 2]})
long_df = utils.to_episodic_format(df, "T", "E", id_col="id").sort_values(["id", "stop"])
assert long_df.shape[0] == 1 + 4
assert long_df["stop"].tolist() == [0.1, 1, 2, 3, 3.5]
def test_to_episodic_format_handles_floating_durations_with_time_gaps(self):
df = pd.DataFrame({"T": [0.1, 3.5], "E": [1, 1], "id": [1, 2]})
long_df = utils.to_episodic_format(df, "T", "E", id_col="id", time_gaps=2.0).sort_values(["id", "stop"])
assert long_df["stop"].tolist() == [0.1, 2, 3.5]
def test_to_episodic_format_handles_floating_durations_and_preserves_events(self):
        df = pd.DataFrame({"T": [0.1, 3.5], "E": [1, 0], "id": [1, 2]})
from http.server import BaseHTTPRequestHandler, HTTPServer
import socketserver
import pickle
import urllib.request
import json
from pprint import pprint
from pandas.io.json import json_normalize
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt  # used by the prepare*Utilization plotting methods below
from sklearn import preprocessing
from sklearn import datasets, linear_model
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import scale
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge
from sklearn.linear_model import TheilSenRegressor
from sklearn.datasets import make_regression
from math import sqrt
import os
import errno
from pymongo import MongoClient
import urllib.parse as urlparse
from influxdb import InfluxDBClient
class Terminus(BaseHTTPRequestHandler):
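    """Assemble cluster and load-test metrics from InfluxDB into pandas DataFrames.

    The helper methods below query Kubernetes node/pod/container metrics (CPU,
    memory, network) and k6 load-test metrics (vus, http_reqs, http_req_duration)
    and return them as time-indexed pandas DataFrames.

    Illustrative usage sketch (host, port and database names are assumptions, not
    taken from this file):

        client = InfluxDBClient(host='localhost', port=8086, database='k8s')
        clientK6 = InfluxDBClient(host='localhost', port=8086, database='k6')
        HTTPServer(('', 8000), Terminus).serve_forever()
    """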
def getAllNodeNames(self,client):
queryResult = client.query("SHOW TAG VALUES FROM uptime WITH KEY=nodename;")
nodeNames_temp = list(queryResult.get_points())
dfnodeNames = pd.DataFrame(nodeNames_temp)
allNodeNames = dfnodeNames[:]["value"]
return allNodeNames
def getNamespaceNames(self,client,node):
nsQuery = client.query("SHOW TAG VALUES FROM uptime WITH KEY=namespace_name WHERE nodename = '"+node+"';")
nsQuery_temp = list(nsQuery.get_points())
dfnsNames = pd.DataFrame(nsQuery_temp)
allnsNames = dfnsNames[:]["value"]
return allnsNames
def getAllPodNames(self,client,node,ns_name):
queryResult = client.query("SHOW TAG VALUES FROM uptime WITH KEY = pod_name WHERE namespace_name = '"+ns_name+"' AND nodename = '"+node+"';")
podNames_temp = list(queryResult.get_points())
dfpodNames = pd.DataFrame(podNames_temp)
if dfpodNames.empty:
return dfpodNames
else:
allpodNames = dfpodNames[:]["value"]
return allpodNames
def getCPUUtilizationNode(self,client, node):
queryResult = client.query('SELECT * FROM "cpu/node_utilization" where nodename = \''+node+'\' AND type=\'node\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/node_utilization'])
return dfcpuUtilization
def getCPUUtilizationPod(self,client, node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "cpu/usage_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/usage_rate'])
return dfcpuUtilization
def getCPUUtilizationPodContainer(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "cpu/usage_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod_container\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/usage_rate'])
return dfcpuUtilization
def prepareCpuUtilization(self,client,node,ns_name, pod_name):
cpuUtilization = self.getCPUUtilizationNode(client,node)
podCpuUtilization = self.getCPUUtilizationPod(client,node,ns_name, pod_name)
containercpuUtilization = self.getCPUUtilizationPodContainer(client,node,ns_name, pod_name)
plt.plot(cpuUtilization.index, cpuUtilization['value'] *1000, 'r', label="node") # plotting t, a separately
plt.plot(podCpuUtilization.index, podCpuUtilization['value'], 'b', label="pod") # plotting t, b separately
plt.plot(containercpuUtilization.index, containercpuUtilization['value'], 'g', label="container") # plotting t, c separately
plt.legend(loc='upper left')
plt.show()
def getMemoryUtilizationNode(self,client,node):
queryResult = client.query('SELECT * FROM "memory/node_utilization" where nodename = \''+node+'\' AND type=\'node\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/node_utilization'])
return dfmemUtilization
def getMemoryUtilizationPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "memory/usage" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/usage'])
return dfmemUtilization
def getMemoryUtilizationPodContainer(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "memory/usage" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod_container\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/usage'])
return dfmemUtilization
def prepareMemoryUtilization(self,client,node,ns_name, pod_name):
memoryUtilization = self.getMemoryUtilizationNode(client,node)
podMemoryUtilization = self.getMemoryUtilizationPod(client,node,ns_name, pod_name)
containerMemoryUtilization = self.getMemoryUtilizationPodContainer(client,node,ns_name, pod_name)
plt.plot(memoryUtilization.index, memoryUtilization['value'], 'r', label="node") # plotting t, a separately
plt.plot(podMemoryUtilization.index, podMemoryUtilization['value'], 'b', label="pod") # plotting t, b separately
plt.plot(containerMemoryUtilization.index, containerMemoryUtilization['value'], 'g', label="container") # plotting t, c separately
plt.legend(loc='upper left')
plt.show()
def getNetworkTxRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_rate'])
return dfmemUtilization
def getNetworkTxPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx'])
return dfmemUtilization
def getNetworkTxErrorsPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_errors" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_errors'])
return dfmemUtilization
def getNetworkTxErrorsRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_errors_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_errors_rate'])
return dfmemUtilization
def prepareNetworkTxRateUtilization(self,client,node,ns_name, pod_name):
podNetworTxRate = self.getNetworkTxRatePod(client,node,ns_name, pod_name)
podNetworTx = self.getNetworkTxPod(client,node,ns_name, pod_name)
podNetworkError = self.getNetworkTxErrorsPod(client,node,ns_name, pod_name)
podNetworkErrorRate = self.getNetworkTxErrorsRatePod(client,node,ns_name, pod_name)
plt.plot(podNetworTxRate.index, podNetworTxRate['value'], 'b') # plotting t, b separately
#plt.plot(podNetworTx.index, podNetworTx['value'], 'g') # plotting t, b separately
#plt.plot(podNetworkError.index, podNetworkError['value'], 'y') # plotting t, b separately
plt.plot(podNetworkErrorRate.index, podNetworkErrorRate['value'], 'r') # plotting t, b separately
plt.show()
def getNetworkRxRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_rate'])
return dfmemUtilization
def getNetworkRxPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx'])
return dfmemUtilization
def getNetworkRxErrorsPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_errors" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_errors'])
return dfmemUtilization
def getNetworkRxErrorsRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_errors_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_errors_rate'])
return dfmemUtilization
def prepareNetworkRxRateUtilization(self,client,node,ns_name, pod_name):
podNetworRxRate = self.getNetworkRxRatePod(client,node,ns_name, pod_name)
podNetworRx = self.getNetworkRxPod(client,node,ns_name, pod_name)
podNetworkError = self.getNetworkRxErrorsPod(client,node,ns_name, pod_name)
podNetworkErrorRate = self.getNetworkRxErrorsRatePod(client,node,ns_name, pod_name)
plt.plot(podNetworRxRate.index, podNetworRxRate['value'], 'b') # plotting t, b separately
#plt.plot(podNetworRx.index, podNetworRx['value'], 'g') # plotting t, b separately
#plt.plot(podNetworkError.index, podNetworkError['value'], 'y') # plotting t, b separately
plt.plot(podNetworkErrorRate.index, podNetworkErrorRate['value'], 'r') # plotting t, b separately
plt.show()
def getRelevantNodeName(self,client,ns_name):
allNodeNames = self.getAllNodeNames(client)
#nsNames = getNamespaceNames(allNodeNames[0])
relevantNodes = []
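        # NOTE: ns_name is not used below; pods are looked up in the 'default'
        # namespace only.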
for node in allNodeNames:
allPodNamesNode = self.getAllPodNames(client,node,'default')
if(not allPodNamesNode.empty):
relevantNodes.append(node)
return relevantNodes
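    # Node-level resource DataFrame: CPU/memory utilization plus per-minute means
    # of CPU-core and memory capacity, joined on the time index.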
def getNodeResourceUtilizationDf(self,client, nodeName):
Result_node_CPU = client.query("SELECT value from \"cpu/node_utilization\" where nodename = '"+nodeName+"' AND type = 'node' ")
Result_node_MEM = client.query("SELECT value from \"memory/node_utilization\" where nodename = '"+nodeName+"' AND type = 'node' ")
Result_node_CPU_Cores = client.query("SELECT mean(\"value\") FROM \"cpu/node_capacity\" where nodename = '"+nodeName+
"' AND type = 'node' GROUP BY time(1m)")
Result_node_mem_node = client.query("SELECT mean(\"value\")FROM \"memory/node_capacity\" where nodename = '"+
nodeName+"' AND type = 'node' GROUP BY time(1m)")
cpu_points = pd.DataFrame(Result_node_CPU.get_points())
cpu_points['time'] = pd.to_datetime(cpu_points['time'])
cpu_points = cpu_points.set_index('time')
cpu_points.columns = ['node_cpu_util']
mem_points = pd.DataFrame(Result_node_MEM.get_points())
mem_points['time'] = pd.to_datetime(mem_points['time'])
mem_points = mem_points.set_index('time')
mem_points.columns = ['node_mem_util']
cores_points = pd.DataFrame(Result_node_CPU_Cores.get_points())
cores_points['time'] = pd.to_datetime(cores_points['time'])
cores_points = cores_points.set_index('time')
cores_points.columns = ['node_cores']
mem_node_points = pd.DataFrame(Result_node_mem_node.get_points())
mem_node_points['time'] = pd.to_datetime(mem_node_points['time'])
mem_node_points = mem_node_points.set_index('time')
mem_node_points.columns = ['node_mem']
df_node =pd.concat([cpu_points, mem_points,cores_points,mem_node_points], axis=1)
return df_node
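    # Pod-level resource DataFrame: CPU/memory usage plus per-minute means of the
    # pod's limits and requests, joined on the time index.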
def getPodResourceUtilizationDf(self,client, node, ns_name, pod_name):
Result_Pod_CPU_usage = client.query('SELECT value FROM "cpu/usage_rate" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\';')
Result_Pod_MEM_usage = client.query('SELECT value from \"memory/usage\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\';')
Result_Pod_CPU_limit = client.query('SELECT mean(\"value\") FROM "cpu/limit" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\' group by time(1m);')
Result_Pod_MEM_limit = client.query('SELECT mean(\"value\") from \"memory/limit\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\' group by time(1m);')
Result_Pod_CPU_requests = client.query('SELECT mean(\"value\") FROM "cpu/request" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\' group by time(1m);')
Result_Pod_MEM_requests = client.query('SELECT mean(\"value\") from \"memory/request\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\' group by time(1m);')
cpu_points_usage = pd.DataFrame(Result_Pod_CPU_usage.get_points())
cpu_points_usage['time'] = pd.to_datetime(cpu_points_usage['time'])
cpu_points_usage = cpu_points_usage.set_index('time')
cpu_points_usage.columns = ['pod_cpu_usage']
mem_points_usage = pd.DataFrame(Result_Pod_MEM_usage.get_points())
mem_points_usage['time'] = pd.to_datetime(mem_points_usage['time'])
mem_points_usage = mem_points_usage.set_index('time')
mem_points_usage.columns = ['pod_mem_usage']
cpu_points_limits = pd.DataFrame(Result_Pod_CPU_limit.get_points())
cpu_points_limits['time'] = pd.to_datetime(cpu_points_limits['time'])
cpu_points_limits = cpu_points_limits.set_index('time')
cpu_points_limits.columns = ['pod_cpu_limit']
mem_points_limits = pd.DataFrame(Result_Pod_MEM_limit.get_points())
mem_points_limits['time'] = pd.to_datetime(mem_points_limits['time'])
mem_points_limits = mem_points_limits.set_index('time')
mem_points_limits.columns = ['pod_mem_limit']
cpu_points_request = pd.DataFrame(Result_Pod_CPU_requests.get_points())
cpu_points_request['time'] = pd.to_datetime(cpu_points_request['time'])
cpu_points_request = cpu_points_request.set_index('time')
cpu_points_request.columns = ['pod_cpu_request']
mem_points_request = pd.DataFrame(Result_Pod_MEM_requests.get_points())
mem_points_request['time'] = pd.to_datetime(mem_points_request['time'])
mem_points_request = mem_points_request.set_index('time')
mem_points_request.columns = ['pod_mem_request']
df_pod =pd.concat([cpu_points_usage, mem_points_usage,cpu_points_limits,mem_points_limits,cpu_points_request,mem_points_request ], axis=1)
return df_pod
def getRequestsDf(self,clientK6):
queryResult = clientK6.query('SELECT sum("value") FROM "vus" group by time(1m);')
vus = pd.DataFrame(queryResult['vus'])
vus.columns = ['vus','time']
vus = vus.set_index('time')
queryResultReqs = clientK6.query('SELECT sum("value") FROM "http_reqs" group by time(1m);')
reqs = pd.DataFrame(queryResultReqs['http_reqs'])
reqs.columns = ['requests','time']
reqs = reqs.set_index('time')
queryResultReqsDuration95 = clientK6.query('SELECT percentile("value", 95) FROM "http_req_duration" group by time(1m) ;')
reqs_duration95 = pd.DataFrame(queryResultReqsDuration95['http_req_duration'])
reqs_duration95.columns = [ 'requests_duration_percentile_95','time']
reqs_duration95 = reqs_duration95.set_index('time')
queryResultReqsDuration90 = clientK6.query('SELECT percentile("value", 90) FROM "http_req_duration" group by time(1m) ;')
reqs_duration90 = pd.DataFrame(queryResultReqsDuration90['http_req_duration'])
reqs_duration90.columns = ['requests_duration_percentile_90','time']
reqs_duration90 = reqs_duration90.set_index('time')
queryResultMaxDuration = clientK6.query('SELECT max("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_max = pd.DataFrame(queryResultMaxDuration['http_req_duration'])
reqs_duration_max.columns = ['requests_duration_max','time']
reqs_duration_max = reqs_duration_max.set_index('time')
queryResultMinDuration = clientK6.query('SELECT min("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_min = pd.DataFrame(queryResultMinDuration['http_req_duration'])
reqs_duration_min.columns = ['requests_duration_min','time']
reqs_duration_min = reqs_duration_min.set_index('time')
queryResultMeanDuration = clientK6.query('SELECT mean("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_mean = pd.DataFrame(queryResultMeanDuration['http_req_duration'])
reqs_duration_mean.columns = ['requests_duration_mean','time']
reqs_duration_mean = reqs_duration_mean.set_index('time')
queryResultMedianDuration = clientK6.query('SELECT median("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_median = pd.DataFrame(queryResultMedianDuration['http_req_duration'])
reqs_duration_median.columns = ['requests_duration_median','time']
reqs_duration_median = reqs_duration_median.set_index('time')
finalDF = pd.merge(vus, reqs, left_index=True, right_index=True)
finalDF = pd.merge(finalDF, reqs_duration95, left_index=True, right_index=True)
finalDF = | pd.merge(finalDF, reqs_duration90, left_index=True, right_index=True) | pandas.merge |
#!/usr/bin/env python
"""
Based on all checkm results, creates a table containing the nr of approved genomes
for all binning runs it can find within binning/*.
@author: alneberg
"""
from __future__ import print_function
import sys
import os
import argparse
import pandas as pd
import glob
def find_checkm_dirs():
all_runs = {}
for path in glob.glob("binning/*/*/output_*/*/checkm_output/stats.tsv"):
run_d = {}
path_parts = path.split('/')
run_d["binner"] = path_parts[1]
run_d["sample"] = path_parts[2]
run_d["quant"] = "_".join(path_parts[3].split('_')[1:])
run_d["run_params"] = path_parts[4]
run_d['SpeedUp'] = 'SpeedUp_Mp' in path_parts[4]
run_d['standardize'] = 'standardize' in path_parts[4]
all_runs[path] = run_d
return all_runs
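# Worked example (not in the original source): a path such as
#   binning/concoct/sample1/output_kallisto/default_SpeedUp_Mp/checkm_output/stats.tsv
# would be parsed into
#   {'binner': 'concoct', 'sample': 'sample1', 'quant': 'kallisto',
#    'run_params': 'default_SpeedUp_Mp', 'SpeedUp': True, 'standardize': False}
# The binner, sample and quantification names here are invented for illustration.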
def main(args):
all_runs = find_checkm_dirs()
for path, run_d in all_runs.items():
# Read in the checkm table
df = | pd.read_table(path, index_col=0) | pandas.read_table |
# coding=utf-8
# numpy and pandas for data manipulation
import numpy as np
import pandas as pd
# sklearn preprocessing for dealing with categorical variables
from sklearn.preprocessing import LabelEncoder
# File system manangement
import os
# Suppress warnings
import warnings
warnings.filterwarnings('ignore')
# matplotlib and seaborn for plotting
import matplotlib.pyplot as plt
# import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
# In[2]:
app_train = pd.read_csv('E:/datas/public.train.csv')  # read the training data
app_test = pd.read_csv('E:/datas/public.test.csv')  # read the test data
train_id = app_train[['ID']]  # get the training IDs
test_id = app_test[['ID']]  # get the test IDs
app_train_test = [app_train, app_test]
app_train_test = | pd.concat(app_train_test) | pandas.concat |
# -------------------------------------------------------------------------------
# Name: critical_loads.py
# Purpose: Functions to implement the updated (November 2018) Critical Loads
# workflow.
#
# Author: <NAME>
# -------------------------------------------------------------------------------
def view_dep_series(eng):
"""View table of deposition series already in the database.
Args:
eng: Obj. Active database connection object
Returns:
Dataframe.
"""
import pandas as pd
# Get existing series from db
sql = "SELECT * FROM deposition.dep_series_defs"
df = pd.read_sql(sql, eng)
return df
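# Illustrative usage sketch (not part of the original source): the functions in this
# module expect an active SQLAlchemy engine. The connection URI below is a placeholder
# assumption, not the project's real database.
#
#   from sqlalchemy import create_engine
#   eng = create_engine("postgresql://user:password@localhost:5432/critical_loads")
#   print(view_dep_series(eng).head())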
def add_dep_series(series_id, name, short_name, grid, desc, eng):
"""Add new deposition series to the database.
Args:
series_id: Int. Unique integer ID for series
name: Str. Full name for this series
short_name: Str. Short name for this series
grid: Str. One of ['blr', '0_1deg', 'emep']
desc: Str. Description of series
eng: Obj. Active database connection object
Returns:
None. Row is added.
"""
import pandas as pd
from sqlalchemy.sql import text
assert isinstance(series_id, int), "'series_id' must be an integer."
assert grid in (
"blr",
"0_1deg",
"emep",
), "'grid' must be one of ('blr', '0_1deg', 'emep')."
# Get existing series from db
sql = (
"INSERT INTO deposition.dep_series_defs "
"(series_id, name, short_name, grid, description) "
"VALUES "
"(:series_id, :name, :short_name, :grid, :desc)"
)
param_dict = {
"series_id": series_id,
"name": name,
"short_name": short_name,
"grid": grid,
"desc": desc,
}
sql = text(sql)
eng.execute(sql, param_dict)
print("Series added successfully.")
return None
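# Illustrative call (not in the original source); the series metadata shown is invented.
#
#   add_dep_series(series_id=99, name="Example deposition series 2017-21",
#                  short_name="ex1721", grid="0_1deg",
#                  desc="Placeholder series used for illustration", eng=eng)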
def upload_nilu_0_1deg_dep_data(data_fold, eng, series_id):
"""Process .dat files containing deposition data supplied by NILU. This
function is based on the data supplied by NILU during 2017, which uses
the new 0.1 degree deposition grid.
Args:
dat_fold: Str. Path to folder containing .dat files provided by NILU
eng: Obj. Active database connection object connect to the Docker
PostGIS db
series_id: Int. 'series_id' for this dataset from the table
deposition.dep_series_defs
Returns:
DataFrame of the data added to the database.
"""
import glob
import os
import pandas as pd
# Read NILU data
search_path = os.path.join(data_fold, "*.dat")
file_list = glob.glob(search_path)
df_list = []
for fpath in file_list:
# Get par name
name = os.path.split(fpath)[1].split("_")[:2]
name = "_".join(name)
# Read file
df = pd.read_csv(
fpath, delim_whitespace=True, header=None, names=["lat", "lon", name]
)
df.set_index(["lat", "lon"], inplace=True)
df_list.append(df)
# Combine
df = pd.concat(df_list, axis=1)
df.reset_index(inplace=True)
# Calculate unique integer cell ID as latlon
# (both *100 and padded to 4 digits)
df["cell_id"] = (df["lat"] * 100).astype(int).map("{:04d}".format) + (
df["lon"] * 100
).astype(int).map("{:04d}".format)
df["cell_id"] = df["cell_id"].astype(int)
del df["lat"], df["lon"], df["tot_n"]
# Rename
df.rename(
columns={
"tot_nhx": 2, # N (red)
"tot_nox": 1, # N (oks)
"tot_s": 4,
}, # Non-marine S
inplace=True,
)
# Melt
df = pd.melt(df, var_name="param_id", id_vars="cell_id")
# Add series ID
df["series_id"] = series_id
# Add to db
df.to_sql(
"dep_values_0_1deg_grid",
con=eng,
schema="deposition",
if_exists="append",
index=False,
method="multi",
chunksize=1000,
)
print("%s new rows added successfully." % len(df))
return df
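# Illustrative input layout (not part of the original source): each NILU .dat file is
# read as whitespace-separated lat/lon/value triples, e.g.
#   58.05  6.55  412.3
# and the parameter name (e.g. 'tot_nhx', 'tot_nox', 'tot_s') is taken from the first
# two underscore-separated parts of the file name. The numbers above are invented.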
def extract_deposition_as_gdf(series_id, par, eng, veg_class=None):
"""Extracts deposition data for the specified series as a geodataframe.
Args:
series_id: Int. ID for deposition series of interest
par: Str. One of ['nitrogen', 'sulphur']
eng: Obj. Active database connection object
veg_class: Str or None. Only applies for data using the EMEP grid, which
reports deposition values for different vegetation classes.
For EMEP, must be one of ['grid average', 'forest', 'semi-natural'];
otherwise, pass None
Returns:
GeoDataFrame.
"""
import geopandas as gpd
import warnings
from sqlalchemy.sql import text
veg_class_dict = {"grid average": 1, "forest": 2, "semi-natural": 3}
assert isinstance(series_id, int), "'series_id' must be an integer."
assert par in (
"nitrogen",
"sulphur",
), "'par' must be one of ('nitrogen', 'sulphur')."
# Identify grid
param_dict = {"series_id": series_id}
sql = "SELECT grid FROM deposition.dep_series_defs " "WHERE series_id = :series_id"
sql = text(sql)
grid = eng.execute(sql, param_dict).fetchall()[0][0]
assert (
grid is not None
), "'grid' is not defined for this series in the 'dep_series_defs' table.\n"
    if grid == "emep":
assert veg_class in ["grid average", "forest", "semi-natural"], (
"The specified series ID refers to the EMEP grid, "
"so you must also specify the 'veg_class' parameter.\n"
"Choose one of ['grid average', 'forest', 'semi-natural'] "
"and pass e.g. veg_class='grid average'."
)
if (grid != "emep") and (veg_class is not None):
print(
"WARNING: The specified series ID does NOT refer to the EMEP grid. "
"The 'veg_class' parameter will be ignored."
)
if par == "nitrogen":
unit_factor = 1 / 14.01
if grid == "emep":
param_dict["veg_class_id"] = veg_class_dict[veg_class]
# Choose 'grid-average' for veg class
sql = (
f"SELECT ST_Multi(ST_Transform(b.geom, 32633)) AS geom, "
f" a.cell_id, "
f" ROUND(a.n_dep) AS ndep_mgpm2pyr "
f"FROM (SELECT cell_id, SUM(value) as n_dep "
f" FROM deposition.dep_values_{grid}_grid "
f" WHERE param_id IN (1, 2) "
f" AND veg_class_id = :veg_class_id "
f" AND series_id = :series_id "
f" GROUP BY cell_id) AS a, "
f"deposition.dep_grid_{grid} AS b "
f"WHERE a.cell_id = b.cell_id"
)
else:
# No veg classes to consider
sql = (
f"SELECT ST_Multi(ST_Transform(b.geom, 32633)) AS geom, "
f" a.cell_id, "
f" ROUND(a.n_dep) AS ndep_mgpm2pyr "
f"FROM (SELECT cell_id, SUM(value) as n_dep "
f" FROM deposition.dep_values_{grid}_grid "
f" WHERE param_id IN (1, 2) "
f" AND series_id = :series_id "
f" GROUP BY cell_id) AS a, "
f"deposition.dep_grid_{grid} AS b "
f"WHERE a.cell_id = b.cell_id"
)
else:
unit_factor = 2 / 32.06
if grid == "emep":
param_dict["veg_class_id"] = veg_class_dict[veg_class]
# Choose 'grid-average' for veg class
sql = (
f"SELECT ST_Multi(ST_Transform(b.geom, 32633)) AS geom, "
f" a.cell_id, "
f" ROUND(a.s_dep) AS sdep_mgpm2pyr "
f"FROM (SELECT cell_id, SUM(value) as s_dep "
f" FROM deposition.dep_values_{grid}_grid "
f" WHERE param_id = 4 "
f" AND veg_class_id = :veg_class_id "
f" AND series_id = :series_id "
f" GROUP BY cell_id) AS a, "
f"deposition.dep_grid_{grid} AS b "
f"WHERE a.cell_id = b.cell_id"
)
else:
# No veg classes to consider
sql = (
f"SELECT ST_Multi(ST_Transform(b.geom, 32633)) AS geom, "
f" a.cell_id, "
f" ROUND(a.s_dep) AS sdep_mgpm2pyr "
f"FROM (SELECT cell_id, SUM(value) as s_dep "
f" FROM deposition.dep_values_{grid}_grid "
f" WHERE param_id = 4 "
f" AND series_id = :series_id "
f" GROUP BY cell_id) AS a, "
f"deposition.dep_grid_{grid} AS b "
f"WHERE a.cell_id = b.cell_id"
)
sql = text(sql)
gdf = gpd.read_postgis(sql, eng, params=param_dict)
# Convert units
gdf[par[0] + "dep_meqpm2pyr"] = gdf[par[0] + "dep_mgpm2pyr"] * unit_factor
gdf[par[0] + "dep_kgphapyr"] = gdf[par[0] + "dep_mgpm2pyr"] / 100
return gdf
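# Illustrative usage sketch (not part of the original source): the series ID below is an
# assumption for demonstration only.
#
#   gdf = extract_deposition_as_gdf(28, "nitrogen", eng)
#   gdf.plot(column="ndep_mgpm2pyr", legend=True)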
def create_deposition_raster(
series_id,
par,
unit,
cell_size,
eng,
ndv=-9999,
bit_depth="Int16",
fname=None,
veg_class=None,
):
"""Create a raster of deposition values from a Geodataframe.
Args:
series_id: Int. ID for deposition series of interest
par: Str. One of ['nitrogen', 'sulphur']
unit: Str. One of ['mgpm2pyr', kgphapyr, 'meqpm2pyr']
cell_size: Int. Output cell size in metres. Determines the "snap raster" to be used
One of (30, 60, 120)
eng: Obj. Active database connection object
ndv: Int. Value to use for No Data
bit_depth: Str. GDAL bit depth:
'Byte'
'Int16'
'UInt16'
'UInt32'
'Int32'
'Float32'
'Float64'
        fname: Str or None. File path for output. If None, the raster will be saved to
               shared/critical_loads/raster/deposition/short_name.tif
               where 'short_name' is as defined in the 'dep_series_defs' table.
        veg_class: Str or None. Only applies for data using the EMEP grid, which
                   reports deposition values for different vegetation classes.
                   For EMEP, must be one of ['grid average', 'forest', 'semi-natural'];
                   otherwise, pass None
Returns:
None. The grid is saved to the specified path.
"""
import geopandas as gpd
import os
from sqlalchemy.sql import text
assert unit in (
"mgpm2pyr",
"kgphapyr" "meqpm2pyr",
), "'unit' must be one of ('mgpm2pyr', 'kgphapyr', 'meqpm2pyr')."
assert cell_size in (30, 60, 120), "'cell_size' must be one of (30, 60, 120)."
# Get data
gdf = extract_deposition_as_gdf(series_id, par, eng, veg_class=veg_class)
# Save temporary file
gdf.to_file("temp_ndep.geojson", driver="GeoJSON")
# Convert to raster
col_name = f"{par[0]}dep_{unit}"
if fname is None:
# Get short_name from db
param_dict = {"series_id": series_id}
sql = "SELECT short_name FROM deposition.dep_series_defs WHERE series_id = :series_id"
sql = text(sql)
res = eng.execute(sql, param_dict).fetchall()[0][0]
assert res is not None, (
"'short_name' is not defined for this series in the 'dep_series_defs' table.\n"
"Consider explicitly specifying a file name?"
)
fname = f"/home/jovyan/shared/critical_loads/raster/deposition/{col_name}_{res}_{cell_size}m.tif"
snap_tif = (
f"/home/jovyan/shared/critical_loads/raster/blr_land_mask_{cell_size}m.tif"
)
vec_to_ras("temp_ndep.geojson", fname, snap_tif, col_name, ndv, bit_depth)
# Delete temp file
os.remove("temp_ndep.geojson")
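# Illustrative call (not in the original source); the series ID and output path are
# assumptions, and the snap raster must already exist at the shared path used above.
#
#   create_deposition_raster(28, "nitrogen", "mgpm2pyr", 120, eng,
#                            fname="/tmp/ndep_mgpm2pyr_example_120m.tif")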
def vec_to_ras(in_shp, out_tif, snap_tif, attrib, ndv, data_type, fmt="GTiff"):
"""Converts a shapefile to a raster with values taken from
the 'attrib' field. The 'snap_tif' is used to set the
resolution and extent of the output raster.
Args:
in_shp: Str. Raw string to shapefile
out_tif: Str. Raw string for geotiff to create
snap_tif: Str. Raw string to geotiff used to set resolution
and extent
attrib: Str. Shapefile field for values
ndv: Int. No data value
data_type: Str. GDAL bit depth:
'Byte'
'Int16'
'UInt16'
'UInt32'
'Int32'
'Float32'
'Float64'
fmt: Str. Format string.
Returns:
None. Raster is saved.
"""
import ogr
import gdal
# Bit depth dict
bit_dict = {
"Byte": gdal.GDT_Byte,
"Int16": gdal.GDT_Int16,
"UInt16": gdal.GDT_UInt16,
"UInt32": gdal.GDT_UInt32,
"Int32": gdal.GDT_Int32,
"Float32": gdal.GDT_Float32,
"Float64": gdal.GDT_Float64,
}
assert data_type in bit_dict.keys(), "ERROR: Invalid data type."
# 1. Create new, empty raster with correct dimensions
# Get properties from snap_tif
snap_ras = gdal.Open(snap_tif)
cols = snap_ras.RasterXSize
rows = snap_ras.RasterYSize
proj = snap_ras.GetProjection()
geotr = snap_ras.GetGeoTransform()
# Create out_tif
driver = gdal.GetDriverByName(fmt)
out_ras = driver.Create(
out_tif, cols, rows, 1, bit_dict[data_type], options=["COMPRESS=LZW"]
)
out_ras.SetProjection(proj)
out_ras.SetGeoTransform(geotr)
# Fill output with NoData
out_ras.GetRasterBand(1).SetNoDataValue(ndv)
out_ras.GetRasterBand(1).Fill(ndv)
# 2. Rasterize shapefile
shp_ds = ogr.Open(in_shp)
shp_lyr = shp_ds.GetLayer()
gdal.RasterizeLayer(
out_ras, [1], shp_lyr, options=["ATTRIBUTE=%s" % attrib, "COMPRESS=LZW"]
)
# Flush and close
snap_ras = None
out_ras = None
shp_ds = None
def reclassify_raster(in_tif, mask_tif, out_tif, reclass_df, reclass_col, ndv):
"""Reclassify categorical values in a raster using a mapping
in a dataframe. The dataframe index must contain the classes
in in_tif and the 'reclass_col' must specify the new classes.
Only cells with value=1 in 'mask_tif' are written to output.
Args:
in_tif: Str. Raw path to input raster
mask_tif: Str. Raw path to mask grid defining land area
out_tif: Str. Raw path to .tif file to create
reclass_df: DataFrame. Reclassification table
reclass_col: Str. Name of column with new raster values
ndv: Int. Value to use as NoData in the new raster
Returns:
None. A new raster is saved.
"""
import gdal
import ogr
from gdalconst import GA_ReadOnly as GA_ReadOnly
import numpy as np
import pandas as pd
# Open source file, read data
src_ds = gdal.Open(in_tif, GA_ReadOnly)
assert src_ds
rb = src_ds.GetRasterBand(1)
src_data = rb.ReadAsArray()
# Open mask, read data
mask_ds = gdal.Open(mask_tif, GA_ReadOnly)
assert mask_ds
mb = mask_ds.GetRasterBand(1)
mask_data = mb.ReadAsArray()
# Reclassify
rc_data = src_data.copy()
for idx, row in reclass_df.iterrows():
rc_data[src_data == idx] = row[reclass_col]
# Apply mask
rc_data[mask_data != 1] = ndv
# Write output
driver = gdal.GetDriverByName("GTiff")
dst_ds = driver.CreateCopy(out_tif, src_ds, 0, options=["COMPRESS=LZW"])
out_band = dst_ds.GetRasterBand(1)
out_band.SetNoDataValue(ndv)
out_band.WriteArray(rc_data)
# Flush data and close datasets
dst_ds = None
src_ds = None
mask_ds = None
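# Illustrative reclassification table (not in the original source): the index holds the
# classes present in 'in_tif' and 'reclass_col' holds the new values. File names and
# class codes below are invented.
#
#   import pandas as pd
#   reclass_df = pd.DataFrame({"crit_load_class": [10, 20, 30]}, index=[1, 2, 3])
#   reclassify_raster("veg.tif", "land_mask.tif", "veg_reclass.tif",
#                     reclass_df, "crit_load_class", ndv=255)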
def calc_vegetation_exceedance_0_1deg(dep_tif, cl_tif, ex_tif, ex_tif_bool, ser_id):
"""Calculate exceedances for vegetation.
Args:
dep_tif: Str. Raw string to deposition grid
cl_tif: Str. Raw string to critical loads grid
ex_tif: Str. Raw string to exceedance grid to be created
ex_tif_bool: Str. Raw string to exceedance grid with Boolean values (i.e. 1
where exceeded and 0 otherwise)
ser_id: Int. Deposition series ID for the data of interest
Returns:
Summary dataframe.
"""
import nivapy3 as nivapy
import pandas as pd
import numpy as np
import gdal
# Container for output
data_dict = {"total_area_km2": [], "exceeded_area_km2": []}
# Read grids
cl_grid, cl_ndv, cl_epsg, cl_ext = nivapy.spatial.read_raster(cl_tif)
dep_grid, dep_ndv, dep_epsg, dep_ext = nivapy.spatial.read_raster(dep_tif)
# Work out cell size
cs = (cl_ext[1] - cl_ext[0]) / cl_grid.shape[1]
cs = float(int(cs + 0.5))
# Upcast to float32 for safe handling of negative values
cl_grid = cl_grid.astype(np.float32)
dep_grid = dep_grid.astype(np.float32)
# Set ndv
cl_grid[cl_grid == cl_ndv] = np.nan
dep_grid[dep_grid == dep_ndv] = np.nan
# Get total area of non-NaN from dep grid
nor_area = np.count_nonzero(~np.isnan(dep_grid)) * cs * cs / 1.0e6
# Apply scaling factor to CLs
cl_grid = cl_grid * 100.0
# Exceedance
ex_grid = dep_grid - cl_grid
del dep_grid, cl_grid
# Get total area exceeded
ex_area = np.count_nonzero(ex_grid > 0) * cs * cs / 1.0e6
# Set <0 to 0
ex_grid[ex_grid < 0] = 0
# Reset ndv
ex_grid[np.isnan(ex_grid)] = -1
# Downcast to int16 to save space
ex_grid = ex_grid.round(0).astype(np.int16)
# Append results
data_dict["total_area_km2"].append(nor_area)
data_dict["exceeded_area_km2"].append(ex_area)
# Write exceedance output
write_geotiff(ex_grid, ex_tif, cl_tif, -1, gdal.GDT_Int16)
# Convert to bool grid
ex_grid[ex_grid > 0] = 1
ex_grid[ex_grid == -1] = 255
# Write bool output
write_geotiff(ex_grid, ex_tif_bool, cl_tif, 255, gdal.GDT_Byte)
del ex_grid
# Build output df
ex_df = pd.DataFrame(data_dict)
ex_df["exceeded_area_pct"] = (
100 * ex_df["exceeded_area_km2"] / ex_df["total_area_km2"]
)
ex_df = ex_df.round(0).astype(int)
ex_df["series_id"] = ser_id
ex_df["medium"] = "vegetation"
ex_df = ex_df[
[
"series_id",
"medium",
"total_area_km2",
"exceeded_area_km2",
"exceeded_area_pct",
]
]
return ex_df
def write_geotiff(data, out_tif, snap_tif, ndv, data_type):
"""Write a numpy array to a geotiff using 'snap_tif' to define
raster properties.
Args:
data: Array.
out_tif: Str. File to create
snap_tif: Str. Path to existing tif with same resolution
and extent as target
ndv: Int. No data value
data_type: Bit depth etc. e.g. gdal.GDT_UInt32
Returns:
None. Geotiff is saved.
"""
from osgeo import ogr
from osgeo import gdal
# 1. Create new, empty raster with correct dimensions
# Get properties from snap_tif
snap_ras = gdal.Open(snap_tif)
cols = snap_ras.RasterXSize
rows = snap_ras.RasterYSize
proj = snap_ras.GetProjection()
geotr = snap_ras.GetGeoTransform()
# Create out_tif
driver = gdal.GetDriverByName("GTiff")
out_ras = driver.Create(out_tif, cols, rows, 1, data_type, options=["COMPRESS=LZW"])
out_ras.SetProjection(proj)
out_ras.SetGeoTransform(geotr)
# Write data
out_band = out_ras.GetRasterBand(1)
out_band.SetNoDataValue(ndv)
out_band.WriteArray(data)
# Flush and close
snap_ras = None
out_band = None
out_ras = None
def bbox_to_pixel_offsets(gt, bbox):
"""Helper function for zonal_stats(). Modified from:
https://gist.github.com/perrygeo/5667173
Original code copyright 2013 <NAME>
"""
originX = gt[0]
originY = gt[3]
pixel_width = gt[1]
pixel_height = gt[5]
x1 = int((bbox[0] - originX) / pixel_width)
x2 = int((bbox[1] - originX) / pixel_width) + 1
y1 = int((bbox[3] - originY) / pixel_height)
y2 = int((bbox[2] - originY) / pixel_height) + 1
xsize = x2 - x1
ysize = y2 - y1
return (x1, y1, xsize, ysize)
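# Worked example (not in the original source): with a geotransform
# gt = (0, 100, 0, 1000, 0, -100) (origin x=0, y=1000, 100 m pixels) and an envelope
# bbox = (250, 480, 300, 620) given as (xmin, xmax, ymin, ymax):
#   x1 = int((250 - 0) / 100)          = 2
#   x2 = int((480 - 0) / 100) + 1      = 5   -> xsize = 3
#   y1 = int((620 - 1000) / -100)      = 3
#   y2 = int((300 - 1000) / -100) + 1  = 8   -> ysize = 5
# so the feature maps to a 3 x 5 pixel window starting at column 2, row 3.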
def remap_categories(category_map, stats):
"""Modified from https://gist.github.com/perrygeo/5667173
Original code copyright 2013 <NAME>
"""
def lookup(m, k):
"""Dict lookup but returns original key if not found"""
try:
return m[k]
except KeyError:
return k
return {lookup(category_map, k): v for k, v in stats.items()}
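# Example (not in the original source):
#   remap_categories({1: "exceeded", 0: "not_exceeded"}, {1: 42, 0: 958, 255: 3})
#   -> {"exceeded": 42, "not_exceeded": 958, 255: 3}
# Keys missing from the category map (here 255) are passed through unchanged.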
def exceedance_stats_per_0_1deg_cell(
ex_tif,
ser_id,
eng,
write_to_db=True,
nodata_value=-1,
global_src_extent=False,
categorical=False,
category_map=None,
):
"""Summarise exceedance values for each 0.1 degree grid cell.
Args:
        ex_tif: Raw str. Path to exceedance raster
ser_id: Int. Deposition series ID
eng: Obj. Active database connection object
write_to_db: Bool. If True, results will be written to the database
nodata_value: Float. Value in raster to treat as NoData
global_src_extent: Bool. If True, reads all data into memory in a single
                           pass. May be faster, but also takes up lots of memory
when used with large vector or raster datasets
categorical: Bool. If true, raster is assumed to be categorical, with
integer values representing different categories (e.g. land
use). In this case, the statistics returned are pixel counts
of each category within each vector zone
category_map: Dict. Only used when "categorical" is True. Dict mapping
integer values to category names {int_id:'cat_name'}. If
supplied, the integer categories in the results dataframe
will be mapped to the specified category names
Returns:
GeoDataFrame of cell statistics.
"""
import gdal
import ogr
import numpy as np
import pandas as pd
import sys
import geopandas as gpd
import os
from gdalconst import GA_ReadOnly
gdal.PushErrorHandler("CPLQuietErrorHandler")
# Read vector
temp_fold = os.path.split(ex_tif)[0]
temp_shp = os.path.join(temp_fold, "temp.shp")
    gdf = extract_deposition_as_gdf(ser_id, "nitrogen", eng, veg_class="grid average")
gdf.to_file(temp_shp)
# Read raster
rds = gdal.Open(ex_tif, GA_ReadOnly)
assert rds
rb = rds.GetRasterBand(1)
rgt = rds.GetGeoTransform()
# Get cell size
cs = rgt[1]
if nodata_value:
nodata_value = float(nodata_value)
rb.SetNoDataValue(nodata_value)
vds = ogr.Open(
temp_shp, GA_ReadOnly
) # TODO maybe open update if we want to write stats
assert vds
vlyr = vds.GetLayer(0)
# create an in-memory numpy array of the source raster data
# covering the whole extent of the vector layer
if global_src_extent:
# use global source extent
# useful only when disk IO or raster scanning inefficiencies are your limiting factor
# advantage: reads raster data in one pass
# disadvantage: large vector extents may have big memory requirements
src_offset = bbox_to_pixel_offsets(rgt, vlyr.GetExtent())
src_array = rb.ReadAsArray(*src_offset)
# calculate new geotransform of the layer subset
new_gt = (
(rgt[0] + (src_offset[0] * rgt[1])),
rgt[1],
0.0,
(rgt[3] + (src_offset[1] * rgt[5])),
0.0,
rgt[5],
)
mem_drv = ogr.GetDriverByName("Memory")
driver = gdal.GetDriverByName("MEM")
# Loop through vectors
stats = []
feat = vlyr.GetNextFeature()
while feat is not None:
if not global_src_extent:
# use local source extent
# fastest option when you have fast disks and well indexed raster (ie tiled Geotiff)
# advantage: each feature uses the smallest raster chunk
# disadvantage: lots of reads on the source raster
src_offset = bbox_to_pixel_offsets(rgt, feat.geometry().GetEnvelope())
src_array = rb.ReadAsArray(*src_offset)
# calculate new geotransform of the feature subset
new_gt = (
(rgt[0] + (src_offset[0] * rgt[1])),
rgt[1],
0.0,
(rgt[3] + (src_offset[1] * rgt[5])),
0.0,
rgt[5],
)
# Create a temporary vector layer in memory
mem_ds = mem_drv.CreateDataSource("out")
mem_layer = mem_ds.CreateLayer("poly", None, ogr.wkbPolygon)
mem_layer.CreateFeature(feat.Clone())
# Rasterize it
rvds = driver.Create(
"", src_offset[2], src_offset[3], 1, gdal.GDT_Byte, options=["COMPRESS=LZW"]
)
rvds.SetGeoTransform(new_gt)
gdal.RasterizeLayer(rvds, [1], mem_layer, burn_values=[1])
rv_array = rvds.ReadAsArray()
# Mask the source data array with our current feature
# we take the logical_not to flip 0<->1 to get the correct mask effect
# we also mask out nodata values explictly
masked = np.ma.MaskedArray(
src_array,
mask=np.logical_or(src_array == nodata_value, np.logical_not(rv_array)),
)
if categorical:
# Get cell counts for each category
keys, counts = np.unique(masked.compressed(), return_counts=True)
pixel_count = dict(
zip([np.asscalar(k) for k in keys], [np.asscalar(c) for c in counts])
)
feature_stats = dict(pixel_count)
if category_map:
feature_stats = remap_categories(category_map, feature_stats)
else:
# Get summary stats
feature_stats = {
"min": float(masked.min()),
"mean": float(masked.mean()),
"max": float(masked.max()),
"std": float(masked.std()),
"sum": float(masked.sum()),
"count": int(masked.count()),
"fid": int(feat.GetFID()),
}
stats.append(feature_stats)
rvds = None
mem_ds = None
feat = vlyr.GetNextFeature()
# Tidy up
vds = None
rds = None
for fname in ["temp.cpg", "temp.dbf", "temp.prj", "temp.shp", "temp.shx"]:
os.remove(os.path.join(temp_fold, fname))
# Combine results
df = pd.DataFrame(stats)
df.fillna(0, inplace=True)
df["series_id"] = ser_id
df["fid"] = df.index
gdf["fid"] = gdf.index
gdf = gdf.merge(df, on="fid")
# Calc areas
gdf["exceeded_area_km2"] = gdf["exceeded"] * cs * cs / 1e6
gdf["total_area_km2"] = (gdf["exceeded"] + gdf["not_exceeded"]) * cs * cs / 1e6
gdf["pct_exceeded"] = 100 * gdf["exceeded_area_km2"] / gdf["total_area_km2"]
gdf.drop(
[
"fid",
"exceeded",
"not_exceeded",
"ndep_mgpm2pyr",
"ndep_meqpm2pyr",
"ndep_kgphapyr",
],
axis="columns",
inplace=True,
)
gdf.dropna(how="any", inplace=True)
if write_to_db:
gdf2 = gdf.copy()
del gdf2["geom"]
df = pd.DataFrame(gdf2)
df.to_sql(
"exceedance_stats_0_1deg_grid",
eng,
"vegetation",
if_exists="append",
index=False,
method="multi",
chunksize=1000,
)
return gdf
def exceedance_stats_per_land_use_class(
ex_tif_bool, veg_tif, ser_id, eng, write_to_db=True, nodata_value=255
):
"""Summarise exceedance values for each land use class.
Args:
ex_tif_bool: Raw str. Path to boolean exceedance raster
veg_tif: Str. Path to vegetation data with same resolution as ex_tif_bool
ser_id: Int. Deposition series ID
eng: Obj. Active database connection object
write_to_db: Bool. If True, results will be written to the database
nodata_value: Float. Value in rasters to treat as NoData
Returns:
GeoDataFrame of land use statistics.
"""
import gdal
import ogr
import numpy as np
import pandas as pd
import sys
import geopandas as gpd
import os
from gdalconst import GA_ReadOnly
gdal.PushErrorHandler("CPLQuietErrorHandler")
# Read LU table
sql = "SELECT * FROM vegetation.land_class_crit_lds"
lu_df = pd.read_sql(sql, eng)
# Read exceedance raster
rds = gdal.Open(ex_tif_bool, GA_ReadOnly)
assert rds
rb = rds.GetRasterBand(1)
if nodata_value:
nodata_value = float(nodata_value)
rb.SetNoDataValue(nodata_value)
ex_array = rb.ReadAsArray()
# Get cell size
rgt = rds.GetGeoTransform()
cs = rgt[1]
# Read vegetation raster
rds = gdal.Open(veg_tif, GA_ReadOnly)
assert rds
rb = rds.GetRasterBand(1)
if nodata_value:
nodata_value = float(nodata_value)
rb.SetNoDataValue(nodata_value)
veg_array = rb.ReadAsArray()
# Loop through land classes
stats = []
for idx, row in lu_df.iterrows():
# Mask the source data array
masked = np.ma.MaskedArray(
ex_array,
mask=np.logical_or(
ex_array == nodata_value, veg_array != row["norut_code"]
),
)
# Get cell counts for each category
keys, counts = np.unique(masked.compressed(), return_counts=True)
pixel_count = dict(
zip([np.asscalar(k) for k in keys], [np.asscalar(c) for c in counts])
)
feature_stats = dict(pixel_count)
feature_stats = remap_categories(
{1: "exceeded", 0: "not_exceeded"}, feature_stats
)
stats.append(feature_stats)
# Tidy up
rds = None
# Combine results
df = pd.DataFrame(stats)
df.fillna(0, inplace=True)
df["norut_code"] = lu_df["norut_code"]
df["series_id"] = ser_id
# Calc areas
df["exceeded_area_km2"] = df["exceeded"] * cs * cs / 1e6
df["total_area_km2"] = (df["exceeded"] + df["not_exceeded"]) * cs * cs / 1e6
df["pct_exceeded"] = 100 * df["exceeded_area_km2"] / df["total_area_km2"]
del df["exceeded"], df["not_exceeded"]
df.dropna(how="any", inplace=True)
if write_to_db:
df.to_sql(
"exceedance_stats_land_class",
eng,
"vegetation",
if_exists="append",
index=False,
method="multi",
chunksize=1000,
)
return df
def veg_exceedance_as_gdf_0_1deg(ser_id, eng, shp_path=None):
"""Extracts exceedance statistics for the specified series as a
geodataframe using NILU's 0.1 degree grid. Optionally, the data
can be saved as a shapefile.
Args:
ser_id: Int. ID for deposition series of interest
eng: Obj. Active database connection object
shp_path: Str. Raw path for shapefile to be created
Returns:
GeoDataFrame.
"""
import geopandas as gpd
# Get dep values
sql_args = {"ser_id": ser_id}
sql = (
"SELECT ST_Transform(b.geom, 32633) AS geom, "
" a.cell_id, "
" a.exceeded_area_km2, "
" a.total_area_km2, "
" a.pct_exceeded "
"FROM (SELECT cell_id, "
" exceeded_area_km2, "
" total_area_km2, "
" pct_exceeded "
" FROM vegetation.exceedance_stats_0_1deg_grid "
" WHERE series_id = {ser_id}) AS a, "
"deposition.dep_grid_0_1deg AS b "
"WHERE a.cell_id = b.cell_id"
).format(**sql_args)
gdf = gpd.read_postgis(sql, eng)
if shp_path:
gdf.to_file(shp_path)
return gdf
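# Illustrative call (not in the original source); the series ID and shapefile path are
# assumptions.
#
#   gdf = veg_exceedance_as_gdf_0_1deg(28, eng, shp_path="/tmp/veg_exceedance_28.shp")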
def calc_anclimit_cla_clminmax(df, bc0):
"""Calculates the ANC Limit, the CLA and the CLmin and CLmax values using
the specified version of BC0, both with and without a correction for organic
acids).
Used by calculate_critical_loads_for_water()
Args:
df: Dataframe.
bc0: Str. One of ['BC0', 'BC0_Ffac', 'BC0_magic']
Returns:
Dataframe with new columns for
['ANClimit_{bc0}',
'ANClimitOAA_{bc0}',
'CLA_{bc0}',
'CLAOAA_{bc0}',
'CLmaxS_{bc0}',
'CLmaxSoaa_{bc0}',
'CLmaxN_{bc0}',
'CLmaxNoaa_{bc0}',
]
"""
import numpy as np
assert bc0 in [
"BC0",
"BC0_Ffac",
"BC0_magic",
], "'bc0' must be one of ['BC0', 'BC0_Ffac', 'BC0_magic']."
method = bc0.replace("BC0", "")
# ANC limit
df[f"ANClimit{method}"] = np.minimum(
50, (0.25 * df["Runoff"] * df[bc0]) / (1 + 0.25 * df["Runoff"])
)
# ANC limit OAA
df[f"ANClimitOAA{method}"] = np.minimum(
40,
(0.2 * df["Runoff"] * (df[bc0] - 3.4 * df["TOC"])) / (1 + 0.2 * df["Runoff"]),
)
# CLA
df[f"CLA{method}"] = df["Runoff"] * (df[bc0] - df[f"ANClimit{method}"])
# CLA OAA
df[f"CLAOAA{method}"] = df["Runoff"] * (
df[bc0] - df[f"ANClimitOAA{method}"] - 3.4 * df["TOC"]
)
# CLmaxS
df[f"CLmaxS{method}"] = df[f"CLA{method}"] / df["alphaS"]
# CLmaxSoaa
df[f"CLmaxSoaa{method}"] = df[f"CLAOAA{method}"] / df["alphaS"]
# CLmaxN
df[f"CLmaxN{method}"] = df["CLminN"] + (df[f"CLA{method}"] / df["alphaN"])
# CLmaxNoaa
df[f"CLmaxNoaa{method}"] = df["CLminN"] + (df[f"CLAOAA{method}"] / df["alphaN"])
return df
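# Illustrative usage sketch (not part of the original source): the input dataframe must
# already contain the columns used above ('Runoff', 'TOC', 'alphaS', 'alphaN', 'CLminN'
# and the chosen BC0 column). The numbers below are invented.
#
#   import pandas as pd
#   df = pd.DataFrame({"Runoff": [1.2], "TOC": [4.0], "alphaS": [1.0],
#                      "alphaN": [1.0], "CLminN": [5.0], "BC0": [60.0]})
#   df = calc_anclimit_cla_clminmax(df, "BC0")
#   print(df[["ANClimit", "CLA", "CLmaxS", "CLmaxN"]])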
def calculate_critical_loads_for_water(
xl_path=None, req_df=None, opt_df=None, mag_df=None, out_csv=None
):
"""Calculates critical loads for water based on values entered in an
Excel template (input_template_critical_loads_water.xlsx) or from
the database. See the Excel file for full details of the input
data requirements.
You must provide EITHER the 'xl_path' OR the three separate
dataframes - NOT BOTH.
This function performs broadly the same calculations as Tore's 'CL'
and 'CALKBLR' packages in RESA2, but generalised to allow for more
flexible input data. The original critical loads calculations were
implemented by Tore's 'cl.clcalculations' function, which has been
documented by Kari:
K:\Avdeling\317 Klima- og miljømodellering\KAU\Focal Centre\Data\CL script 23032015_notes.docx
These notes form the basis for much of the code here.
Args:
xl_path: Str. Path to completed copy the Excel input template
req_df: Dataframe of required parameters
opt_df: Dataframe of optional parameters
mag_df: Dataframe of magic parameters
out_csv: Raw str. The final dataframe is saved to the specified path
Returns:
Dataframe.
"""
import pandas as pd
import numpy as np
# Check input
if xl_path and (req_df is not None or mag_df is not None or opt_df is not None):
message = (
"ERROR: You must provide EITHER the 'xl_path' OR the three "
"separate dataframes - NOT both."
)
print(message)
raise ValueError(message)
if xl_path and (req_df is None and mag_df is None and opt_df is None):
# Read worksheets
req_df = pd.read_excel(xl_path, sheet_name="required_parameters")
opt_df = | pd.read_excel(xl_path, sheet_name="optional_parameters") | pandas.read_excel |
import sys
import numpy as np
import pandas as pd
from sqlalchemy import create_engine, inspect
from sqlalchemy import delete
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import MetaData
# command to start the script
# python process_data.py disaster_messages.csv disaster_categories.csv DB_disaster_msg.db
# Constants
# Name of the messages table
DB_TABLE_NAME = 'Messages'
def load_data(messages_filepath, categories_filepath):
"""
read the data from 2 csv files
input:
- messages_filepath: file path to the messages file
- categories_filepath: file path to the categories file
"""
try:
# load the 2 data sets
messages = pd.read_csv(messages_filepath)
print('Dataset Messages: ', messages.shape)
categories = pd.read_csv(categories_filepath)
print('Dateset Categories: ', categories.shape)
except:
print("Unable to load files. Check Parameters")
return | pd.DataFrame() | pandas.DataFrame |
import matplotlib
matplotlib.use('TkAgg') # noqa
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.cm as cm
import matplotlib.colors as mcolors
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import cmocean
import numpy as np
import os
import ast
import pickle
import pandas as pd
from collections import defaultdict
from oggm import workflow, cfg, tasks, utils
from oggm.core.flowline import FileModel
from oggm.graphics import plot_centerlines
from relic.postprocessing import (mae_weighted, optimize_cov, calc_coverage,
get_ensemble_length, get_rcp_ensemble_length)
from relic.preprocessing import name_plus_id, GLCDICT, MERGEDICT
def paramplots(df, glid, pout, y_len=None):
# take care of merged glaciers
rgi_id = glid.split('_')[0]
fig1, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=[20, 7])
allvars = ['prcp_scaling_factor', 'mbbias', 'glena_factor']
varcols = {'mbbias': np.array([-1400, -1200, -1000, -800, -600, -400, -200,
-100, 0, 100, 200, 400, 600, 800, 1000]),
'prcp_scaling_factor': np.arange(0.5, 4.1, 0.25),
'glena_factor': np.arange(1, 4.1, 0.5)}
for var, ax in zip(allvars, [ax1, ax2, ax3]):
notvars = allvars.copy()
notvars.remove(var)
# lets use OGGM HISTALP default
papar = {'glena_factor': 1.0, 'mbbias': 0, 'prcp_scaling_factor': 1.75}
# store specific runs
dfvar = pd.DataFrame([], columns=varcols[var], index=df.index)
# OGGM standard
for run in df.columns:
if run == 'obs':
continue
para = ast.literal_eval('{' + run + '}')
if ((np.isclose(para[notvars[0]],
papar[notvars[0]], atol=0.01)) and
(np.isclose(para[notvars[1]],
papar[notvars[1]], atol=0.01))):
dfvar.loc[:, para[var]] = df.loc[:, run]
if var == 'prcp_scaling_factor':
lbl = 'Precip scaling factor'
cmap = LinearSegmentedColormap('lala', cmocean.tools.get_dict(
cmocean.cm.deep))
normalize = mcolors.Normalize(vmin=0,
vmax=4.5)
bounds = np.arange(0.375, 4.2, 0.25)
cbarticks = np.arange(1, 4.1, 1)
elif var == 'glena_factor':
lbl = 'Glen A factor'
cmap = LinearSegmentedColormap('lala', cmocean.tools.get_dict(
cmocean.cm.matter))
normalize = mcolors.Normalize(vmin=0,
vmax=4.5)
bounds = np.arange(0.75, 4.3, 0.5)
cbarticks = np.arange(1, 4.1, 1)
elif var == 'mbbias':
cmap = LinearSegmentedColormap('lala', cmocean.tools.get_dict(
cmocean.cm.balance))
cmaplist = [cmap(i) for i in range(cmap.N)]
cmaplist[128] = (0.412, 0.847, 0.655, 1.0)
cmap = mcolors.LinearSegmentedColormap.from_list('mcm', cmaplist,
cmap.N)
cbarticks = np.array([-1400, -1000, -600, -200,
0, 200, 600, 1000])
bounds = np.array([-1500, -1300, -1100, -900, -700, -500, -300,
-150, -50, 50, 100, 300, 500, 700, 900, 1100])
normalize = mcolors.Normalize(vmin=-1600,
vmax=1600)
lbl = 'MB bias [mm w.e.]'
colors = [cmap(normalize(n)) for n in varcols[var]]
scalarmappaple = cm.ScalarMappable(norm=normalize, cmap=cmap)
cbaxes = inset_axes(ax, width="3%", height="40%", loc=3)
cbar = plt.colorbar(scalarmappaple, cax=cbaxes,
label=lbl,
boundaries=bounds)
cbar.set_ticks(cbarticks)
cbaxes.tick_params(axis='both', which='major', labelsize=16)
cbar.set_label(label=lbl, size=16)
# plot observations
df.loc[:, 'obs'].rolling(1, min_periods=1).mean(). \
plot(ax=ax, color='k', style='.',
marker='o', label='Observed length change',
markersize=6)
dfvar = dfvar.sort_index(axis=1)
# default parameter column
dc = np.where(dfvar.columns == papar[var])[0][0]
dfvar.loc[:, varcols[var][dc]].rolling(y_len, center=True).mean(). \
plot(ax=ax, color=colors[dc], linewidth=5,
label='{}: {} (OGGM default)'.
format(lbl, str(varcols[var][dc])))
# all parameters
nolbl = ['' for i in np.arange(len(dfvar.columns))]
dfvar.columns = nolbl
dfvar.rolling(y_len, center=True).mean().plot(ax=ax, color=colors,
linewidth=2)
ax.set_xlabel('Year', fontsize=26)
ax.set_xlim([1850, 2010])
ax.set_ylim([-4000, 2000])
ax.tick_params(axis='both', which='major', labelsize=22)
if not ax == ax1:
ax.set_yticklabels([])
ax.grid(True)
ax.set_xticks(np.arange(1880, 2010, 40))
ax.legend(fontsize=16, loc=2)
ax1.set_ylabel('relative length change [m]', fontsize=26)
name = name_plus_id(rgi_id)
fig1.suptitle('%s' % name, fontsize=28)
fig1.subplots_adjust(left=0.09, right=0.99, bottom=0.12, top=0.89,
wspace=0.05)
fn1 = os.path.join(pout, 'calibration_%s.png' % glid)
fig1.savefig(fn1)
def past_simulation_and_params(glcdict, pout, y_len=5):
for glid, df in glcdict.items():
# take care of merged glaciers
rgi_id = glid.split('_')[0]
fig = plt.figure(figsize=[20, 7])
gs = GridSpec(1, 4) # 1 rows, 4 columns
ax1 = fig.add_subplot(gs[0, 0:3])
ax2 = fig.add_subplot(gs[0, 3])
df.loc[:, 'obs'].plot(ax=ax1, color='k', marker='o',
label='Observations')
# OGGM standard
for run in df.columns:
if run == 'obs':
continue
para = ast.literal_eval('{' + run + '}')
if ((np.abs(para['prcp_scaling_factor'] - 1.75) < 0.01) and
(para['mbbias'] == 0) and
(para['glena_factor'] == 1)):
df.loc[:, run].rolling(y_len, center=True). \
mean().plot(ax=ax1, linewidth=2, color='k',
label='OGGM default parameter run')
oggmdefault = run
maes = mae_weighted(df).sort_values()
idx2plot = optimize_cov(df.loc[:, maes.index[:150]],
df.loc[:, 'obs'], glid, minuse=5)
ensmean = df.loc[:, idx2plot].mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df.loc[:, idx2plot].std(axis=1).rolling(y_len,
center=True).mean()
# coverage
cov = calc_coverage(df, idx2plot, df['obs'])
ax1.fill_between(ensmeanmean.index, ensmeanmean - ensstdmean,
ensmeanmean + ensstdmean, color='xkcd:teal', alpha=0.5)
# nolbl = df.loc[:, idx2plot2].rolling(y_len, center=True).mean().copy()
# nolbl.columns = ['' for i in range(len(nolbl.columns))]
#df.loc[:, idx2plot2].rolling(y_len, center=True).mean().plot(
# ax=ax1, linewidth=0.8)
# plot ens members
ensmeanmean.plot(ax=ax1, linewidth=4.0, color='xkcd:teal',
label='ensemble parameters runs')
# reference run (basically min mae)
df.loc[:, maes.index[0]].rolling(y_len, center=True).mean(). \
plot(ax=ax1, linewidth=3, color='xkcd:lavender',
label='minimum wMAE parameter run')
name = name_plus_id(rgi_id)
mae_ens = mae_weighted(pd.concat([ensmean, df['obs']], axis=1))[0]
mae_best = maes[0]
ax1.set_title('%s' % name, fontsize=28)
ax1.text(2030, -4900, 'wMAE ensemble mean = %.2f m\n'
'wMAE minimum run = %.2f m' %
(mae_ens, mae_best), fontsize=18,
horizontalalignment='right')
ax1.text(2040, -4900, '%d ensemble members\n'
'coverage = %.2f' %
(len(idx2plot), cov), fontsize=18)
ax1.set_ylabel('relative length change [m]', fontsize=26)
ax1.set_xlabel('Year', fontsize=26)
ax1.set_xlim([1850, 2020])
ax1.set_ylim([-3500, 1000])
ax1.tick_params(axis='both', which='major', labelsize=22)
ax1.grid(True)
ax1.legend(bbox_to_anchor=(-0.1, -0.15), loc='upper left',
fontsize=18, ncol=2)
# parameter plots
from colorspace import sequential_hcl
col = sequential_hcl('Blue-Yellow').colors(len(idx2plot) + 3)
for i, run in enumerate(idx2plot):
para = ast.literal_eval('{' + run + '}')
psf = para['prcp_scaling_factor']
gla = para['glena_factor']
mbb = para['mbbias']
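            # rescale the MB bias from its parameter range [-1400, 1000] onto [0.5, 4.0]
            # so it can share the y-axis of the other two parameters (a twin axis with
            # the original mm w.e. labels is added below)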
mbb = (mbb - -1400) * (4-0.5) / (1000 - -1400) + 0.5
ax2.plot([1, 2, 3], [psf, gla, mbb], color=col[i], linewidth=2)
ax2.set_xlabel('calibration parameters', fontsize=18)
ax2.set_ylabel('Precipitation scaling factor\nGlen A factor',
fontsize=18)
ax2.set_xlim([0.8, 3.2])
ax2.set_ylim([0.3, 4.2])
ax2.set_xticks([1, 2, 3])
ax2.set_xticklabels(['Psf', 'GlenA', 'MB bias'], fontsize=16)
ax2.tick_params(axis='y', which='major', labelsize=16)
ax2.grid(True)
ax3 = ax2.twinx()
# scale to same y lims
scale = (4.2-0.3)/(4.0-0.5)
dy = (2400*scale-2400)/2
ax3.set_ylim([-1400-dy, 1000+dy])
ax3.set_ylabel('mass balance bias [m w.e. ]', fontsize=18)
ax3.set_yticks(np.arange(-1400, 1100, 400))
ax3.set_yticklabels(['-1.4', '-1.0', '-0.6', '-0.2',
'0.2', '0.6', '1.0'])
ax3.tick_params(axis='both', which='major', labelsize=16)
fig.subplots_adjust(left=0.08, right=0.95, bottom=0.24, top=0.93,
wspace=0.5)
fn1 = os.path.join(pout, 'histalp_%s.png' % glid)
fig.savefig(fn1)
used = dict()
used['oggmdefault'] = oggmdefault
used['minmae'] = idx2plot[0]
used['ensemble'] = idx2plot
pickle.dump(used, open(os.path.join(pout, 'runs_%s.p' % glid), 'wb'))
def past_simulation_and_commitment(rgi, allobs, allmeta, histalp_storage,
comit_storage, comit_storage_noseed,
pout, y_len=5, comyears=300):
cols = ['xkcd:teal',
'xkcd:orange',
'xkcd:azure',
'xkcd:tomato',
'xkcd:blue',
'xkcd:chartreuse',
'xkcd:green'
]
obs = allobs.loc[rgi.split('_')[0]]
meta = allmeta.loc[rgi.split('_')[0]]
fn99 = 'model_diagnostics_commitment1999_{:02d}.nc'
df99 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn99, meta)
fn85 = 'model_diagnostics_commitment1885_{:02d}.nc'
df85 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn85, meta)
fn70 = 'model_diagnostics_commitment1970_{:02d}.nc'
df70 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn70, meta)
# plot
fig, ax1 = plt.subplots(1, figsize=[20, 7])
obs.plot(ax=ax1, color='k', marker='o',
label='Observations')
# past
ensmean = df99.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df99.std(axis=1).rolling(y_len, center=True).mean()
ax1.fill_between(ensmeanmean.loc[:2015].index,
ensmeanmean.loc[:2015] - ensstdmean.loc[:2015],
ensmeanmean.loc[:2015] + ensstdmean.loc[:2015],
color=cols[0], alpha=0.5)
ensmeanmean.loc[:2015].plot(ax=ax1, linewidth=4.0, color=cols[0],
label='HISTALP climate')
# dummy
ax1.plot(0, 0, 'w-', label=' ')
# 1999
ax1.fill_between(ensmeanmean.loc[2015:].index,
ensmeanmean.loc[2015:] - ensstdmean.loc[2015:],
ensmeanmean.loc[2015:] + ensstdmean.loc[2015:],
color=cols[1], alpha=0.5)
ensmeanmean.loc[2015:].plot(ax=ax1, linewidth=4.0, color=cols[1],
label='Random climate (1984-2014)')
# 1970
ensmean = df70.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df70.std(axis=1).rolling(y_len, center=True).mean()
ax1.fill_between(ensmeanmean.loc[2015:].index,
ensmeanmean.loc[2015:] - ensstdmean.loc[2015:],
ensmeanmean.loc[2015:] + ensstdmean.loc[2015:],
color=cols[5], alpha=0.5)
ensmeanmean.loc[2015:].plot(ax=ax1, linewidth=4.0, color=cols[5],
label='Random climate (1960-1980)')
# 1885
ensmean = df85.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df85.std(axis=1).rolling(y_len, center=True).mean()
ax1.fill_between(ensmeanmean.loc[2015:].index,
ensmeanmean.loc[2015:] - ensstdmean.loc[2015:],
ensmeanmean.loc[2015:] + ensstdmean.loc[2015:],
color=cols[2], alpha=0.5)
ensmeanmean.loc[2015:].plot(ax=ax1, linewidth=4.0, color=cols[2],
label='Random climate (1870-1900)')
# ---------------------------------------------------------------------
# plot commitment ensemble length
# 1984
efn99 = 'model_diagnostics_commitment1999_{:02d}.nc'
edf99 = get_ensemble_length(rgi, histalp_storage, comit_storage_noseed,
efn99, meta)
ensmean = edf99.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = edf99.std(axis=1).rolling(y_len, center=True).mean()
postlength = ensmeanmean.dropna().iloc[-30:].mean()
poststd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2014+comyears+10, 2014+comyears+25],
postlength + poststd, postlength - poststd,
color=cols[3], alpha=0.5)
ax1.plot([2014+comyears+10.5, 2014+comyears+24.5], [postlength, postlength], linewidth=4.0,
color=cols[3],
label=('Random climate (1984-2014) '
                    'equilibrium length'))
# 1970
efn70 = 'model_diagnostics_commitment1970_{:02d}.nc'
edf70 = get_ensemble_length(rgi, histalp_storage, comit_storage_noseed,
efn70, meta)
ensmean = edf70.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = edf70.std(axis=1).rolling(y_len, center=True).mean()
prelength = ensmeanmean.dropna().iloc[-30:].mean()
prestd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2014+comyears+10, 2014+comyears+25],
prelength + prestd, prelength - prestd,
color=cols[6], alpha=0.5)
ax1.plot([2014+comyears+10.5, 2014+comyears+24.5], [prelength, prelength],
linewidth=4.0,
color=cols[6],
label=('Random climate (1960-1980) '
                    'equilibrium length'))
# 1885
efn85 = 'model_diagnostics_commitment1885_{:02d}.nc'
edf85 = get_ensemble_length(rgi, histalp_storage, comit_storage_noseed,
efn85, meta)
ensmean = edf85.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = edf85.std(axis=1).rolling(y_len, center=True).mean()
prelength = ensmeanmean.dropna().iloc[-30:].mean()
prestd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2014+comyears+10, 2014+comyears+25],
prelength + prestd, prelength - prestd,
color=cols[4], alpha=0.5)
ax1.plot([2014+comyears+10.5, 2014+comyears+24.5], [prelength, prelength],
linewidth=4.0,
color=cols[4],
label=('Random climate (1870-1900) '
                    'equilibrium length'))
# ---------------------------------------------------------------------
ylim = ax1.get_ylim()
#ax1.plot([2015, 2015], ylim, 'k-', linewidth=2)
ax1.set_xlim([1850, 2014+comyears+30])
#ax1.set_ylim(ylim)
ax2 = ax1.twinx()
ax2.set_ylabel('approximate\n absolute glacier length [m]', fontsize=26)
y1, y2 = get_absolute_length(ylim[0], ylim[1], rgi, df99, histalp_storage)
ax2.tick_params(axis='both', which='major', labelsize=22)
ax2.set_ylim([y1, y2])
name = name_plus_id(rgi)
ax1.set_title('%s' % name, fontsize=28)
ax1.set_ylabel('relative length change [m]', fontsize=26)
ax1.set_xlabel('Year', fontsize=26)
ax1.tick_params(axis='both', which='major', labelsize=22)
ax1.set_xticks([1850, 1950, 2014, 2114, 2214, 2314])
ax1.set_xticklabels(['1850', '1950', '2014/0', '100', '200', '300'])
ax1.grid(True)
ax1.legend(bbox_to_anchor=(-0.0, -0.17), loc='upper left', fontsize=18,
ncol=3)
fig.subplots_adjust(left=0.09, right=0.9, bottom=0.3, top=0.93,
wspace=0.5)
fn1 = os.path.join(pout, 'commit_%s.png' % rgi)
fig.savefig(fn1)
def past_simulation_and_projection(rgi, allobs, allmeta, histalp_storage,
proj_storage, comit_storage,
pout, y_len=5,):
cols = ['xkcd:teal',
'xkcd:azure',
'xkcd:lime',
'xkcd:orange',
'xkcd:magenta',
'xkcd:tomato',
'xkcd:blue',
'xkcd:green'
]
obs = allobs.loc[rgi.split('_')[0]]
meta = allmeta.loc[rgi.split('_')[0]]
dfall = pd.DataFrame([], index=np.arange(1850, 2101))
dfallstd = pd.DataFrame([], index=np.arange(1850, 2101))
for rcp in ['rcp26', 'rcp45', 'rcp60', 'rcp85']:
dfrcp = get_rcp_ensemble_length(rgi, histalp_storage, proj_storage,
rcp, meta)
ensmean = dfrcp.mean(axis=1)
dfall.loc[:, rcp] = ensmean.rolling(y_len, center=True).mean()
dfallstd.loc[:, rcp] = dfrcp.std(axis=1).\
rolling(y_len, center=True).mean()
# plot
fig, ax1 = plt.subplots(1, figsize=[20, 7])
obs.plot(ax=ax1, color='k', marker='o',
label='Observations')
# past
ax1.fill_between(dfall.loc[:2015, rcp].index,
dfall.loc[:2015, rcp] - dfallstd.loc[:2015, rcp],
dfall.loc[:2015, rcp] + dfallstd.loc[:2015, rcp],
color=cols[0], alpha=0.5)
dfall.loc[:2015, rcp].plot(ax=ax1, linewidth=4.0, color=cols[0],
label='HISTALP climate')
# dummy
ax1.plot(0, 0, 'w-', label=' ')
# projections
# rcp26
ax1.fill_between(dfall.loc[2015:, 'rcp26'].index,
dfall.loc[2015:, 'rcp26'] - dfallstd.loc[2015:, 'rcp26'],
dfall.loc[2015:, 'rcp26'] + dfallstd.loc[2015:, 'rcp26'],
color=cols[1], alpha=0.5)
dfall.loc[2015:, 'rcp26'].plot(ax=ax1, linewidth=4.0, color=cols[1],
label='RCP 2.6 climate')
# rcp45
dfall.loc[2015:, 'rcp45'].plot(ax=ax1, linewidth=4.0, color=cols[2],
label='RCP 4.5 climate')
# dummy
ax1.plot(0, 0, 'w-', label=' ')
# rcp60
dfall.loc[2015:, 'rcp60'].plot(ax=ax1, linewidth=4.0, color=cols[3],
label='RCP 6.0 climate')
# rcp85
ax1.fill_between(dfall.loc[2015:, 'rcp85'].index,
dfall.loc[2015:, 'rcp85'] - dfallstd.loc[2015:, 'rcp85'],
dfall.loc[2015:, 'rcp85'] + dfallstd.loc[2015:, 'rcp85'],
color=cols[4], alpha=0.5)
dfall.loc[2015:, 'rcp85'].plot(ax=ax1, linewidth=4.0, color=cols[4],
label='RCP 8.5 climate')
# dummy
ax1.plot(0, 0, 'w-', label=' ')
# plot commitment length
# 1984
fn99 = 'model_diagnostics_commitment1999_{:02d}.nc'
df99 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn99, meta)
ensmean = df99.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df99.std(axis=1).rolling(y_len, center=True).mean()
postlength = ensmeanmean.dropna().iloc[-30:].mean()
poststd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2105, 2111],
postlength + poststd, postlength - poststd,
color=cols[5], alpha=0.5)
ax1.plot([2105.5, 2110.5], [postlength, postlength], linewidth=4.0,
color=cols[5],
label=('Random climate (1984-2014) '
'equilibrium length'))
# 1970
fn70 = 'model_diagnostics_commitment1970_{:02d}.nc'
df70 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn70, meta)
ensmean = df70.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df70.std(axis=1).rolling(y_len, center=True).mean()
prelength = ensmeanmean.dropna().iloc[-30:].mean()
prestd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2105, 2111],
prelength + prestd, prelength - prestd,
color=cols[7], alpha=0.5)
ax1.plot([2105.5, 2110.5], [prelength, prelength], linewidth=4.0,
color=cols[7],
label=('Random climate (1960-1980) '
'equilibrium length'))
# 1885
fn85 = 'model_diagnostics_commitment1885_{:02d}.nc'
df85 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn85, meta)
ensmean = df85.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df85.std(axis=1).rolling(y_len, center=True).mean()
prelength = ensmeanmean.dropna().iloc[-30:].mean()
prestd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2105, 2111],
prelength + prestd, prelength - prestd,
color=cols[6], alpha=0.5)
ax1.plot([2105.5, 2110.5], [prelength, prelength], linewidth=4.0,
color=cols[6],
label=('Random climate (1870-1900) '
'equilibrium length'))
ylim = ax1.get_ylim()
ax1.set_xlim([1850, 2112])
ax2 = ax1.twinx()
    ax2.set_ylabel('approximate\n absolute glacier length [m]', fontsize=26)
y1, y2 = get_absolute_length(ylim[0], ylim[1], rgi, df99, histalp_storage)
ax2.tick_params(axis='both', which='major', labelsize=22)
ax2.set_ylim([y1, y2])
name = name_plus_id(rgi)
ax1.set_title('%s' % name, fontsize=28)
ax1.set_ylabel('relative length change [m]', fontsize=26)
ax1.set_xlabel('Year', fontsize=26)
ax1.tick_params(axis='both', which='major', labelsize=22)
ax1.grid(True)
ax1.legend(bbox_to_anchor=(0.0, -0.17), loc='upper left', fontsize=18,
ncol=4)
fig.subplots_adjust(left=0.09, right=0.9, bottom=0.3, top=0.93,
wspace=0.5)
fn1 = os.path.join(pout, 'proj_%s.png' % rgi)
fig.savefig(fn1)
def get_mean_temps_eq(rgi, histalp_storage, comit_storage, ensmembers):
from oggm import cfg, utils, GlacierDirectory
from oggm.core.massbalance import MultipleFlowlineMassBalance
from oggm.core.flowline import FileModel
import shutil
# 1. get mean surface heights
df85 = pd.DataFrame([])
df99 = pd.DataFrame([])
for i in range(ensmembers):
fnc1 = os.path.join(comit_storage, rgi,
'model_run_commitment1885_{:02d}.nc'.format(i))
fnc2 = os.path.join(comit_storage, rgi,
'model_run_commitment1999_{:02d}.nc'.format(i))
tmpmod1 = FileModel(fnc1)
tmpmod2 = FileModel(fnc2)
for j in np.arange(270, 301):
tmpmod1.run_until(j)
df85.loc[:, '{}{}'.format(i, j)] = tmpmod1.fls[-1].surface_h
tmpmod2.run_until(j)
df99.loc[:, '{}{}'.format(i, j)] = tmpmod2.fls[-1].surface_h
meanhgt99 = df99.mean(axis=1).values
meanhgt85 = df85.mean(axis=1).values
# 2. get the climate
# Initialize OGGM
cfg.initialize()
wd = utils.gettempdir(reset=True)
cfg.PATHS['working_dir'] = wd
utils.mkdir(wd, reset=True)
cfg.PARAMS['baseline_climate'] = 'HISTALP'
# and set standard histalp values
cfg.PARAMS['temp_melt'] = -1.75
i = 0
storage_dir = os.path.join(histalp_storage, rgi, '{:02d}'.format(i),
rgi[:8], rgi[:11], rgi)
new_dir = os.path.join(cfg.PATHS['working_dir'], 'per_glacier',
rgi[:8], rgi[:11], rgi)
shutil.copytree(storage_dir, new_dir)
gdir = GlacierDirectory(rgi)
mb = MultipleFlowlineMassBalance(gdir, filename='climate_monthly',
check_calib_params=False)
# need to do the above for every ensemble member if I consider PRECIP!
# and set cfg.PARAMS['prcp_scaling_factor'] = pdict['prcp_scaling_factor']
df99_2 = pd.DataFrame()
df85_2 = pd.DataFrame()
for i in np.arange(9, 12):
for y in np.arange(1870, 1901):
flyear = utils.date_to_floatyear(y, i)
tmp = mb.flowline_mb_models[-1].get_monthly_climate(meanhgt85,
flyear)[0]
df85_2.loc[y, i] = tmp.mean()
        for y in np.arange(1984, 2015):
            flyear = utils.date_to_floatyear(y, i)
            tmp = mb.flowline_mb_models[-1].get_monthly_climate(meanhgt99,
flyear)[0]
df99_2.loc[y, i] = tmp.mean()
t99 = df99_2.mean().mean()
t85 = df85_2.mean().mean()
return t85, t99
def get_mean_temps_2k(rgi, return_prcp):
from oggm import cfg, utils, workflow, tasks
from oggm.core.massbalance import PastMassBalance
# Initialize OGGM
cfg.initialize()
wd = utils.gettempdir(reset=True)
cfg.PATHS['working_dir'] = wd
utils.mkdir(wd, reset=True)
cfg.PARAMS['baseline_climate'] = 'HISTALP'
# and set standard histalp values
cfg.PARAMS['temp_melt'] = -1.75
cfg.PARAMS['prcp_scaling_factor'] = 1.75
gdir = workflow.init_glacier_regions(rgidf=rgi.split('_')[0],
from_prepro_level=3,
prepro_border=10)[0]
# run histalp climate on glacier!
tasks.process_histalp_data(gdir)
f = gdir.get_filepath('climate_historical')
with utils.ncDataset(f) as nc:
refhgt = nc.ref_hgt
mb = PastMassBalance(gdir, check_calib_params=False)
df = pd.DataFrame()
df2 = pd.DataFrame()
for y in np.arange(1870, 2015):
for i in np.arange(9, 12):
flyear = utils.date_to_floatyear(y, i)
tmp = mb.get_monthly_climate([refhgt], flyear)[0]
df.loc[y, i] = tmp.mean()
if return_prcp:
for i in np.arange(3, 6):
flyear = utils.date_to_floatyear(y, i)
pcp = mb.get_monthly_climate([refhgt], flyear)[3]
                df2.loc[y, i] = pcp.mean()
t99 = df.loc[1984:2014, :].mean().mean()
t85 = df.loc[1870:1900, :].mean().mean()
t2k = df.loc[1900:2000, :].mean().mean()
if return_prcp:
p99 = df2.loc[1984:2014, :].mean().mean()
p85 = df2.loc[1870:1900, :].mean().mean()
p2k = df2.loc[1900:2000, :].mean().mean()
return t85, t99, t2k, p85, p99, p2k
return t85, t99, t2k
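# Hypothetical usage sketch (the RGI id below is only a placeholder, not taken
# from this analysis): compare the two reference climates for one glacier.
#
#     t85, t99, t2k = get_mean_temps_2k('RGI60-11.00897', return_prcp=False)
#     print('T(1984-2014) - T(1870-1900) = {:.2f} K'.format(t99 - t85))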
def get_absolute_length(y0, y1, rgi, df, storage):
    from oggm.core.flowline import FileModel
rgipath = os.path.join(storage, rgi, '{:02d}'.format(0),
rgi[:8], rgi[:11], rgi)
mfile = os.path.join(rgipath, 'model_run_histalp_{:02d}.nc'.format(0))
tmpmod = FileModel(mfile)
absL = tmpmod.length_m
deltaL = df.loc[int(tmpmod.yr.values), 0]
abs_y0 = absL + (y0 - deltaL)
abs_y1 = absL + (y1 - deltaL)
return abs_y0, abs_y1
def elevation_profiles(rgi, meta, histalp_storage, pout):
name = name_plus_id(rgi)
df1850 = pd.DataFrame()
df2003 = pd.DataFrame()
    df2003b = pd.DataFrame()
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
df = pd.read_csv('/kaggle/input/forest-cover-type-prediction/train.csv')
test_df = pd.read_csv('/kaggle/input/forest-cover-type-prediction/test.csv')
print(df.head(10))
print(len(df))
# print(df.describe())
print(df.columns)
print(test_df.head(10))
print(len(test_df))
# print(df.describe())
print(test_df.columns)
df['Cover_Type'].describe()
pd.value_counts(df['Cover_Type'])
df.describe()
import matplotlib.pyplot as plt
df.drop(axis=1, columns=['Soil_Type7','Soil_Type15'], inplace=True)
# Convert the Wilderness Area one hot encoded to single column
columns = ['Wilderness_Area1', 'Wilderness_Area2', 'Wilderness_Area3',
'Wilderness_Area4']
wilderness_types = []
for index, row in df.iterrows():
dummy = 'Wilderness_Area_NA'
for col in columns:
if row[col] == 1:
dummy = col
break
wilderness_types.append(dummy)
df['Wilderness_Areas'] = wilderness_types
# Convert the Soil Type one hot encoded to single column
columns = ['Soil_Type1', 'Soil_Type2', 'Soil_Type3',
'Soil_Type4', 'Soil_Type5', 'Soil_Type6', 'Soil_Type8',
'Soil_Type9', 'Soil_Type10', 'Soil_Type11', 'Soil_Type12',
'Soil_Type13', 'Soil_Type14', 'Soil_Type16',
'Soil_Type17', 'Soil_Type18', 'Soil_Type19', 'Soil_Type20',
'Soil_Type21', 'Soil_Type22', 'Soil_Type23', 'Soil_Type24',
'Soil_Type25', 'Soil_Type26', 'Soil_Type27', 'Soil_Type28',
'Soil_Type29', 'Soil_Type30', 'Soil_Type31', 'Soil_Type32',
'Soil_Type33', 'Soil_Type34', 'Soil_Type35', 'Soil_Type36',
'Soil_Type37', 'Soil_Type38', 'Soil_Type39', 'Soil_Type40']
soil_types = []
for index, row in df.iterrows():
dummy = 'Soil_Type_NA'
for col in columns:
if row[col] == 1:
dummy = col
break
soil_types.append(dummy)
df['Soil_Types'] = soil_types
print(pd.value_counts(df['Soil_Types']))
ax = df['Soil_Types'].value_counts().plot(kind='bar',
figsize=(8,5),
title="Number for each Soli Type")
ax.set_xlabel("Soil Types")
ax.set_ylabel("Frequency")
plt.show()
print(pd.value_counts(df['Wilderness_Areas']))
ax1 = df['Wilderness_Areas'].value_counts().plot(kind='bar',
figsize=(8,5),
title="Number for each Soli Type")
ax1.set_xlabel("Wilderness_Areas")
ax1.set_ylabel("Frequency")
plt.show()
df.drop(axis=1, columns=['Wilderness_Area1', 'Wilderness_Area2', 'Wilderness_Area3',
'Wilderness_Area4'], inplace=True)
df.drop(axis=1, columns=['Soil_Type1', 'Soil_Type2', 'Soil_Type3',
'Soil_Type4', 'Soil_Type5', 'Soil_Type6', 'Soil_Type8',
'Soil_Type9', 'Soil_Type10', 'Soil_Type11', 'Soil_Type12',
'Soil_Type13', 'Soil_Type14', 'Soil_Type16',
'Soil_Type17', 'Soil_Type18', 'Soil_Type19', 'Soil_Type20',
'Soil_Type21', 'Soil_Type22', 'Soil_Type23', 'Soil_Type24',
'Soil_Type25', 'Soil_Type26', 'Soil_Type27', 'Soil_Type28',
'Soil_Type29', 'Soil_Type30', 'Soil_Type31', 'Soil_Type32',
'Soil_Type33', 'Soil_Type34', 'Soil_Type35', 'Soil_Type36',
'Soil_Type37', 'Soil_Type38', 'Soil_Type39', 'Soil_Type40'], inplace=True)
df['Soil_Types'].replace(to_replace={'Soil_Type8': 'Soil_Type_NA', 'Soil_Type25': 'Soil_Type_NA'}, inplace=True)
print(pd.value_counts(df['Soil_Types']))
## Apply to test_df
test_df.drop(axis=1, columns=['Soil_Type7','Soil_Type15'], inplace=True)
# Convert the Wilderness Area one hot encoded to single column
columns = ['Wilderness_Area1', 'Wilderness_Area2', 'Wilderness_Area3',
'Wilderness_Area4']
wilderness_types = []
for index, row in test_df.iterrows():
dummy = 'Wilderness_Area_NA'
for col in columns:
if row[col] == 1:
dummy = col
break
wilderness_types.append(dummy)
test_df['Wilderness_Areas'] = wilderness_types
# Convert the Soil Type one hot encoded to single column
columns = ['Soil_Type1', 'Soil_Type2', 'Soil_Type3',
'Soil_Type4', 'Soil_Type5', 'Soil_Type6', 'Soil_Type8',
'Soil_Type9', 'Soil_Type10', 'Soil_Type11', 'Soil_Type12',
'Soil_Type13', 'Soil_Type14', 'Soil_Type16',
'Soil_Type17', 'Soil_Type18', 'Soil_Type19', 'Soil_Type20',
'Soil_Type21', 'Soil_Type22', 'Soil_Type23', 'Soil_Type24',
'Soil_Type25', 'Soil_Type26', 'Soil_Type27', 'Soil_Type28',
'Soil_Type29', 'Soil_Type30', 'Soil_Type31', 'Soil_Type32',
'Soil_Type33', 'Soil_Type34', 'Soil_Type35', 'Soil_Type36',
'Soil_Type37', 'Soil_Type38', 'Soil_Type39', 'Soil_Type40']
soil_types = []
for index, row in test_df.iterrows():
dummy = 'Soil_Type_NA'
for col in columns:
if row[col] == 1:
dummy = col
break
soil_types.append(dummy)
test_df['Soil_Types'] = soil_types
print(pd.value_counts(test_df['Soil_Types']))
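# Sketch mirroring the training-set preprocessing above: with the combined
# 'Wilderness_Areas' and 'Soil_Types' columns in place, the one-hot columns can
# be dropped from the test set as well ('columns' still holds the soil-type names).
test_df.drop(axis=1, columns=['Wilderness_Area1', 'Wilderness_Area2', 'Wilderness_Area3',
                              'Wilderness_Area4'], inplace=True)
test_df.drop(axis=1, columns=columns, inplace=True)
print(test_df.columns)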
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from os.path import join as pjoin
import datetime
import io
import os
import json
import pytest
from pyarrow.compat import guid, u
from pyarrow.filesystem import LocalFileSystem
import pyarrow as pa
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
import numpy as np
import pandas as pd
import pandas.util.testing as tm
# Ignore these with pytest ... -m 'not parquet'
parquet = pytest.mark.parquet
def _write_table(table, path, **kwargs):
import pyarrow.parquet as pq
if isinstance(table, pd.DataFrame):
table = pa.Table.from_pandas(table)
pq.write_table(table, path, **kwargs)
return table
def _read_table(*args, **kwargs):
import pyarrow.parquet as pq
return pq.read_table(*args, **kwargs)
@parquet
def test_single_pylist_column_roundtrip(tmpdir):
for dtype in [int, float]:
filename = tmpdir.join('single_{}_column.parquet'
.format(dtype.__name__))
data = [pa.array(list(map(dtype, range(5))))]
table = pa.Table.from_arrays(data, names=('a', 'b'))
_write_table(table, filename.strpath)
table_read = _read_table(filename.strpath)
for col_written, col_read in zip(table.itercolumns(),
table_read.itercolumns()):
assert col_written.name == col_read.name
assert col_read.data.num_chunks == 1
data_written = col_written.data.chunk(0)
data_read = col_read.data.chunk(0)
assert data_written.equals(data_read)
def alltypes_sample(size=10000, seed=0):
np.random.seed(seed)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
# TODO(wesm): Test other timestamp resolutions now that arrow supports
# them
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
return df
@parquet
def test_pandas_parquet_2_0_rountrip(tmpdir):
import pyarrow.parquet as pq
df = alltypes_sample(size=10000)
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
assert b'pandas' in arrow_table.schema.metadata
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='ms')
table_read = pq.read_pandas(filename.strpath)
assert b'pandas' in table_read.schema.metadata
assert arrow_table.schema.metadata == table_read.schema.metadata
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_parquet_custom_metadata(tmpdir):
import pyarrow.parquet as pq
df = alltypes_sample(size=10000)
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
assert b'pandas' in arrow_table.schema.metadata
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='ms')
md = pq.read_metadata(filename.strpath).metadata
assert b'pandas' in md
js = json.loads(md[b'pandas'].decode('utf8'))
assert js['index_columns'] == ['__index_level_0__']
@parquet
def test_pandas_parquet_2_0_rountrip_read_pandas_no_index_written(tmpdir):
import pyarrow.parquet as pq
df = alltypes_sample(size=10000)
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
js = json.loads(arrow_table.schema.metadata[b'pandas'].decode('utf8'))
assert not js['index_columns']
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='ms')
table_read = pq.read_pandas(filename.strpath)
js = json.loads(table_read.schema.metadata[b'pandas'].decode('utf8'))
assert not js['index_columns']
assert arrow_table.schema.metadata == table_read.schema.metadata
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_parquet_1_0_rountrip(tmpdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, filename.strpath, version="1.0")
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
# We pass uint32_t as int64_t if we write Parquet version 1.0
df['uint32'] = df['uint32'].values.astype(np.int64)
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_column_selection(tmpdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16)
})
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, filename.strpath)
table_read = _read_table(filename.strpath, columns=['uint8'])
df_read = table_read.to_pandas()
tm.assert_frame_equal(df[['uint8']], df_read)
def _random_integers(size, dtype):
# We do not generate integers outside the int64 range
platform_int_info = np.iinfo('int_')
iinfo = np.iinfo(dtype)
return np.random.randint(max(iinfo.min, platform_int_info.min),
min(iinfo.max, platform_int_info.max),
size=size).astype(dtype)
def _test_dataframe(size=10000, seed=0):
np.random.seed(seed)
df = pd.DataFrame({
'uint8': _random_integers(size, np.uint8),
'uint16': _random_integers(size, np.uint16),
'uint32': _random_integers(size, np.uint32),
'uint64': _random_integers(size, np.uint64),
'int8': _random_integers(size, np.int8),
'int16': _random_integers(size, np.int16),
'int32': _random_integers(size, np.int32),
'int64': _random_integers(size, np.int64),
'float32': np.random.randn(size).astype(np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'strings': [tm.rands(10) for i in range(size)],
'all_none': [None] * size,
'all_none_category': [None] * size
})
# TODO(PARQUET-1015)
# df['all_none_category'] = df['all_none_category'].astype('category')
return df
@parquet
def test_pandas_parquet_native_file_roundtrip(tmpdir):
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.get_result()
reader = pa.BufferReader(buf)
df_read = _read_table(reader).to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_read_pandas_column_subset(tmpdir):
import pyarrow.parquet as pq
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.get_result()
reader = pa.BufferReader(buf)
df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas()
tm.assert_frame_equal(df[['strings', 'uint8']], df_read)
@parquet
def test_pandas_parquet_empty_roundtrip(tmpdir):
df = _test_dataframe(0)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.get_result()
reader = pa.BufferReader(buf)
df_read = _read_table(reader).to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_parquet_pyfile_roundtrip(tmpdir):
filename = tmpdir.join('pandas_pyfile_roundtrip.parquet').strpath
size = 5
df = pd.DataFrame({
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'strings': ['foo', 'bar', None, 'baz', 'qux']
})
arrow_table = pa.Table.from_pandas(df)
with open(filename, 'wb') as f:
_write_table(arrow_table, f, version="1.0")
data = io.BytesIO(open(filename, 'rb').read())
table_read = _read_table(data)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_parquet_configuration_options(tmpdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0
})
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
for use_dictionary in [True, False]:
_write_table(arrow_table, filename.strpath,
version="2.0",
use_dictionary=use_dictionary)
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
for compression in ['NONE', 'SNAPPY', 'GZIP']:
_write_table(arrow_table, filename.strpath,
version="2.0",
compression=compression)
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
def make_sample_file(df):
import pyarrow.parquet as pq
a_table = pa.Table.from_pandas(df)
buf = io.BytesIO()
_write_table(a_table, buf, compression='SNAPPY', version='2.0',
coerce_timestamps='ms')
buf.seek(0)
return pq.ParquetFile(buf)
@parquet
def test_parquet_metadata_api():
df = alltypes_sample(size=10000)
df = df.reindex(columns=sorted(df.columns))
fileh = make_sample_file(df)
ncols = len(df.columns)
# Series of sniff tests
meta = fileh.metadata
repr(meta)
assert meta.num_rows == len(df)
assert meta.num_columns == ncols + 1 # +1 for index
assert meta.num_row_groups == 1
assert meta.format_version == '2.0'
assert 'parquet-cpp' in meta.created_by
# Schema
schema = fileh.schema
assert meta.schema is schema
assert len(schema) == ncols + 1 # +1 for index
repr(schema)
col = schema[0]
repr(col)
assert col.name == df.columns[0]
assert col.max_definition_level == 1
assert col.max_repetition_level == 0
assert col.max_repetition_level == 0
assert col.physical_type == 'BOOLEAN'
assert col.logical_type == 'NONE'
with pytest.raises(IndexError):
schema[ncols + 1] # +1 for index
with pytest.raises(IndexError):
schema[-1]
# Row group
rg_meta = meta.row_group(0)
repr(rg_meta)
assert rg_meta.num_rows == len(df)
assert rg_meta.num_columns == ncols + 1 # +1 for index
@parquet
def test_compare_schemas():
df = alltypes_sample(size=10000)
fileh = make_sample_file(df)
fileh2 = make_sample_file(df)
fileh3 = make_sample_file(df[df.columns[::2]])
assert fileh.schema.equals(fileh.schema)
assert fileh.schema.equals(fileh2.schema)
assert not fileh.schema.equals(fileh3.schema)
assert fileh.schema[0].equals(fileh.schema[0])
assert not fileh.schema[0].equals(fileh.schema[1])
@parquet
def test_column_of_arrays(tmpdir):
df, schema = dataframe_with_arrays()
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df, schema=schema)
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='ms')
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_coerce_timestamps(tmpdir):
# ARROW-622
df, schema = dataframe_with_arrays()
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df, schema=schema)
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='us')
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
df_expected = df.copy()
for i, x in enumerate(df_expected['datetime64']):
if isinstance(x, np.ndarray):
df_expected['datetime64'][i] = x.astype('M8[us]')
tm.assert_frame_equal(df_expected, df_read)
with pytest.raises(ValueError):
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='unknown')
@parquet
def test_column_of_lists(tmpdir):
df, schema = dataframe_with_lists()
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df, schema=schema)
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='ms')
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_date_time_types():
t1 = pa.date32()
data1 = np.array([17259, 17260, 17261], dtype='int32')
a1 = pa.Array.from_pandas(data1, type=t1)
t2 = pa.date64()
data2 = data1.astype('int64') * 86400000
a2 = pa.Array.from_pandas(data2, type=t2)
t3 = pa.timestamp('us')
start = pd.Timestamp('2000-01-01').value / 1000
data3 = np.array([start, start + 1, start + 2], dtype='int64')
a3 = pa.Array.from_pandas(data3, type=t3)
t4 = pa.time32('ms')
data4 = np.arange(3, dtype='i4')
a4 = pa.Array.from_pandas(data4, type=t4)
t5 = pa.time64('us')
a5 = pa.Array.from_pandas(data4.astype('int64'), type=t5)
t6 = pa.time32('s')
a6 = pa.Array.from_pandas(data4, type=t6)
ex_t6 = pa.time32('ms')
ex_a6 = pa.Array.from_pandas(data4 * 1000, type=ex_t6)
t7 = pa.timestamp('ns')
start = pd.Timestamp('2001-01-01').value
data7 = np.array([start, start + 1000, start + 2000],
dtype='int64')
a7 = pa.Array.from_pandas(data7, type=t7)
t7_us = pa.timestamp('us')
start = pd.Timestamp('2001-01-01').value
data7_us = np.array([start, start + 1000, start + 2000],
dtype='int64') // 1000
a7_us = pa.Array.from_pandas(data7_us, type=t7_us)
table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7],
['date32', 'date64', 'timestamp[us]',
'time32[s]', 'time64[us]',
'time32_from64[s]',
'timestamp[ns]'])
# date64 as date32
# time32[s] to time32[ms]
# 'timestamp[ns]' to 'timestamp[us]'
expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7_us],
['date32', 'date64', 'timestamp[us]',
'time32[s]', 'time64[us]',
'time32_from64[s]',
'timestamp[ns]'])
_check_roundtrip(table, expected=expected, version='2.0')
# date64 as date32
# time32[s] to time32[ms]
# 'timestamp[ns]' is saved as INT96 timestamp
expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7],
['date32', 'date64', 'timestamp[us]',
'time32[s]', 'time64[us]',
'time32_from64[s]',
'timestamp[ns]'])
_check_roundtrip(table, expected=expected, version='2.0',
use_deprecated_int96_timestamps=True)
# Unsupported stuff
def _assert_unsupported(array):
table = pa.Table.from_arrays([array], ['unsupported'])
buf = io.BytesIO()
with pytest.raises(NotImplementedError):
_write_table(table, buf, version="2.0")
t7 = pa.time64('ns')
a7 = pa.Array.from_pandas(data4.astype('int64'), type=t7)
_assert_unsupported(a7)
@parquet
def test_fixed_size_binary():
t0 = pa.binary(10)
data = [b'fooooooooo', None, b'barooooooo', b'quxooooooo']
a0 = pa.array(data, type=t0)
table = pa.Table.from_arrays([a0],
['binary[10]'])
_check_roundtrip(table)
def _check_roundtrip(table, expected=None, **params):
buf = io.BytesIO()
_write_table(table, buf, **params)
buf.seek(0)
if expected is None:
expected = table
result = _read_table(buf)
assert result.equals(expected)
@parquet
def test_multithreaded_read():
df = alltypes_sample(size=10000)
table = pa.Table.from_pandas(df)
buf = io.BytesIO()
_write_table(table, buf, compression='SNAPPY', version='2.0')
buf.seek(0)
table1 = _read_table(buf, nthreads=4)
buf.seek(0)
table2 = _read_table(buf, nthreads=1)
assert table1.equals(table2)
@parquet
def test_min_chunksize():
data = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D'])
table = pa.Table.from_pandas(data.reset_index())
buf = io.BytesIO()
_write_table(table, buf, chunk_size=-1)
buf.seek(0)
result = _read_table(buf)
assert result.equals(table)
with pytest.raises(ValueError):
_write_table(table, buf, chunk_size=0)
@parquet
def test_pass_separate_metadata():
import pyarrow.parquet as pq
# ARROW-471
df = alltypes_sample(size=10000)
a_table = pa.Table.from_pandas(df)
buf = io.BytesIO()
_write_table(a_table, buf, compression='snappy', version='2.0')
buf.seek(0)
metadata = pq.read_metadata(buf)
buf.seek(0)
fileh = pq.ParquetFile(buf, metadata=metadata)
tm.assert_frame_equal(df, fileh.read().to_pandas())
@parquet
def test_read_single_row_group():
import pyarrow.parquet as pq
# ARROW-471
N, K = 10000, 4
df = alltypes_sample(size=N)
a_table = pa.Table.from_pandas(df)
buf = io.BytesIO()
_write_table(a_table, buf, row_group_size=N / K,
compression='snappy', version='2.0')
buf.seek(0)
pf = pq.ParquetFile(buf)
assert pf.num_row_groups == K
row_groups = [pf.read_row_group(i) for i in range(K)]
result = pa.concat_tables(row_groups)
tm.assert_frame_equal(df, result.to_pandas())
@parquet
def test_read_single_row_group_with_column_subset():
import pyarrow.parquet as pq
N, K = 10000, 4
df = alltypes_sample(size=N)
a_table = pa.Table.from_pandas(df)
buf = io.BytesIO()
_write_table(a_table, buf, row_group_size=N / K,
compression='snappy', version='2.0')
buf.seek(0)
pf = pq.ParquetFile(buf)
cols = df.columns[:2]
row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)]
result = pa.concat_tables(row_groups)
tm.assert_frame_equal(df[cols], result.to_pandas())
@parquet
def test_parquet_piece_read(tmpdir):
import pyarrow.parquet as pq
df = _test_dataframe(1000)
table = pa.Table.from_pandas(df)
path = tmpdir.join('parquet_piece_read.parquet').strpath
_write_table(table, path, version='2.0')
piece1 = pq.ParquetDatasetPiece(path)
result = piece1.read()
assert result.equals(table)
@parquet
def test_parquet_piece_basics():
import pyarrow.parquet as pq
path = '/baz.parq'
piece1 = pq.ParquetDatasetPiece(path)
piece2 = pq.ParquetDatasetPiece(path, row_group=1)
piece3 = pq.ParquetDatasetPiece(
path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)])
assert str(piece1) == path
assert str(piece2) == '/baz.parq | row_group=1'
assert str(piece3) == 'partition[foo=0, bar=1] /baz.parq | row_group=1'
assert piece1 == piece1
assert piece2 == piece2
assert piece3 == piece3
assert piece1 != piece3
@parquet
def test_partition_set_dictionary_type():
import pyarrow.parquet as pq
set1 = pq.PartitionSet('key1', [u('foo'), u('bar'), u('baz')])
set2 = pq.PartitionSet('key2', [2007, 2008, 2009])
assert isinstance(set1.dictionary, pa.StringArray)
assert isinstance(set2.dictionary, pa.IntegerArray)
set3 = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)])
with pytest.raises(TypeError):
set3.dictionary
@parquet
def test_read_partitioned_directory(tmpdir):
fs = LocalFileSystem.get_instance()
base_path = str(tmpdir)
_partition_test_for_filesystem(fs, base_path)
@pytest.yield_fixture
def s3_example():
access_key = os.environ['PYARROW_TEST_S3_ACCESS_KEY']
secret_key = os.environ['PYARROW_TEST_S3_SECRET_KEY']
bucket_name = os.environ['PYARROW_TEST_S3_BUCKET']
import s3fs
fs = s3fs.S3FileSystem(key=access_key, secret=secret_key)
test_dir = guid()
bucket_uri = 's3://{0}/{1}'.format(bucket_name, test_dir)
fs.mkdir(bucket_uri)
yield fs, bucket_uri
fs.rm(bucket_uri, recursive=True)
@pytest.mark.s3
@parquet
def test_read_partitioned_directory_s3fs(s3_example):
from pyarrow.filesystem import S3FSWrapper
import pyarrow.parquet as pq
fs, bucket_uri = s3_example
wrapper = S3FSWrapper(fs)
_partition_test_for_filesystem(wrapper, bucket_uri)
# Check that we can auto-wrap
dataset = pq.ParquetDataset(bucket_uri, filesystem=fs)
dataset.read()
def _partition_test_for_filesystem(fs, base_path):
import pyarrow.parquet as pq
foo_keys = [0, 1]
    bar_keys = ['a', 'b', 'c']
partition_spec = [
['foo', foo_keys],
['bar', bar_keys]
]
N = 30
df = pd.DataFrame({
'index': np.arange(N),
'foo': np.array(foo_keys, dtype='i4').repeat(15),
'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2),
'values': np.random.randn(N)
}, columns=['index', 'foo', 'bar', 'values'])
_generate_partition_directories(fs, base_path, partition_spec, df)
dataset = pq.ParquetDataset(base_path, filesystem=fs)
table = dataset.read()
result_df = (table.to_pandas()
.sort_values(by='index')
.reset_index(drop=True))
expected_df = (df.sort_values(by='index')
.reset_index(drop=True)
.reindex(columns=result_df.columns))
expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys)
expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys)
assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all()
tm.assert_frame_equal(result_df, expected_df)
def _generate_partition_directories(fs, base_dir, partition_spec, df):
# partition_spec : list of lists, e.g. [['foo', [0, 1, 2],
# ['bar', ['a', 'b', 'c']]
# part_table : a pyarrow.Table to write to each partition
DEPTH = len(partition_spec)
def _visit_level(base_dir, level, part_keys):
name, values = partition_spec[level]
for value in values:
this_part_keys = part_keys + [(name, value)]
level_dir = pjoin(base_dir, '{0}={1}'.format(name, value))
fs.mkdir(level_dir)
if level == DEPTH - 1:
# Generate example data
file_path = pjoin(level_dir, 'data.parq')
filtered_df = _filter_partition(df, this_part_keys)
part_table = pa.Table.from_pandas(filtered_df)
with fs.open(file_path, 'wb') as f:
_write_table(part_table, f)
else:
_visit_level(level_dir, level + 1, this_part_keys)
_visit_level(base_dir, 0, [])
@parquet
def test_read_common_metadata_files(tmpdir):
import pyarrow.parquet as pq
N = 100
df = pd.DataFrame({
'index': np.arange(N),
'values': np.random.randn(N)
}, columns=['index', 'values'])
base_path = str(tmpdir)
data_path = pjoin(base_path, 'data.parquet')
table = pa.Table.from_pandas(df)
_write_table(table, data_path)
metadata_path = pjoin(base_path, '_metadata')
pq.write_metadata(table.schema, metadata_path)
dataset = pq.ParquetDataset(base_path)
assert dataset.metadata_path == metadata_path
common_schema = pq.read_metadata(data_path).schema
assert dataset.schema.equals(common_schema)
# handle list of one directory
dataset2 = pq.ParquetDataset([base_path])
assert dataset2.schema.equals(dataset.schema)
@parquet
def test_read_schema(tmpdir):
import pyarrow.parquet as pq
N = 100
df = pd.DataFrame({
'index': np.arange(N),
'values': np.random.randn(N)
}, columns=['index', 'values'])
data_path = pjoin(str(tmpdir), 'test.parquet')
table = pa.Table.from_pandas(df)
_write_table(table, data_path)
assert table.schema.equals(pq.read_schema(data_path))
def _filter_partition(df, part_keys):
predicate = np.ones(len(df), dtype=bool)
to_drop = []
for name, value in part_keys:
to_drop.append(name)
predicate &= df[name] == value
return df[predicate].drop(to_drop, axis=1)
@parquet
def test_read_multiple_files(tmpdir):
import pyarrow.parquet as pq
nfiles = 10
size = 5
dirpath = tmpdir.join(guid()).strpath
os.mkdir(dirpath)
test_data = []
paths = []
for i in range(nfiles):
df = _test_dataframe(size, seed=i)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
path = pjoin(dirpath, '{0}.parquet'.format(i))
table = pa.Table.from_pandas(df)
_write_table(table, path)
test_data.append(table)
paths.append(path)
# Write a _SUCCESS.crc file
with open(pjoin(dirpath, '_SUCCESS.crc'), 'wb') as f:
f.write(b'0')
def read_multiple_files(paths, columns=None, nthreads=None, **kwargs):
dataset = pq.ParquetDataset(paths, **kwargs)
return dataset.read(columns=columns, nthreads=nthreads)
result = read_multiple_files(paths)
expected = pa.concat_tables(test_data)
assert result.equals(expected)
# Read with provided metadata
metadata = pq.read_metadata(paths[0])
result2 = read_multiple_files(paths, metadata=metadata)
assert result2.equals(expected)
result3 = pa.localfs.read_parquet(dirpath, schema=metadata.schema)
assert result3.equals(expected)
# Read column subset
to_read = [result[0], result[2], result[6], result[result.num_columns - 1]]
result = pa.localfs.read_parquet(
dirpath, columns=[c.name for c in to_read])
expected = pa.Table.from_arrays(to_read, metadata=result.schema.metadata)
assert result.equals(expected)
# Read with multiple threads
pa.localfs.read_parquet(dirpath, nthreads=2)
# Test failure modes with non-uniform metadata
bad_apple = _test_dataframe(size, seed=i).iloc[:, :4]
bad_apple_path = tmpdir.join('{0}.parquet'.format(guid())).strpath
t = pa.Table.from_pandas(bad_apple)
_write_table(t, bad_apple_path)
bad_meta = pq.read_metadata(bad_apple_path)
with pytest.raises(ValueError):
read_multiple_files(paths + [bad_apple_path])
with pytest.raises(ValueError):
read_multiple_files(paths, metadata=bad_meta)
mixed_paths = [bad_apple_path, paths[0]]
with pytest.raises(ValueError):
read_multiple_files(mixed_paths, schema=bad_meta.schema)
with pytest.raises(ValueError):
read_multiple_files(mixed_paths)
@parquet
def test_dataset_read_pandas(tmpdir):
import pyarrow.parquet as pq
nfiles = 5
size = 5
dirpath = tmpdir.join(guid()).strpath
os.mkdir(dirpath)
test_data = []
frames = []
paths = []
for i in range(nfiles):
df = _test_dataframe(size, seed=i)
df.index = np.arange(i * size, (i + 1) * size)
df.index.name = 'index'
path = pjoin(dirpath, '{0}.parquet'.format(i))
table = pa.Table.from_pandas(df)
_write_table(table, path)
test_data.append(table)
frames.append(df)
paths.append(path)
dataset = pq.ParquetDataset(dirpath)
columns = ['uint8', 'strings']
result = dataset.read_pandas(columns=columns).to_pandas()
expected = pd.concat([x[columns] for x in frames])
tm.assert_frame_equal(result, expected)
@parquet
def test_dataset_read_pandas_common_metadata(tmpdir):
# ARROW-1103
import pyarrow.parquet as pq
nfiles = 5
size = 5
dirpath = tmpdir.join(guid()).strpath
os.mkdir(dirpath)
test_data = []
frames = []
paths = []
for i in range(nfiles):
df = _test_dataframe(size, seed=i)
df.index = pd.Index(np.arange(i * size, (i + 1) * size))
df.index.name = 'index'
path = pjoin(dirpath, '{0}.parquet'.format(i))
df_ex_index = df.reset_index(drop=True)
df_ex_index['index'] = df.index
table = pa.Table.from_pandas(df_ex_index,
preserve_index=False)
# Obliterate metadata
table = table.replace_schema_metadata(None)
assert table.schema.metadata is None
_write_table(table, path)
test_data.append(table)
frames.append(df)
paths.append(path)
# Write _metadata common file
table_for_metadata = pa.Table.from_pandas(df)
pq.write_metadata(table_for_metadata.schema,
pjoin(dirpath, '_metadata'))
dataset = pq.ParquetDataset(dirpath)
columns = ['uint8', 'strings']
result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([x[columns] for x in frames])
# Python Code implementation for Class_Reg ALGORITHM
import pandas as pd
import numpy as np
import itertools
from sklearn.model_selection import train_test_split
from models import *
from sklearn.metrics import accuracy_score,mean_squared_error
from tqdm import tqdm
import random
from sklearn.metrics import median_absolute_error
from sklearn.metrics import mean_absolute_error
import statistics
class class_reg(object):
""" A function combining classifiers and regressors"""
def __init__(self, data = None, X_cols = None,
y_col = None, test_size = 0.3,
validation_size = 0.2, epochs = 5,
metrics = 'wmape'):
self.data = data
self.X_cols = X_cols
self.y_col = y_col
self.test_size = test_size
self.validation_size = validation_size
self.epochs = epochs
self.metrics = metrics
self.test_X = None
self.test_y = None
self.classifier = None
self.regressor = None
self.mets = None
def fitted(self):
data = self.data
X_cols = self.X_cols
y_col = self.y_col
test_size = self.test_size
validation_size = self.validation_size
epochs = self.epochs
metrics = self.metrics
mape_vals = []
epoch_num = 0
X = data[X_cols]
y = pd.DataFrame(data[y_col])
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size = test_size,
random_state = 0)
y_test = list(processing.avgfit(list(y_test[y_col])))
dataset = []
for i in range(len(X_train)):
dataset.append(list(X_train.iloc[i]))
dataset = pd.DataFrame(dataset)
cols = []
for i in X_cols :
cols.append(i)
cols.append(y_col)
dataset[y_col] = y_train
dataset.columns = cols
self.test_X = X_test
self.test_y = y_test
self.train_X = X_train
self.train_y = y_train
for random_state in np.random.randint(0, 10000, epochs):
epoch_num = epoch_num + 1
X,y,n_classes = processing.split(dataset,
X_cols,
y_col)
X_train, X_test, y_train, y_test = processing.train_test(X,
y,
validation_size,
random_state)
X_train_list, y_train_list = processing.dataset_split_class(X_train ,
y_train[1],
y,
len(y),
n_classes,
'train')
epoch = str(epoch_num) + '/' + str(epochs)
print(' ')
print("Epoch " + epoch + ' :')
acc_conf, clf, reg = training.train(y, y_col,
X_train, y_train,
X_train_list, y_train_list,
X_test, y_test,
n_classes, random_state, metrics)
for acc_ in acc_conf:
mape_vals.append(acc_)
acc_vals, c, r = analysis.analyse(mape_vals,epochs)
self.acc_vals = acc_vals
classifier = clf[c]
regressor = []
for i in range(n_classes):
regressor.append(reg[i][r])
X_train = self.train_X
y_train = self.train_y
train = X_train
train[y_col] = y_train
X_train,y_train,n_classes = processing.split(train,
X_cols,
y_col)
classifier.fit(X_train, pd.DataFrame(y_train[1]))
X_train = processing.rem_col_name(X_train)
y_train = processing.rem_col_name(y_train)
X_train.columns = X_cols
#y_train.columns = [y_col]
X_train_list, y_train_list = processing.dataset_split_class(X_train ,
y_train[1],
y,
len(y),
n_classes,
'train')
for i in range(n_classes):
(regressor[i]).fit(X_train_list[i],y_train_list[i][0])
self.classifier = classifier
self.regressor = regressor
self.n_classes = n_classes
def fit(self, X, y, validation_size = 0.3, epochs = 1):
X_cols = X.columns
y_col = y.columns
X = processing.rem_col_name(X)
y = processing.rem_col_name(y)
X.columns = X_cols
y.columns = y_col
dataset = X
dataset[y_col] = y
epoch_num = 0
mape_vals = []
for random_state in np.random.randint(0, 10000, epochs):
epoch_num = epoch_num + 1
X,y,n_classes = processing.split(dataset,
X_cols,
y_col)
X_train, X_test, y_train, y_test = processing.train_test(X,
y,
validation_size,
random_state)
X_train_list, y_train_list = processing.dataset_split_class(X_train ,
y_train[1],
pd.DataFrame(y),
len(y),
n_classes,
'train')
epoch = str(epoch_num) + '/' + str(epochs)
print(' ')
print("Epoch " + epoch + ' :')
metrics = 'wmape'
acc_conf, clf, reg = training.train(y, y_col,
X_train, y_train,
X_train_list, y_train_list,
X_test, y_test,
n_classes, random_state, metrics)
for acc_ in acc_conf:
mape_vals.append(acc_)
acc_vals, c, r = analysis.analyse(mape_vals,epochs)
self.acc_vals = acc_vals
classifier = clf[c]
regressor = []
for i in range(n_classes):
regressor.append(reg[i][r])
X_train,y_train,n_classes = processing.split(dataset,X_cols,y_col)
classifier.fit(X_train, pd.DataFrame(y_train[1]))
X_train.columns = X_cols
X_train_list, y_train_list = processing.dataset_split_class(X_train ,
y_train[1],
y,
len(y),
n_classes,
'train')
for i in range(n_classes):
(regressor[i]).fit(X_train_list[i],y_train_list[i][0])
self.classifier = classifier
self.regressor = regressor
self.n_classes = n_classes
def predict(self, X):
clf = self.classifier
reg = self.regressor
if isinstance(X, pd.DataFrame):
pred = []
for i in range(len(X)):
arr = list(X.iloc[i])
pred.append(class_reg.pred(clf,reg,arr))
else:
X = ((np.array(X).reshape(1,-1)))
clf_pred = (clf.predict(X))[0]
class_ = ([int(s) for s in clf_pred.split() if s.isdigit()])[0]
pred = (reg[class_ - 1].predict(X))[0]
return(pred)
@classmethod
def pred(self,clf,reg,X):
X = ((np.array(X).reshape(1,-1)))
clf_pred = (clf.predict(X))[0]
class_ = ([int(s) for s in clf_pred.split() if s.isdigit()])[0]
pred = (reg[class_ - 1].predict(X))[0]
return(pred)
def performance(self):
clf = self.classifier
reg = self.regressor
data = self.data
X_cols = self.X_cols
y_col = self.y_col
test_size = self.test_size
X,y,n_classes = processing.split(data,
X_cols,
y_col)
mape_list = []
mse_list = []
for random_state in np.random.randint(0, 10000, 20):
X_train, X_test, y_train, y_test = processing.train_test(X,
y,
test_size,
random_state)
X_train_list, y_train_list = processing.dataset_split_class(X_train ,
y_train[1],
y,
len(y),
n_classes,
'train')
classi = clf
classi.fit(X_train, y_train[1])
regr = []
for i in range(n_classes):
regre_ = reg[i]
regre_.fit(X_train_list[i],y_train_list[i][0])
regr.append(regre_)
pred = []
for i in range(len(X_test)):
arr = list(X_test.iloc[i])
pred.append(class_reg.pred(classi, regr, arr))
mape = metric.wmape(list(y_test[0]), list(pred))
mse = mean_squared_error(list(y_test[0]),
pred,
squared = False)
mse = (np.sqrt(mse) - min(y_test[0]))/((max(y_test[0])) - min(y_test[0]))
mse = mse**2
mape_list.append(mape)
mse_list.append(mse)
mape = sum(mape_list)/len(mape_list)
mse = sum(mse_list)/len(mse_list)
mets = {'WMAPE' : mape, 'MSE' : mse}
self.mets = mets
return(mets)
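# Hypothetical usage sketch for class_reg (file name and column names are
# placeholders, not part of this module):
#
#     data = pd.read_csv('train.csv')
#     model = class_reg(data=data,
#                       X_cols=['feat_1', 'feat_2', 'feat_3'],
#                       y_col='target',
#                       test_size=0.3, epochs=5)
#     model.fitted()                       # searches classifier/regressor combinations
#     preds = model.predict(model.test_X)  # predictions for the held-out split
#     print(model.performance())           # {'WMAPE': ..., 'MSE': ...}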
class processing(object):
@classmethod
def avgfit(self,l):
self.l = l
        na = pd.isna(l)
"""
Changepoint Detection
=====================
You can detect trend and seasonality changepoints with just a few lines of code.
Provide your timeseries as a pandas dataframe with timestamp and value.
For example, to work with daily sessions data, your dataframe could look like this:
.. code-block:: python
import pandas as pd
df = pd.DataFrame({
"datepartition": ["2020-01-08-00", "2020-01-09-00", "2020-01-10-00"],
"macrosessions": [10231.0, 12309.0, 12104.0]
})
The time column can be any format recognized by ``pd.to_datetime``.
In this example, we'll load a dataset representing ``log(daily page views)``
# on the Wikipedia page for Peyton Manning.
It contains values from 2007-12-10 to 2016-01-20. More dataset info
`here <https://facebook.github.io/prophet/docs/quick_start.html>`_.
"""
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import plotly
from greykite.algo.changepoint.adalasso.changepoint_detector import ChangepointDetector
from greykite.framework.benchmark.data_loader_ts import DataLoaderTS
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.forecaster import Forecaster
from greykite.framework.templates.model_templates import ModelTemplateEnum
# Loads dataset into UnivariateTimeSeries
dl = DataLoaderTS()
ts = dl.load_peyton_manning_ts()
df = ts.df # cleaned pandas.DataFrame
# %%
# Detect trend change points
# --------------------------
# Let's plot the original timeseries.
# There are actually trend changes within this data set.
# The `~greykite.framework.input.univariate_time_series.UnivariateTimeSeries`
# class is used to store a timeseries and to provide basic description and plotting functions.
# The ``load_peyton_manning_ts`` function automatically returns a ``UnivariateTimeSeries`` instance,
# however, for any ``df``, you can always initialize a ``UnivariateTimeSeries`` instance and
# do further explorations.
# (The interactive plot is generated by ``plotly``: **click to zoom!**)
fig = ts.plot()
plotly.io.show(fig)
# %%
# `~greykite.algo.changepoint.adalasso.changepoint_detector.ChangepointDetector`
# utilizes pre-filters, regularization with regression based models, and
# post-filters to find time points where trend changes.
#
# To create a simple trend changepoint detection model, we first initialize the
# `~greykite.algo.changepoint.adalasso.changepoint_detector.ChangepointDetector` class,
# then run its attribute function ``find_trend_changepoints``.
model = ChangepointDetector()
res = model.find_trend_changepoints(
df=df, # data df
time_col="ts", # time column name
value_col="y") # value column name
pd.DataFrame({"trend_changepoints": res["trend_changepoints"]}) # prints a dataframe showing the result
# %%
# The code above runs trend changepoint detection with the default parameters.
# We may visualize the detection results by plotting it with the attribute
# function ``plot``.
fig = model.plot(plot=False) # plot = False returns a plotly figure object.
plotly.io.show(fig)
# %%
# There might be too many changepoints with the default parameters.
# We could customize the parameters to meet individual requirements.
#
# To understand the parameters, we introduce a little bit of the background
# knowledge. The algorithm first does a mean aggregation to eliminate small
# fluctuations/seasonality effects (``resample_freq``). This avoids the trend
# picking up small fluctuations/seasonality effects.
#
# Then a great number of potential changepoints are placed uniformly over the
# whole time span (specified by time between changepoints ``potential_changepoint_distance``
# or number of potential changepoints ``potential_changepoint_n``
# , the former overrides the latter).
#
# The adaptive lasso (more info
# at `adalasso <http://users.stat.umn.edu/~zouxx019/Papers/adalasso.pdf>`_)
# is used to shrink insignificant changepoints' coefficients to zero.
# The initial estimator for adaptive lasso could be one of "ols", "ridge"
# and "lasso" (``adaptive_lasso_initial_estimator``). The regularization
# strength of adaptive lasso is also controllable by users
# (``regularization_strength``, between 0.0 and 1.0, greater values imply
# fewer changepoints. ``None`` triggers cross-validation to select the best
# tuning parameter based on prediction performance).
#
# Yearly seasonality effect is too long to be eliminated by aggregation, so
# fitting it with trend is recommended (``yearly_seasonality_order``).
# This allows changepoints to distinguish trend from yearly seasonality.
#
# Putting changepoints too close to the end of data is not recommended,
# because we may not have enough data to fit the final trend,
# especially in forecasting tasks. Therefore, one could specify how far
# from the end changepoints are not allowed (specified by the time from the end
# of data ``no_changepoint_distance_from_end`` or proportion of data from the end
# ``no_changepoint_proportion_from_end``, the former overrides the latter).
#
# Finally, a post-filter is applied to eliminate changepoints that are too close
# (``actual_changepoint_min_distance``).
#
# The following parameter combination uses longer aggregation with less potential
# changepoints placed and higher yearly seasonality order. Changepoints are not
# allowed in the last 20% of the data
model = ChangepointDetector() # it's also okay to omit this and re-use the old instance
res = model.find_trend_changepoints(
df=df, # data df
time_col="ts", # time column name
value_col="y", # value column name
yearly_seasonality_order=15, # yearly seasonality order, fit along with trend
regularization_strength=0.5, # between 0.0 and 1.0, greater values imply fewer changepoints, and 1.0 implies no changepoints
resample_freq="7D", # data aggregation frequency, eliminate small fluctuation/seasonality
potential_changepoint_n=25, # the number of potential changepoints
no_changepoint_proportion_from_end=0.2) # the proportion of data from end where changepoints are not allowed
| pd.DataFrame({"trend_changepoints": res["trend_changepoints"]}) | pandas.DataFrame |
import os
import pandas as pd
import src.merge as merge
import src.utils as cutil
raw_data_dir = str(cutil.DATA_RAW / "usa")
int_data_dir = str(cutil.DATA_INTERIM / "usa")
proc_data_dir = str(cutil.DATA_PROCESSED / "adm1")
def main():
add_testing_regime = True
output_csv_name = "USA_processed.csv"
out_dir = proc_data_dir
cases_data = pd.read_csv(os.path.join(int_data_dir, "usa_usafacts_state.csv"))
cases_data["date"] = pd.to_datetime(cases_data["date"])
# drop any cases data columns that are all null
cases_data = cases_data.dropna(how="all", axis=1)
policy_data = pd.read_csv(
os.path.join(int_data_dir, "USA_policy_data_sources.csv"), encoding="latin"
)
# drop any rows which are all nan
policy_data = policy_data.dropna(how="all", axis=0)
policy_data = policy_data.rename(columns={"Optional": "optional"})
policy_data = policy_data.rename(columns={"date": "date_start"})
policy_data.loc[:, "date_start"] = | pd.to_datetime(policy_data["date_start"]) | pandas.to_datetime |
from multiprocessing import Pool
from os import makedirs
from os.path import join as pjoin
from traceback import print_exc
import warnings
import cv2
import numpy as np
import pandas as pd
from skimage.io import imsave
from skimage import img_as_ubyte
from lib.config import gen_config, class_labels
from lib.utils import (debug, display_imgs, info, load_img, str_to_labels, warn)
def filter_(n):
def filter_fixed(row):
return n in str_to_labels(row['Target'])
return filter_fixed
def crop_one_id(task):
try:
id_ = task['id_']
group = task['row']['group']
n_windows = task['row']['n_windows']
config = task['config']
n_candidate_windows = config['n_candidate_windows']
# random_if_no_centroid = config['random_if_no_centroid']
output_windowed_imgs_path = config['output_windowed_imgs_path']
output_windowed_imgs_extension = config['output_windowed_imgs_extension']
rgby_img = load_img(
id_,
group=group,
)
rgby_thumbnail = cv2.resize(rgby_img, (256, 256))
green_thumbnail = rgby_thumbnail[:, :, 1].astype(float)
# the reason we choose centers first is to ensure that every possible "interest point"
# in the picture will get equal chance of being captured. if we choose from all contained windows
# with equal chance, then the interest points around the edge of the picture will get
# less chance of being captured.
random_centers = np.random.rand(n_candidate_windows, 2) * (1 - config['win_size'] / 2) + config['win_size'] / 4
random_centers = np.minimum(random_centers, 1 - config['win_size'] / 2)
random_centers = np.maximum(random_centers, config['win_size'] / 2)
windows = [
np.array(
[
x - config['win_size'] / 2,
y - config['win_size'] / 2,
x + config['win_size'] / 2,
y + config['win_size'] / 2,
]
) for x, y in random_centers
]
img_size = green_thumbnail.shape[0]
greenest_windows = []
for _ in range(n_windows):
green_totals = []
for window in windows:
left, top, right, bottom = (window * img_size).astype(int)
green_totals.append(np.sum(green_thumbnail[top:bottom, left:right]))
greenest_window = windows.pop(np.argmax(green_totals))
left, top, right, bottom = (greenest_window * img_size).astype(int)
green_thumbnail[top:bottom, left:right] *= 0.6
greenest_windows.append(greenest_window)
if len(windows) == 0:
break
img_size = rgby_img.shape[0]
# dot_size = max(round(img_size / 64), 1)
img_ids = []
source_img_ids = []
for i_window, window in enumerate(greenest_windows):
left, top, right, bottom = (window * img_size).astype(int)
if output_windowed_imgs_path is not None:
cropped_img = rgby_img[top:bottom, left:right]
image_id = f'{id_}_{i_window}'
img_ids.append(image_id)
source_img_ids.append(id_)
for i_channel, channel in enumerate(['red', 'green', 'blue', 'yellow']):
img_filename = f'{image_id}_{channel}{output_windowed_imgs_extension}'
cropped_img_channel = cropped_img[:, :, i_channel]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cropped_img_channel = img_as_ubyte(cropped_img_channel)
imsave(pjoin(output_windowed_imgs_path, img_filename), cropped_img_channel)
out_df = pd.DataFrame(np.array(greenest_windows), columns=['left', 'top', 'right', 'bottom'])
out_df.index = pd.Index(img_ids, name='img_id')
out_df['source_img_id'] = source_img_ids
return {
'id_': id_,
'df': out_df,
# 'blue_channel': (
# blue_channel,
# '\n'.join([
# f'{id_}',
# f'pct = {pct}',
# f'avg_of_brightest_5% = {avg_of_brightest}',
# ]),
# ),
# 'thresholded_img': (
# thresholded_img,
# '\n'.join([
# f'{id_}',
# f'ret = {ret}',
# f'th = {th}',
# ]),
# ),
# 'labeled_img': (
# labeled_img,
# '\n'.join([
# f'{id_}',
# f'ret = {ret}',
# f'greenest_windows = {greenest_windows}',
# ]),
# ),
}
except Exception as e:
debug(f'Error in processing {id_}')
print_exc()
return {
'id_': id_,
'df': pd.DataFrame({
'img_ids': id_,
'left': ['ERROR'],
'top': [str(e)],
}).set_index('img_ids'),
}
def crop(config):
if config['output_windowed_imgs_path'] is not None:
makedirs(config['output_windowed_imgs_path'], exist_ok=True)
if type(config['set_n_windows']) is str:
set_n_windows_anno = pd.read_csv(config['set_n_windows'], index_col=0)
n_classes = 28
xs = []
for target_str in set_n_windows_anno['Target']:
targets = str_to_labels(target_str)
x = np.zeros(n_classes, dtype='int')
x[targets] = 1
xs.append(x)
xx = np.array(xs)
n_samples_per_class = np.sum(xx, axis=0)
cut_summary = pd.DataFrame(
{
'organelle': class_labels,
'n_samples': n_samples_per_class,
'n_windows': np.round(1500 / n_samples_per_class).astype(int) + 1
},
index=range(n_classes),
)
print(cut_summary)
estimated_n_windows = np.sum(cut_summary['n_samples'].values * cut_summary['n_windows'].values)
print(f'estimated_n_windows = {estimated_n_windows}')
def determine_n_windows_fn(id_):
if type(config['set_n_windows']) is str:
targets = str_to_labels(set_n_windows_anno.loc[id_, 'Target'])
n_windows = np.max(cut_summary.iloc[targets]['n_windows'].values)
return n_windows
else:
return config['set_n_windows']
anno = config['anno'].copy()
anno['n_windows'] = [determine_n_windows_fn(id_) for id_ in anno.index]
crop_task_list = [{
'id_': id_,
'row': row,
'config': config,
} for id_, row in anno.iterrows()]
with Pool(config['n_threads']) as p:
result_iter = p.imap_unordered(crop_one_id, crop_task_list)
result_list = []
for i_result, result in enumerate(result_iter):
info(f"({i_result}/{len(crop_task_list)}) {result['id_']} -> ({len(result['df'])})")
result_list.append(result)
if config['output_windowed_imgs_path'] is not None:
        windowed_anno = pd.concat([x['df'] for x in result_list])
# Remarks
#
# Download is done manually via an S3 client -> no more get requests
# Unzipping can be done without a script and should be done in the folders based on the id,
# so we don't build one big dataframe with all the data
#
# IMPORTS #
import pandas as pd # Data processing
import numpy as np # Data processing
import os
import warnings
import physi_calc # Physiological calculaction
import loc_clustering
from datetime import datetime
warnings.filterwarnings("ignore")
# GLOBAL VARIABLES #
current_dir = os.path.dirname(os.path.realpath(__file__))
# dir_name_unzipped = os.path.join(current_dir, 'unzipped/')
dir_name_unzipped = os.path.join(current_dir, 'max/unzipped/')
def run_preprocessing():
for folder in os.listdir(dir_name_unzipped):
folder = '{0}/'.format(folder)
measurements = {}
for i, file in enumerate(os.listdir(dir_name_unzipped + folder)):
if file.endswith(".json"):
                temp = pd.read_json(dir_name_unzipped + folder + file)
import numpy as np
import pandas as pd
import hydrostats.data as hd
import hydrostats.analyze as ha
import hydrostats.visual as hv
import HydroErr as he
import matplotlib.pyplot as plt
import os
from netCDF4 import Dataset
# Put all the directories (different states and resolutions) and corresponding NetCDF files into lists.
list_of_files = []
list_of_dir = []
streamflow_dict = {}
list_streams = []
for i in os.listdir('/home/chrisedwards/Documents/rapid_output/mult_res_output'):
for j in os.listdir(os.path.join('/home/chrisedwards/Documents/rapid_output/mult_res_output', i)):
list_of_files.append(os.path.join('/home/chrisedwards/Documents/rapid_output/mult_res_output', i, j,
'Qout_erai_t511_24hr_19800101to20141231.nc'))
list_of_dir.append(os.path.join('/home/chrisedwards/Documents/rapid_output/mult_res_output', i, j))
list_of_dir.sort()
list_of_files.sort()
list_of_states=['az', 'id', 'mo', 'ny', 'or', 'col',
'az', 'id', 'mo', 'ny', 'or', 'col',
'az', 'id', 'mo', 'ny', 'or', 'col']
list_of_states.sort()
# Loop through the lists to create the csv for each stream, in each resolution.
for file, direc, state in zip(list_of_files, list_of_dir, list_of_states):
# Call the NetCDF file.
nc = Dataset(file)
nc.variables.keys()
nc.dimensions.keys()
# Define variables from the NetCDF file.
riv = nc.variables['rivid'][:].tolist()
lat = nc.variables['lat'][:]
lon = nc.variables['lon'][:]
Q = nc.variables['Qout'][:]
sQ = nc.variables['sQout'][:]
time = nc.variables['time'][:].tolist()
# Convert time from 'seconds since 1970' to the actual date.
dates = pd.to_datetime(time, unit='s', origin='unix')
temp_dictionary = {}
counter = 0
for n in riv:
        stream_id = state + '-{}'.format(n)
        temp_dictionary['{}'.format(stream_id)] = pd.DataFrame(data=Q[:, counter], index=dates, columns=[stream_id])
        streamflow_dict.update(temp_dictionary)
        list_streams.append(stream_id)
counter += 1
list_streams_condensed = list(set(list_streams))
list_streams_condensed.sort()
# Now there is a dictionary called 'streamflow_dict' that has the 35-yr time series stored in a pandas DataFrame.
# Each array has the datetime and flowrate.
# Each data frame is named in the format '{state}-{streamID}' (eg: 'az-7' or 'col-9').
# There are a total of 180 streams, or 180 keys in the dictionary: streamflow_dict['az-7']
# list_streams_condensed = list of all the stream names, or names of the data frames.
# ***********************************************************************************************************
# Extract specific dataframe for a specific stream. This only includes the watershed mouths.
az_lowres = streamflow_dict['az-9']
az_medres = streamflow_dict['az-21']
az_highres = streamflow_dict['az-69']
id_lowres = streamflow_dict['id-8']
id_medres = streamflow_dict['id-17']
id_highres = streamflow_dict['id-39']
mo_lowres = streamflow_dict['mo-7']
mo_medres = streamflow_dict['mo-15']
mo_highres = streamflow_dict['mo-43']
ny_lowres = streamflow_dict['ny-9']
ny_medres = streamflow_dict['ny-20']
ny_highres = streamflow_dict['ny-48']
or_lowres = streamflow_dict['or-7']
or_medres = streamflow_dict['or-16']
or_highres = streamflow_dict['or-51']
# This list holds all 18 DataFrames, which each hold th 35-yr Time Series Data
list_riv_mouth = [az_lowres, az_medres, az_highres, id_lowres, id_medres, id_highres,
mo_lowres, mo_medres, mo_highres, ny_lowres, ny_medres, ny_highres,
or_lowres, or_medres, or_highres]
# Extract specific dataframe for a specific stream. This section is for streams above the mouth.
az_60 = streamflow_dict['az-60']
mo_50 = streamflow_dict['mo-50']
mo_51 = streamflow_dict['mo-51']
ny_8 = streamflow_dict['ny-8']
ny_19 = streamflow_dict['ny-19']
ny_47 = streamflow_dict['ny-47']
ny_18 = streamflow_dict['ny-18']
ny_46 = streamflow_dict['ny-46']
or_20 = streamflow_dict['or-20']
or_58 = streamflow_dict['or-58']
or_15 = streamflow_dict['or-15']
or_47 = streamflow_dict['or-47']
# -----------------------------Format Gauge Data---------------------------------------------------
# Catchment Outlets:
az_obs_full = pd.read_csv('/home/chrisedwards/Documents/gauge_data/09494000_1-1-1980_12-31-2014.csv', index_col=0)
az_obs_cms = az_obs_full.drop(columns=["Flow-cfs", "Estimation"])
id_obs_full = pd.read_csv('/home/chrisedwards/Documents/gauge_data/13340600_1-1-1980_12-31-2014.csv', index_col=0)
id_obs_cms = id_obs_full.drop(columns=["Flow-cfs", "Estimation"])
mo_obs_full = pd.read_csv('/home/chrisedwards/Documents/gauge_data/07014500_1-1-1980_12-31-2014.csv', index_col=0)
mo_obs_cms = mo_obs_full.drop(columns=["Flow-cfs", "Estimation"])
ny_obs_full = pd.read_csv('/home/chrisedwards/Documents/gauge_data/01413500_1-1-1980_12-31-2014.csv', index_col=0)
ny_obs_cms = ny_obs_full.drop(columns=["Flow-cfs", "Estimation"])
or_obs_full = pd.read_csv('/home/chrisedwards/Documents/gauge_data/14306500_1-1-1980_12-31-2014.csv', index_col=0)
or_obs_cms = or_obs_full.drop(columns=["Flow-cfs", "Estimation"])
# Upstream Gauges (Not OUTLET):
az_60_obs = pd.read_csv('/home/chrisedwards/Documents/gauge_data/09492400_1-1-1980_12-31-2014.csv', index_col=0)
az_60_obs_cms = az_60_obs.drop(columns=["Flow-cfs", "Estimation"])
mo_50_obs = pd.read_csv('/home/chrisedwards/Documents/gauge_data/07013000_1-1-1980_12-31-2014.csv', index_col=0)
mo_50_obs_cms = mo_50_obs.drop(columns=["Flow-cfs", "Estimation"])
mo_51_obs = | pd.read_csv('/home/chrisedwards/Documents/gauge_data/07014000_3-5-2007_12-31-2014.csv', index_col=0) | pandas.read_csv |
# IMPORT
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import TransformerMixin, BaseEstimator
import re
import scipy
from scipy import sparse
import time
import gc
from IPython.display import display, HTML
from pprint import pprint
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
pd.options.display.max_colwidth=300
# Seed (assumes 'pl' refers to pytorch_lightning, which was not imported above)
import pytorch_lightning as pl
pl.seed_everything(seed=42)
class JigsawRidgeModel:
def __init__(self,path,folds):
self.path=path
self.df_val=pd.read_csv(f"{self.path}/jigsaw-toxic-severity-rating/validation_data.csv")
self.df_sub=pd.read_csv(f"{self.path}/jigsaw-toxic-severity-rating/comments_to_score.csv")
self.folds=folds
def train_and_predict(self,file_prefix):
start_time = time.time()
val_preds_arr1 = np.zeros((self.df_val.shape[0], self.folds))
val_preds_arr2 = np.zeros((self.df_val.shape[0], self.folds))
test_preds_arr = np.zeros((self.df_sub.shape[0], self.folds))
for fld in tqdm(range(self.folds)):
print("\n\n")
print(f' ****************************** FOLD: {fld} ******************************')
df = pd.read_csv(f'{self.path}/folds/{file_prefix}{fld}.csv')
print(df.shape)
features = FeatureUnion([
("vect3", TfidfVectorizer(min_df= 3, max_df=0.5, analyzer = 'char_wb', ngram_range = (3,5))),
])
pipeline = Pipeline(
[
("features", features),
("clf", Ridge())
]
)
# Train the pipeline
pipeline.fit(df['text'].values.astype(str), df['y'])
feature_wts = sorted(list(zip(pipeline['features'].get_feature_names(),
np.round(pipeline['clf'].coef_,2) )),
key = lambda x:x[1],
reverse=True)
val_preds_arr1[:,fld] = pipeline.predict(self.df_val['less_toxic'])
val_preds_arr2[:,fld] = pipeline.predict(self.df_val['more_toxic'])
test_preds_arr[:,fld] = pipeline.predict(self.df_sub['text'])
print("\n--- %s seconds ---" % (time.time() - start_time))
return val_preds_arr1,val_preds_arr2, test_preds_arr
if __name__=="__main__":
trainer=JigsawRidgeModel("../input",7)
# jigsaw classification dataset
val_preds_arr1,val_preds_arr2, test_preds_arr=trainer.train_and_predict("df_fld")
d1=pd.DataFrame(val_preds_arr1)
d1.to_csv(f'../output/predictions/df1_1.csv', index=False)
d2=pd.DataFrame(val_preds_arr2)
d2.to_csv(f'../output/predictions/df1_2.csv', index=False)
d3=pd.DataFrame(test_preds_arr)
d3.to_csv(f'../output/predictions/df1_3.csv', index=False)
# jigsaw clean classification dataset
val_preds_arrc1,val_preds_arrc2, test_preds_arrc=trainer.train_and_predict("df_clean_fld")
d1=pd.DataFrame(val_preds_arrc1)
d1.to_csv(f'../output/predictions/df2_1.csv', index=False)
d2=pd.DataFrame(val_preds_arrc2)
d2.to_csv(f'../output/predictions/df2_2.csv', index=False)
d3=pd.DataFrame(test_preds_arrc)
d3.to_csv(f'../output/predictions/df2_3.csv', index=False)
# jigsaw ruddit dataset
val_preds_arr1r,val_preds_arr2r, test_preds_arrr=trainer.train_and_predict("df2_fld")
d1=pd.DataFrame(val_preds_arr1r)
d1.to_csv(f'../output/predictions/df3_1.csv', index=False)
d2=pd.DataFrame(val_preds_arr2r)
d2.to_csv(f'../output/predictions/df3_2.csv', index=False)
d3=pd.DataFrame(test_preds_arrr)
d3.to_csv(f'../output/predictions/df3_3.csv', index=False)
# jigsaw unhealthy comments dataset
val_preds_arr1u,val_preds_arr2u, test_preds_arru=trainer.train_and_predict("df3_fld")
d1= | pd.DataFrame(val_preds_arr1u) | pandas.DataFrame |
from collections import OrderedDict
import os
import logging.config
import logging
from bson.codec_options import CodecOptions
import time
import pymongo
import pandas as pd
import numpy as np
from originbar import OriginBar
from constant import *
class MainEngine(object):
    """
    Engine instance that drives the data import.
    """
    def __init__(self, futpath, mongoConf, logconfig):
        logging.config.dictConfig(logconfig)
        self.log = logging.getLogger('root')
        self.mongoConf = mongoConf
        self.futpath = futpath
        self.path = OrderedDict()  # paths of the raw CSV data
        self.collection = None
    def init(self):
        """
        :return:
        """
        # connect to the database
        self.dbConnect()
        # collect the CSV file paths
        self.loaddir()
        self.log.warning('Data import will start in 10 seconds!!!')
        time.sleep(10)
    def start(self):
        """
        :return:
        """
        count = 0
        for year, originBars in self.path.items():
            for ob in originBars:
                # clean the data
                if '主力' in ob.symbol:
                    # skip the continuous main / secondary-main contracts for now
                    continue
                # build the DataFrame
                df = self.getBarDf(ob)
                # add the trading day
                df = self.setTradingDay(df)
                # drop bars without any trades
                df = self.clearNoTrader(df)
                # save the data
                documents = df.to_dict('records')
                self.collection.insert_many(documents)
                self.log.info("{} saved {} records to mongodb".format(ob.symbol, df.shape[0]))
                count += df.shape[0]
                time.sleep(1)
            self.log.info('year {}: {} records saved in total'.format(year, count))
    def dbConnect(self):
        mongoConf = self.mongoConf
        db = pymongo.MongoClient(
            mongoConf['host'],
            mongoConf['port']
        )[mongoConf['dbn']]
        if mongoConf.get('username') is not None:
            # log in with the configured credentials
            db.authenticate(mongoConf['username'], mongoConf['password'])
        # confirm the connection is alive
        db.client.server_info()
        # make the collection timezone-aware
        self.collection = db[mongoConf['collection']].with_options(
            codec_options=CodecOptions(tz_aware=True, tzinfo=LOCAL_TIMEZONE))
    def loaddir(self):
        """
        :return:
        """
        root, dirs, files = next(os.walk(self.futpath))
        dirs.sort()
        for yearDir in dirs:
            year = yearDir[-4:]  # the year
            yearDirPath = os.path.join(root, yearDir)  # the files are bundled by year
            yearDirPath, dirs, files = next(os.walk(yearDirPath))
            originBars = []
            self.path[year] = originBars
            for f in files:
                if not f.endswith('.csv'):
                    # ignore non-csv files
                    continue
                # if __debug__ and 'rb1710' not in f:
                #     # debug: only handle the specified contract
                #     continue
                path = os.path.join(yearDirPath, f)
                ob = OriginBar(year, path)
                originBars.append(ob)
            # if __debug__:
            #     self.log.warning('only taking one file {}'.format(ob.symbol))
            #     return
        self.log.info('About to import years {}'.format(str(list(self.path.keys()))))
    def getBarDf(self, ob):
        df = pd.read_csv(ob.path, encoding='GB18030')
        # drop the columns we do not need (market code, turnover)
        del df['市场代码']
        del df['成交额']
        df.columns = ['symbol', 'datetime', 'open', 'high', 'low', 'close', 'volume', 'openInterest']
        df.datetime = pd.to_datetime(df.datetime)
        df['date'] = df.datetime.apply(lambda dt: dt.strftime('%Y%m%d'))
        df['time'] = df.datetime.apply(lambda dt: dt.time())
        return df
    def clearNoTrader(self, df):
        """
        Drop bars that had no trades (zero volume).
        :param df:
        :return:
        """
        return df[df.volume != 0]
    def setTradingDay(self, df):
        # bars inside the day session: the bar's own date is the trading day
        td = df.time.apply(lambda t: DAY_TRADING_START_TIME <= t <= DAY_TRADING_END_TIME)
        tds = []
        for i, isTrading in enumerate(td):
            if isTrading:
                tds.append(df.date[i])
            else:
                tds.append(np.nan)
        # bars outside the day session are left blank here; the nearest valid value is used as their trading day
tradingDay = | pd.Series(tds) | pandas.Series |
import unittest
import pandas as pd
from featurefilter import TargetCorrelationFilter
def test_low_continuous_correlation():
train_df = pd.DataFrame({'A': [0, 0, 1, 1], 'Y': [0, 1, 0, 1]})
target_correlation_filter = TargetCorrelationFilter(target_column='Y')
train_df = target_correlation_filter.fit(train_df)
assert target_correlation_filter.columns_to_drop == ['A']
def test_high_negative_continuous_correlation():
train_df = pd.DataFrame({'A': [0, 0], 'B': [1, 0], 'Y': [0, 1]})
test_df = pd.DataFrame({'A': [0, 1], 'B': [1, 1], 'Y': [0, 1]})
target_correlation_filter = TargetCorrelationFilter(target_column='Y')
train_df = target_correlation_filter.fit_transform(train_df)
test_df = target_correlation_filter.transform(test_df)
# Make sure column 'B' is dropped for both train and test set
# Also, column 'A' must not be dropped for the test set even though its
# correlation in the test set is above the threshold
assert train_df.equals(pd.DataFrame({'A': [0, 0], 'Y': [0, 1]}))
assert test_df.equals(pd.DataFrame({'A': [0, 1], 'Y': [0, 1]}))
def test_high_positive_continuous_correlation():
train_df = pd.DataFrame({'A': [0, 0], 'B': [0, 1], 'Y': [0, 1]})
test_df = pd.DataFrame({'A': [0, 1], 'B': [1, 1], 'Y': [0, 1]})
target_correlation_filter = TargetCorrelationFilter(target_column='Y')
train_df = target_correlation_filter.fit_transform(train_df)
test_df = target_correlation_filter.transform(test_df)
# Make sure column 'B' is dropped for both train and test set
# Also, column 'A' must not be dropped for the test set even though its
# correlation in the test set is above the threshold
assert train_df.equals(pd.DataFrame({'A': [0, 0], 'Y': [0, 1]}))
assert test_df.equals(pd.DataFrame({'A': [0, 1], 'Y': [0, 1]}))
def test_low_categorical_correlation():
train_df = pd.DataFrame({'A': ['a', 'a'], 'B': ['b', 'a'], 'Y': [0, 1]})
test_df = pd.DataFrame({'A': ['a', 'b'], 'B': ['b', 'b'], 'Y': [0, 1]})
target_correlation_filter = TargetCorrelationFilter(target_column='Y')
train_df = target_correlation_filter.fit_transform(train_df)
test_df = target_correlation_filter.transform(test_df)
# Make sure column 'B' is dropped for both train and test set
# Also, column 'A' must not be dropped for the test set even though its
# correlation in the test set is above the threshold
assert train_df.equals( | pd.DataFrame({'A': ['a', 'a'], 'Y': [0, 1]}) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : <NAME>
# @Version : 1.0.2
#
# ecom_finder
# Standard library imports
import re as __re__
import urllib as __ulp__
# Third party imports
import pandas as __pd__
import requests as __requests__
import speech_recognition as __sr__
from bs4 import BeautifulSoup as __BS__
# Get items list From amazon.in
# Inputs: text (Item Name)
# Outputs: pandas dataframe of items
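# Usage sketch (illustrative only, not from the original module): the helper
# defined below would be called with a plain product name, e.g.
#   df = __get_items_from_amazon__("usb cable")
#   df[["name", "price", "rating", "url"]].head()
# "usb cable" is just an example query, not something the module prescribes.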
def __get_items_from_amazon__(text):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0",
"Accept-Encoding": "gzip, deflate",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"DNT": "1",
"Connection": "close",
"Upgrade-Insecure-Requests": "1",
}
query = "https://www.amazon.in/s?" + __ulp__.parse.urlencode({"k": text.lower()})
code = __requests__.get(query, headers=headers)
s = __BS__(code.text, "html.parser")
items = s.findAll("div", attrs={"class": "a-section"})
alls = []
for item in items:
name = item.find(
"span", attrs={"class": "a-size-medium a-color-base a-text-normal"}
)
price = item.find("span", attrs={"class": "a-price-whole"})
url = item.find("a", attrs={"class": "a-link-normal a-text-normal"})
rating = item.find("span", attrs={"class": "a-icon-alt"})
        if name is None or price is None:
            continue
        else:
            alls.append(
                {
                    "name": name.text,
                    "price": price.text,
                    # the search above hits amazon.in, so build the link on the same domain
                    "url": "https://www.amazon.in" + url["href"]
                    if url is not None and url.has_attr("href")
                    else None,
                    "rating": float(rating.text.split(" ")[0])
                    if rating is not None
                    else None,
                    "provider": "Amazon",
                }
            )
return | __pd__.DataFrame(alls) | pandas.DataFrame |
import pandas as pd
import numpy as np
def load_and_process(url1, url2):
rec1data = (
| pd.read_csv("../data/processed/rec1data.csv") | pandas.read_csv |
import pytest
import pandas as pd
import numpy as np
from src.main import create_app
@pytest.fixture(scope="session")
def test_client():
flask_app = create_app(
db_config={"db": "mongoenginetest", "host": "mongomock://localhost"},
testing=True,
)
with flask_app.test_client() as testing_client:
with flask_app.app_context():
yield testing_client
def generate_temp_dataset():
data = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
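    # For reference: the fixture built in setup_method yields the six tuples
    # ('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
    # ('baz', 'two'), ('qux', 'one'), ('qux', 'two')
    # with names ('first', 'second'); test_iter below asserts exactly this order.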
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that is works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
# coding: utf8
from collections import deque
from collections import Counter
# noinspection PyPackageRequirements
import pytest
from pandas import DataFrame
# noinspection PyProtectedMember
from dfqueue.core.dfqueue import QueuesHandler, QueueHandlerItem, QueueBehaviour
def test_singleton():
handler_a = QueuesHandler()
handler_b = QueuesHandler()
assert id(handler_a) != id(handler_b)
assert id(handler_a._QueuesHandler__instance) == id(handler_b._QueuesHandler__instance)
assert handler_a.default_queue_name == handler_b.default_queue_name
def test_valid_get_item():
handler = QueuesHandler()
default_queue_name = handler.default_queue_name
queue_data = handler[default_queue_name]
assert isinstance(queue_data, dict)
assert len(queue_data) == len(QueueHandlerItem)
assert all([item in queue_data for item in QueueHandlerItem])
assert isinstance(queue_data[QueueHandlerItem.QUEUE], deque)
assert queue_data[QueueHandlerItem.DATAFRAME] is None
assert isinstance(queue_data[QueueHandlerItem.MAX_SIZE], int)
def test_invalid_get_item():
handler = QueuesHandler()
invalid_queue_name = "UNKNOWN"
with pytest.raises(AssertionError):
handler[invalid_queue_name]
@pytest.mark.parametrize("queue_iterable,dataframe,max_size,counter,behaviour", [
(deque(), DataFrame(), 1, Counter(), QueueBehaviour.LAST_ITEM),
(deque((1, {"A": "a", "B": "b"})), DataFrame(), 1, {1: Counter({"A": 1, "B": 1})},
QueueBehaviour.ALL_ITEMS),
(deque(), DataFrame(),
import psycopg2
from psycopg2.extras import execute_values
from psycopg2.extensions import register_adapter,AsIs
import numpy as np
import pandas as pd
from .utils import map_column_types
class clientPsql():
def __init__(
self,
host:str,
user:str,
password:str,
port:str='5432',
db_name:str = 'postgres'
) -> None:
#credentials
self.__host = host
self.__user = user
self.__password = password
self.__port=port
self.database=db_name
#register adapters
register_adapter(np.int64,AsIs)
register_adapter(np.bool_,AsIs)
def exec_query(
self,
query,
chunksize=1000
):
column_name = None
response = None
try:
conn = psycopg2.connect(
host=self.__host,
user=self.__user,
password=self.__password,
database=self.database,
)
with conn.cursor() as cur:
cur.itersize = chunksize
cur.execute(query)
if cur.description:
column_name = [desc[0] for desc in cur.description]
response = cur.fetchall()
conn.commit()
except (Exception, psycopg2.DatabaseError) as e:
print ('Error executing query: ',e)
conn = None
finally:
if conn is not None:
conn.close()
if column_name is not None:
try:
return pd.DataFrame(np.array(response),columns=column_name)
except:
return pd.DataFrame(columns=column_name)
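# --- Added usage sketch (not part of the original module) ---
# A minimal, hedged illustration of how clientPsql might be called; the host,
# credentials and query below are placeholders, not values from this project,
# and a reachable PostgreSQL server is assumed.
# client = clientPsql(host='localhost', user='postgres', password='secret')
# result = client.exec_query('SELECT 1 AS one;')    # SELECTs come back as a pandas DataFrame
# client.exec_query('CREATE TABLE demo (id int);')  # statements without a result set return None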
import numpy as np
import pandas as pd
import pytest
from hypothesis import given, settings
from pandas.testing import assert_frame_equal
from janitor.testing_utils.strategies import (
df_strategy,
categoricaldf_strategy,
)
from janitor.functions import expand_grid
@given(df=df_strategy())
def test_others_not_dict(df):
"""Raise Error if `others` is not a dictionary."""
with pytest.raises(TypeError):
df.expand_grid("frame", others=[2, 3])
@given(df=df_strategy())
def test_others_none(df):
"""Return DataFrame if no `others`, and df exists."""
assert_frame_equal(df.expand_grid("df"), df)
def test_others_empty():
"""Return None if no `others`."""
assert (expand_grid(), None) # noqa : F631
@given(df=df_strategy())
def test_df_key(df):
"""Raise error if df exists and df_key is not supplied."""
with pytest.raises(KeyError):
expand_grid(df, others={"y": [5, 4, 3, 2, 1]})
@given(df=df_strategy())
def test_df_key_hashable(df):
"""Raise error if df exists and df_key is not Hashable."""
with pytest.raises(TypeError):
expand_grid(df, df_key=["a"], others={"y": [5, 4, 3, 2, 1]})
def test_numpy_zero_d():
"""Raise ValueError if numpy array dimension is zero."""
with pytest.raises(ValueError):
expand_grid(others={"x": np.array([], dtype=int)})
def test_numpy_gt_2d():
"""Raise ValueError if numpy array dimension is greater than 2."""
with pytest.raises(ValueError):
expand_grid(others={"x": np.array([[[2, 3]]])})
def test_series_empty():
"""Raise ValueError if Series is empty."""
with pytest.raises(ValueError):
expand_grid(others={"x": pd.Series([], dtype=int)})
def test_dataframe_empty():
"""Raise ValueError if DataFrame is empty."""
with pytest.raises(ValueError):
expand_grid(others={"x": pd.DataFrame([])})
def test_index_empty():
"""Raise ValueError if Index is empty."""
with pytest.raises(ValueError):
expand_grid(others={"x": pd.Index([], dtype=int)})
@settings(deadline=None)
@given(df=df_strategy())
def test_series(df):
"""Test expand_grid output for Series input."""
A = df["a"]
B = df["cities"]
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]]
B = df.loc[:, ["cities"]]
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=df_strategy())
def test_series_dataframe(df):
"""Test expand_grid output for Series and DataFrame inputs."""
A = df["a"]
B = df.iloc[:, [1, 2]]
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]]
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=df_strategy())
def test_series_multiindex_dataframe(df):
"""
Test expand_grid output
if the DataFrame's columns is a MultiIndex.
"""
A = df["a"]
B = df.iloc[:, [1, 2]]
B.columns = pd.MultiIndex.from_arrays([["C", "D"], B.columns])
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]]
B.columns = B.columns.map("_".join)
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=df_strategy())
def test_numpy_1d(df):
"""Test expand_grid output for a 1D numpy array."""
A = df["a"].to_numpy()
B = df["cities"]
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]].rename(columns={"a": 0})
B = df.loc[:, ["cities"]]
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=categoricaldf_strategy())
def test_numpy_2d(df):
"""Test expand_grid output for a 2D numpy array"""
A = df["names"]
base = df.loc[:, ["numbers"]].assign(num=df.numbers * 4)
B = base.to_numpy(dtype=int)
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["names"]]
B = base.set_axis([0, 1], axis=1)
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=df_strategy())
def test_index(df):
"""Test expand_grid output for a pandas Index that has a name."""
A = pd.Index(df["a"])
B = df["cities"]
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]]
B = df.loc[:, ["cities"]]
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=df_strategy())
def test_index_name_none(df):
"""Test expand_grid output for a pandas Index without a name."""
A = pd.Index(df["a"].array, name=None)
B = df["cities"]
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]]
B = df.loc[:, ["cities"]]
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays([["A", "B"], [0, "cities"]])
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=categoricaldf_strategy())
def test_multiindex(df):
"""Test expand_grid output for a pandas MultiIndex with a name."""
A = df["names"]
base = df.loc[:, ["numbers"]].assign(num=df.numbers * 4)
B = pd.MultiIndex.from_frame(base)
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["names"]]
B = base.copy()
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=categoricaldf_strategy())
def test_multiindex_names_none(df):
"""Test expand_grid output for a pandas MultiIndex without a name."""
A = df["names"]
base = df.loc[:, ["numbers"]].assign(num=df.numbers * 4)
B = pd.MultiIndex.from_frame(base, names=[None, None])
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["names"]]
B = base.copy()
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B", "B"], ["names", 0, 1]]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=df_strategy())
def test_pandas_extension_array(df):
"""Test expand_grid output for a pandas array."""
A = df["a"]
B = df["cities"].astype("string").array
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]]
B = df.loc[:, ["cities"]].astype("string").set_axis([0], axis=1)
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=df_strategy())
def test_sequence(df):
"""Test expand_grid output for list."""
A = df["a"].to_list()
B = df["cities"]
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]].rename(columns={"a": 0})
B = df.loc[:, ["cities"]]
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays(
[["A", "B"], expected.columns]
)
assert_frame_equal(result, expected)
@settings(deadline=None)
@given(df=df_strategy())
def test_scalar(df):
"""Test expand_grid output for a scalar value."""
A = df["a"]
B = 2
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]]
B = pd.DataFrame([2])
from typing import Any, List, Tuple, Dict, Union, Optional, cast
from warnings import warn
import numpy as np
import pandas as pd
from scipy import sparse
from scipy.sparse import linalg
from chemicalc.reference_spectra import ReferenceSpectra, alpha_el
from chemicalc.instruments import InstConfig
def init_crlb_df(reference: ReferenceSpectra) -> pd.DataFrame:
"""
Initialized CRLB dataframe with indices corresponding to all the labels included
:param ReferenceSpectra reference: Reference star object (used to identify stellar labels)
:return pd.DataFrame: Empty CRLB dataframe
"""
if not isinstance(reference, ReferenceSpectra):
raise TypeError(
"reference must be a chemicalc.reference_spectra.ReferenceSpectra object"
)
return pd.DataFrame(index=reference.labels.index)
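# --- Added usage sketch (not part of the original module) ---
# init_crlb_df only needs a ReferenceSpectra object whose `.labels` DataFrame is
# indexed by the stellar labels; the lines below are illustrative only and
# 'instrument_A' is a made-up column name.
# crlb = init_crlb_df(reference_star)   # empty DataFrame indexed by the label names
# crlb['instrument_A'] = np.nan         # typically one column is added per instrument configuration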
#We start by loading the sys, csv and numpy libraries
import sys
import csv
import numpy as np
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
#-----------OPEN FILES----------------------------------
#Read the input file names from the command line
cl_line = sys.argv
#Lines used for debugging
OBS_file_name = 'acapulco_obs.txt'
FOR_file_name = 'acapulco_marpron.txt'
#OBS_file_name = cl_line[1]
#WRF_file_name = cl_line[2]
#Open the files
OBS_file = open(OBS_file_name, "r")
FOR_file = open(FOR_file_name, "r")
#Now read their contents into lists
obs_lines = OBS_file.readlines()
for_lines = FOR_file.readlines()
#Close the files since we no longer need them
OBS_file.close()
FOR_file.close()
######################OBSERVATION SECTION#####################################################
#Make two empty lists to add info later
obs_list = []
date_list = []
#From this loop we get two lists, one containing all the info from the obs file in a list with floats, and one with the dates as a string for the time series
for line in obs_lines:
obs_lines_split = [x.strip() for x in line.split(' ')]
obs_lines_split = [float(i) for i in obs_lines_split]
obs_list.append(obs_lines_split)
date = '{}/{}/{} {}:{}'.format(int(obs_lines_split[0]),int(obs_lines_split[1]),int(obs_lines_split[2]),int(obs_lines_split[3]),int(obs_lines_split[4]))
date_list.append(date)
#Extract only the relevant elevation data, on column number 6 from the observation file.
elev = [obs_list[i][6] for i in range(len(obs_list))]
#Using the time string from before, create Timestamp objects to be used in the time series object. Note that this object does not need to have a consistent
#frequency on the dates (dates can have missing values).
dates = pd.to_datetime(date_list)
#Create the first time series object, allowing it to have missing values ("holes" in the data).
elev_series_original = pd.Series(elev, index=dates)
#############OBSERVATION DATE SECTION####################
#This section fixes the missing data issue, first filling the time series with nan's where there should be a value, and then using a forward value to fill the missing values.
#First create a date range with the desired frequency (this will have NO holes in it).
start_date = datetime(2015,1,1,0,0,0)
end_date = datetime(2015,12,31,23,0,0)
dates_full = pd.date_range(start_date,end_date,freq='H')
#Now "resample" the time series so it has the same lenght as the previously made date range list.
elev_series = elev_series_original.reindex(dates_full)
#This section is optional, only to find out how many nan values there are in the time series.
test = pd.isnull(elev_series)
indx = 0
for i in test:
if i:
indx = indx + 1
#This section fills the nan values with the most recent existing value (forward fill, i.e. the 'pad' method).
elev_series = elev_series.fillna(method='pad')
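#Added illustration (not part of the original script): the same reindex + pad pattern on a toy series.
#s = pd.Series([1.0, 3.0], index=pd.to_datetime(['2015-01-01 00:00', '2015-01-01 02:00']))
#full = pd.date_range('2015-01-01 00:00', '2015-01-01 03:00', freq='H')
#s.reindex(full).fillna(method='pad')   #-> values [1.0, 1.0, 3.0, 3.0]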
######################FORECAST SECTION#####################################################
#Make two empty lists to add info later
for_list = []
date_list = []
#From this loop we get two lists: one containing all the info from the forecast file as floats, and one with the dates as strings for the time series
for line in for_lines:
for_lines_split = [x.strip() for x in line.split(' ')]
for_lines_split = [float(i) for i in for_lines_split]
for_list.append(for_lines_split)
date = '{}/{}/{} {}:{}'.format(int(for_lines_split[0]),int(for_lines_split[1]),int(for_lines_split[2]),int(for_lines_split[3]),int(for_lines_split[4]))
date_list.append(date)
#Extract only the relevant elevation data, on column number 6 from the forecast file.
elevfor = [for_list[i][6] for i in range(len(for_list))]
#Using the time string from before, create Timestamp objects to be used in the time series object. Note that this object does not need to have a consistent
#frequency on the dates (dates can have missing values).
dates = pd.to_datetime(date_list)
#Create the first time series object, allowing it to have missing values ("holes" in the data).
fore_series_original = pd.Series(elevfor, index=dates)
#############FORECAST DATE SECTION####################
#This section fixes the missing data issue, first filling the time series with nan's where there should be a value, and then using a forward value to fill the missing values.
#First create a date range with the desired frequency (this will have NO holes in it).
start_date = datetime(2015,1,1,0,0,0)
end_date = datetime(2015,12,31,23,0,0)
dates_full = pd.date_range(start_date, end_date, freq='H')
#!/usr/bin/python
import unittest
import cv2
import numpy as np
import os
import pandas as pd
from pandas.testing import assert_frame_equal
from numpy.testing import assert_allclose
from PIE import track_colonies
# load in a test timecourse colony property dataframe
# NB: this case is quite pathological, preliminary analysis on bad
# images, with poor tracking, but this is probably better for testing
timecourse_colony_prop_df = \
pd.read_csv(os.path.join('tests','test_ims',
'SL_170619_2_GR_small_xy0001_phase_colony_data_tracked.csv'),
index_col = 0)
satellite_prop_df = \
pd.read_csv(os.path.join('tests','test_ims',
'test_sat_data.csv'),
index_col = 0)
class TestGetOverlap(unittest.TestCase):
'''
Tests getting overlap of colonies between current and next timepoint
'''
def setUp(self):
self.colony_tracker = \
track_colonies.ColonyTracker()
self.colony_tracker.perform_registration = True
def test_get_overlap_t5t6(self):
'''
Tests finding overlap between timepoints 5 and 6 of
timecourse_colony_prop_df
Checked against results of previous matlab code (and manual
confirmation of most rows)
'''
tp_5_data = \
timecourse_colony_prop_df[
timecourse_colony_prop_df.timepoint == 5]
# get colony properties at next timepoint
tp_6_data = \
timecourse_colony_prop_df[
timecourse_colony_prop_df.timepoint == 6]
expected_overlap_df = pd.DataFrame(
np.array([
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]],
dtype = bool),
index = tp_5_data.index, columns = tp_6_data.index)
test_overlap_df = \
self.colony_tracker._get_overlap(tp_5_data, tp_6_data)
assert_frame_equal(expected_overlap_df, test_overlap_df)
import unittest
import pandas as pd
import os
from functools import partial
from StyleFrame import Container, StyleFrame, Styler, utils
from StyleFrame.tests import TEST_FILENAME
class StyleFrameTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.default_styler_obj = Styler(wrap_text=False)
cls.styler_obj_1 = Styler(bg_color=utils.colors.blue, bold=True, font='Impact', font_color=utils.colors.yellow,
font_size=20, underline=utils.underline.single,
horizontal_alignment=utils.horizontal_alignments.left,
vertical_alignment=utils.vertical_alignments.center,
comment_text='styler_obj_1 comment')
cls.styler_obj_2 = Styler(bg_color=utils.colors.yellow, comment_text='styler_obj_2 comment')
cls.openpy_style_obj_1 = cls.styler_obj_1.to_openpyxl_style()._style
cls.openpy_style_obj_2 = cls.styler_obj_2.to_openpyxl_style()._style
def setUp(self):
self.ew = StyleFrame.ExcelWriter(TEST_FILENAME)
self.sf = StyleFrame({'a': ['col_a_row_1', 'col_a_row_2', 'col_a_row_3'],
'b': ['col_b_row_1', 'col_b_row_2', 'col_b_row_3']}, self.default_styler_obj)
self.apply_column_style = partial(self.sf.apply_column_style, styler_obj=self.styler_obj_1, width=10)
self.apply_style_by_indexes = partial(self.sf.apply_style_by_indexes, styler_obj=self.styler_obj_1, height=10)
self.apply_headers_style = partial(self.sf.apply_headers_style, styler_obj=self.styler_obj_1)
@classmethod
def tearDownClass(cls):
try:
os.remove(TEST_FILENAME)
except OSError as ex:
print(ex)
def export_and_get_default_sheet(self, save=False):
self.sf.to_excel(excel_writer=self.ew, right_to_left=True, columns_to_hide=self.sf.columns[0],
row_to_add_filters=0, columns_and_rows_to_freeze='A2', allow_protection=True)
if save:
self.ew.save()
return self.ew.sheets['Sheet1']
def get_cf_rules(self, sheet):
conditional_formatting = sheet.conditional_formatting
try:
return conditional_formatting.cf_rules
except AttributeError:
return conditional_formatting
def test_init_styler_obj(self):
self.sf = StyleFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, styler_obj=self.styler_obj_1)
self.assertTrue(all(self.sf.at[index, 'a'].style.to_openpyxl_style()._style == self.openpy_style_obj_1
for index in self.sf.index))
sheet = self.export_and_get_default_sheet()
self.assertTrue(all(sheet.cell(row=i, column=j)._style == self.openpy_style_obj_1
for i in range(2, len(self.sf))
for j in range(1, len(self.sf.columns))))
with self.assertRaises(TypeError):
StyleFrame({}, styler_obj=1)
def test_init_dataframe(self):
self.assertIsInstance(StyleFrame(pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})), StyleFrame)
self.assertIsInstance(StyleFrame(pd.DataFrame()), StyleFrame)
import pandas as pd
def load_data():
source_1 = pd.read_csv('data/example_gtex_train_data_1.zip', compression='zip', index_col='sample_id')
source_2 = pd.read_csv('data/example_gtex_train_data_2.zip', compression='zip', index_col='sample_id')
source = pd.concat([source_1, source_2], axis=0).values.astype('float32')
target = pd.read_csv('data/example_tcga_test_data.zip', compression='zip', index_col='sample_id').values.astype('float32')
bias = pd.read_csv('data/example_sra_train_data.zip', compression='zip', index_col='sample_id')
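# --- Added note (not part of the original module) ---
# The excerpt ends here; presumably load_data finishes by returning the three
# arrays, e.g. `return source, target, bias`, in which case a hedged usage sketch is:
# source, target, bias = load_data()
# print(source.shape, target.shape, bias.shape)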
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import seaborn as sns
import json
import argparse
import os
import sys
from typing import List, Dict, Any
from ast import literal_eval
"""
Plot heatmap or violinplot of tactics and vendors
"""
CPE_ID_NORB_ID_PATH = "NORB/original_id_to_norb_id/cpe_id_norb_id.json"
def make_intensity_array(tactics, vendors, tactic_ids, tactic_vendor_products):
intensity_array = np.zeros((len(vendors),len(tactics)))
for tactic in tactic_vendor_products:
for vendor in tactic_vendor_products[tactic]:
products = tactic_vendor_products[tactic][vendor]
num_products = len(products)
intensity_array[vendors.index(vendor)][tactic_ids.index(tactic)] = num_products
return intensity_array
def norb_id_to_cpe_id(NORB_folder_path):
NORB_cpe_id_path = os.path.join(NORB_folder_path, CPE_ID_NORB_ID_PATH)
with open(NORB_cpe_id_path) as f:
cpe_id_norb_id = json.load(f)
norb_id_to_cpe_id = dict()
for cpe_id, norb_id in cpe_id_norb_id.items():
norb_id_to_cpe_id[f"cpe_{norb_id}"] = cpe_id
return norb_id_to_cpe_id
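# --- Added illustration (not part of the original script) ---
# Shape of the mapping returned above, using a made-up CPE entry:
# if cpe_id_norb_id.json held {"cpe:2.3:a:vendorx:productx:1.0:*:*:*:*:*:*:*": 42},
# norb_id_to_cpe_id(...) would return {"cpe_42": "cpe:2.3:a:vendorx:productx:1.0:*:*:*:*:*:*:*"}.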
def analyze_tactic_result(vendors, tactic_result, norb_id_to_cpe_id):
df = pd.read_csv(tactic_result, usecols=["tactic", "cpe"])
tactic_vendor_products = dict()
for row_index in df.index[:-1]:
tactic = literal_eval(df["tactic"][row_index]).pop()
cpes = set()
entry = df["cpe"][row_index]
if entry != "set()":
cpes = literal_eval(entry)
cpe_ids = find_cpe_ids(cpes, norb_id_to_cpe_id)
vendor_products = find_vendor_cpes(vendors, cpe_ids)
tactic_vendor_products[tactic] = vendor_products
return tactic_vendor_products
def find_vendor_cpes(vendors, cpe_ids):
vendor_products = dict()
for vendor in vendors:
vendor_products[vendor] = set()
for cpe_id in cpe_ids:
parsed = cpe_id.split(':', 5)
vendor = parsed[3]
product = parsed[4]
if vendor in vendors:
vendor_products[vendor].add(product)
return vendor_products
def find_cpe_ids(cpes, norb_id_to_cpe_id):
cpe_ids = set()
for norb_id in cpes:
cpe_ids.add(norb_id_to_cpe_id[norb_id])
return cpe_ids
def make_heat_map(tactics, vendors, tactic_ids, tactic_search_result, norb_id_to_cpe_id, save_path=None):
plt.rc('font', size=12)
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
tactic_vendor_products = analyze_tactic_result(vendors, tactic_search_result, norb_id_to_cpe_id)
intensity_array = make_intensity_array(tactics, vendors, tactic_ids, tactic_vendor_products)
labels = np.asarray([[int(intensity_array[row, col]) for col in range(len(tactics))] for row in range(len(vendors))])
comma_fmt = FuncFormatter(lambda x, p: format(int(x), ','))
heatmap = sns.heatmap(intensity_array, cmap='magma_r', xticklabels=tactics, yticklabels=vendors, annot=labels, fmt='', annot_kws={'size':10}, cbar_kws={'format':comma_fmt})
# heatmap.set_xticklabels(heatmap.get_xticklabels(), rotation=45, horizontalalignment='right')
for t in heatmap.texts:
t.set_text('{:,d}'.format(int(t.get_text())))
heatmap.set(xlabel="Tactics", ylabel="Vendors")
heatmap.tick_params(which='both', width=2)
heatmap.tick_params(which='major', length=7)
heatmap.tick_params(which='minor', length=4)
b, t = plt.ylim()
b += 0.5
t -= 0.5
plt.ylim(b, t)
plt.tight_layout()
fig = heatmap.get_figure()
if save_path is None:
plt.show()
else:
fig.savefig(save_path, dpi=400)
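# --- Added usage sketch (not part of the original script) ---
# Hedged example of calling make_heat_map; the tactic names/ids, vendor and CSV
# path below are placeholders, not real NORB outputs.
# mapping = norb_id_to_cpe_id("NORB_folder")
# make_heat_map(tactics=["Initial Access"], vendors=["vendorx"], tactic_ids=["TA0001"],
#               tactic_search_result="tactic_search.csv", norb_id_to_cpe_id=mapping,
#               save_path="heatmap.png")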
def cve_to_risk(cve_summary):
cve_to_risk_dict = dict()
df = pd.read_csv(cve_summary, usecols=["node_name", "metadata"])
import unittest
from unittest.mock import patch, PropertyMock
import time
import mt5_correlation.correlation as correlation
import pandas as pd
from datetime import datetime, timedelta
from test_mt5 import Symbol
import random
import os
class TestCorrelation(unittest.TestCase):
# Mock symbols. 4 Symbols, 3 visible.
mock_symbols = [Symbol(name='SYMBOL1', visible=True),
Symbol(name='SYMBOL2', visible=True),
Symbol(name='SYMBOL3', visible=False),
Symbol(name='SYMBOL4', visible=True),
Symbol(name='SYMBOL5', visible=True)]
# Start and end date for price data and mock prices: base; correlated; and uncorrelated.
start_date = None
end_date = None
price_columns = None
mock_base_prices = None
mock_correlated_prices = None
mock_uncorrelated_prices = None
def setUp(self):
"""
Creates some price data for use in tests
:return:
"""
# Start and end date for price data and mock price dataframes. One for: base; correlated; uncorrelated and
# different dates.
self.start_date = datetime(2021, 1, 1, 1, 5, 0)
self.end_date = datetime(2021, 1, 1, 11, 30, 0)
self.price_columns = ['time', 'close']
self.mock_base_prices = pd.DataFrame(columns=self.price_columns)
self.mock_correlated_prices = pd.DataFrame(columns=self.price_columns)
self.mock_uncorrelated_prices = pd.DataFrame(columns=self.price_columns)
self.mock_correlated_different_dates = pd.DataFrame(columns=self.price_columns)
self.mock_inverse_correlated_prices = pd.DataFrame(columns=self.price_columns)
# Build the price data for the test. One price every 5 minutes for 500 rows. Base will use min for price,
# correlated will use min + 5 and uncorrelated will use random
for date in (self.start_date + timedelta(minutes=m) for m in range(0, 500*5, 5)):
self.mock_base_prices = self.mock_base_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, date.minute]]))
self.mock_correlated_prices = \
self.mock_correlated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, date.minute + 5]]))
self.mock_uncorrelated_prices = \
self.mock_uncorrelated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, random.randint(0, 1000000)]]))
self.mock_correlated_different_dates = \
self.mock_correlated_different_dates.append(pd.DataFrame(columns=self.price_columns,
data=[[date + timedelta(minutes=100),
date.minute + 5]]))
self.mock_inverse_correlated_prices = \
self.mock_inverse_correlated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, (date.minute + 5) * -1]]))
@patch('mt5_correlation.mt5.MetaTrader5')
def test_calculate(self, mock):
"""
Test the calculate method. Uses mock for MT5 symbols and prices.
:param mock:
:return:
"""
# Mock symbol return values
mock.symbols_get.return_value = self.mock_symbols
# Correlation class
cor = correlation.Correlation(monitoring_threshold=1, monitor_inverse=True)
# Calculate for price data. We should have 100% matching dates in sets. Get prices should be called 4 times.
# We don't have a SYMBOL3 as this is set as not visible. Correlations should be as follows:
# SYMBOL1:SYMBOL2 should be fully correlated (1)
# SYMBOL1:SYMBOL4 should be uncorrelated (0)
# SYMBOL1:SYMBOL5 should be negatively correlated
# SYMBOL2:SYMBOL5 should be negatively correlated
# We will not use p_value as the last set uses random numbers so p value will not be useful.
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices,
self.mock_uncorrelated_prices, self.mock_inverse_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
# Test the output. We should have 6 rows. S1:S2 c=1, S1:S4 c<1, S1:S5 c=-1, S2:S5 c=-1. We are not checking
# S2:S4 or S4:S5
self.assertEqual(len(cor.coefficient_data.index), 6, "There should be six correlations rows calculated.")
self.assertEqual(cor.get_base_coefficient('SYMBOL1', 'SYMBOL2'), 1,
"The correlation for SYMBOL1:SYMBOL2 should be 1.")
self.assertTrue(cor.get_base_coefficient('SYMBOL1', 'SYMBOL4') < 1,
"The correlation for SYMBOL1:SYMBOL4 should be <1.")
self.assertEqual(cor.get_base_coefficient('SYMBOL1', 'SYMBOL5'), -1,
"The correlation for SYMBOL1:SYMBOL5 should be -1.")
self.assertEqual(cor.get_base_coefficient('SYMBOL2', 'SYMBOL5'), -1,
"The correlation for SYMBOL2:SYMBOL5 should be -1.")
# Monitoring threshold is 1 and we are monitoring inverse. Get filtered correlations. There should be 3 (S1:S2,
# S1:S5 and S2:S5)
self.assertEqual(len(cor.filtered_coefficient_data.index), 3,
"There should be 3 rows in filtered coefficient data when we are monitoring inverse "
"correlations.")
# Now aren't monitoring inverse correlations. There should only be one correlation when filtered
cor.monitor_inverse = False
self.assertEqual(len(cor.filtered_coefficient_data.index), 1,
"There should be only 1 rows in filtered coefficient data when we are not monitoring inverse "
"correlations.")
# Now were going to recalculate, but this time SYMBOL1:SYMBOL2 will have non overlapping dates and coefficient
# should be None. There shouldn't be a row. We should have correlations for S1:S4, S1:S5 and S4:S5
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_different_dates,
self.mock_correlated_prices, self.mock_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
self.assertEqual(len(cor.coefficient_data.index), 3, "There should be three correlations rows calculated.")
self.assertEqual(cor.coefficient_data.iloc[0, 2], 1, "The correlation for SYMBOL1:SYMBOL4 should be 1.")
self.assertEqual(cor.coefficient_data.iloc[1, 2], 1, "The correlation for SYMBOL1:SYMBOL5 should be 1.")
self.assertEqual(cor.coefficient_data.iloc[2, 2], 1, "The correlation for SYMBOL4:SYMBOL5 should be 1.")
# Get the price data used to calculate the coefficients for symbol 1. It should match mock_base_prices.
price_data = cor.get_price_data('SYMBOL1')
self.assertTrue(price_data.equals(self.mock_base_prices), "Price data returned post calculation should match "
"mock price data.")
def test_calculate_coefficient(self):
"""
Tests the coefficient calculation.
:return:
"""
# Correlation class
cor = correlation.Correlation()
# Test 2 correlated sets
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_correlated_prices)
self.assertEqual(coefficient, 1, "Coefficient should be 1.")
# Test 2 uncorrelated sets. Set p value to 1 to force correlation to be returned.
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_uncorrelated_prices, max_p_value=1)
self.assertTrue(coefficient < 1, "Coefficient should be < 1.")
# Test 2 sets where prices dont overlap
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_correlated_different_dates)
self.assertTrue(coefficient < 1, "Coefficient should be None.")
# Test 2 inversely correlated sets
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_inverse_correlated_prices)
self.assertEqual(coefficient, -1, "Coefficient should be -1.")
@patch('mt5_correlation.mt5.MetaTrader5')
def test_get_ticks(self, mock):
"""
Test that caching works. For the purpose of this test, we can use price data rather than tick data.
Mock 2 different sets of prices. Get three times. Base, One within cache threshold and one outside. Set 1
should match set 2 but differ from set 3.
:param mock:
:return:
"""
# Correlation class to test
cor = correlation.Correlation()
# Mock the tick data to contain 2 different sets. Then get twice. They should match as the data was cached.
mock.copy_ticks_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices]
# We need to start and stop the monitor as this will set the cache time
cor.start_monitor(interval=10, calculation_params={'from': 10, 'min_prices': 0, 'max_set_size_diff_pct': 0,
'overlap_pct': 0, 'max_p_value': 1}, cache_time=3)
cor.stop_monitor()
# Get the ticks within cache time and check that they match
base_ticks = cor.get_ticks('SYMBOL1', None, None)
cached_ticks = cor.get_ticks('SYMBOL1', None, None)
self.assertTrue(base_ticks.equals(cached_ticks),
"Both sets of tick data should match as set 2 came from cache.")
# Wait 3 seconds
time.sleep(3)
# Retrieve again. This one should be different as the cache has expired.
non_cached_ticks = cor.get_ticks('SYMBOL1', None, None)
self.assertTrue(not base_ticks.equals(non_cached_ticks),
"Both sets of tick data should differ as cached data had expired.")
@patch('mt5_correlation.mt5.MetaTrader5')
def test_start_monitor(self, mock):
"""
Test that starting the monitor and running for 2 seconds produces two sets of coefficient history when using an
interval of 1 second.
:param mock:
:return:
"""
# Mock symbol return values
mock.symbols_get.return_value = self.mock_symbols
# Create correlation class. We will set a divergence threshold so that we can test status.
cor = correlation.Correlation(divergence_threshold=0.8, monitor_inverse=True)
# Calculate for price data. We should have 100% matching dates in sets. Get prices should be called 4 times.
# We don't have a SYMBOL3 as it is set as not visible. All pairs should be correlated for the purpose of this
# test.
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices,
self.mock_correlated_prices, self.mock_inverse_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
# We will build some tick data for each symbol and patch it in. Tick data will be from 10 seconds ago to now.
# We only need to patch in one set of tick data for each symbol as it will be cached.
columns = ['time', 'ask']
starttime = datetime.now() - timedelta(seconds=10)
tick_data_s1 = pd.DataFrame(columns=columns)
tick_data_s2 = pd.DataFrame(columns=columns)
tick_data_s4 = pd.DataFrame(columns=columns)
tick_data_s5 = pd.DataFrame(columns=columns)
now = datetime.now()
price_base = 1
while starttime < now:
tick_data_s1 = tick_data_s1.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.5]]))
tick_data_s2 = tick_data_s2.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.1]]))
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#import networkx as nx
from scIB.utils import *
from scIB.preprocessing import score_cell_cycle
from scIB.clustering import opt_louvain
from scipy import sparse
from scipy.sparse.csgraph import connected_components
from scipy.io import mmwrite
import sklearn
import sklearn.metrics
from time import time
import cProfile
from pstats import Stats
import memory_profiler
import itertools
import multiprocessing
import subprocess
import tempfile
import pathlib
from os import mkdir, path, remove, stat
import gc
import rpy2.rinterface_lib.callbacks
import logging
rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR) # Ignore R warning messages
import rpy2.robjects as ro
import anndata2ri
# Define Errors
class RootCellError(Exception):
def __init__(self, message):
self.message = message
class NeighborsError(Exception):
def __init__(self, message):
self.message = message
### Silhouette score
def silhouette(adata, group_key, metric='euclidean', embed='X_pca', scale=True):
"""
wrapper for the sklearn silhouette function; values range from [-1, 1] with 1 being an ideal fit, 0 indicating overlapping clusters and -1 indicating misclassified cells
"""
if embed not in adata.obsm.keys():
print(adata.obsm.keys())
raise KeyError(f'{embed} not in obsm')
asw = sklearn.metrics.silhouette_score(adata.obsm[embed], adata.obs[group_key], metric=metric)
if scale:
asw = (asw + 1)/2
return asw
def silhouette_batch(adata, batch_key, group_key, metric='euclidean',
embed='X_pca', verbose=True, scale=True):
"""
Silhouette score of batch labels subsetted for each group.
params:
batch_key: batches to be compared against
group_key: group labels to be subsetted by e.g. cell type
metric: see sklearn silhouette score
embed: name of column in adata.obsm
returns:
all scores: absolute silhouette scores per group label
group means: if `mean=True`
"""
if embed not in adata.obsm.keys():
print(adata.obsm.keys())
raise KeyError(f'{embed} not in obsm')
sil_all = pd.DataFrame(columns=['group', 'silhouette_score'])
for group in adata.obs[group_key].unique():
adata_group = adata[adata.obs[group_key] == group]
if adata_group.obs[batch_key].nunique() == 1:
continue
sil_per_group = sklearn.metrics.silhouette_samples(adata_group.obsm[embed], adata_group.obs[batch_key],
metric=metric)
# take only absolute value
sil_per_group = [abs(i) for i in sil_per_group]
if scale:
# scale s.t. highest number is optimal
sil_per_group = [1 - i for i in sil_per_group]
d = pd.DataFrame({'group' : [group]*len(sil_per_group), 'silhouette_score' : sil_per_group})
sil_all = sil_all.append(d)
sil_all = sil_all.reset_index(drop=True)
sil_means = sil_all.groupby('group').mean()
if verbose:
print(f'mean silhouette per cell: {sil_means}')
return sil_all, sil_means
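# --- Added usage sketch (not part of the original module) ---
# 'batch' and 'cell_type' below are placeholder adata.obs column names, not names required here.
# sil_all, sil_means = silhouette_batch(adata, batch_key='batch', group_key='cell_type',
#                                       embed='X_pca', verbose=False)
# sil_means is indexed by group and holds the mean (scaled) absolute batch silhouette per group.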
def plot_silhouette_score(adata_dict, batch_key, group_key, metric='euclidean',
embed='X_pca', palette='Dark2', per_group=False, verbose=True):
"""
params:
adata_dict: dictionary of adata objects, each labeled by e.g. integration method name
"""
with sns.color_palette(palette):
for label, adata in adata_dict.items():
checkAdata(adata)
sil_scores = silhouette(adata,
batch_key=batch_key,
group_key=group_key,
metric=metric,
embed=embed,
means=False,
verbose=verbose)
sns.distplot(sil_scores['silhouette_score'], label=label, hist=False)
plt.title('Silhouette scores per cell for all groups')
plt.show()
if per_group:
for data_set, adata in adata_dict.items():
sil_scores = silhouette(adata,
batch_key=batch_key,
group_key=group_key,
metric=metric,
embed=embed,
means=False,
verbose=verbose)
# plot for all groups
for group in sil_scores['group'].unique():
group_scores = sil_scores[sil_scores['group'] == group]
sns.distplot(group_scores['silhouette_score'], label=group, hist=False)
plt.title(f'Silhouette scores per cell for {data_set}')
plt.show()
### NMI normalised mutual information
def nmi(adata, group1, group2, method="arithmetic", nmi_dir=None):
"""
Normalized mutual information NMI based on 2 different cluster assignments `group1` and `group2`
params:
adata: Anndata object
group1: column name of `adata.obs` or group assignment
group2: column name of `adata.obs` or group assignment
method: NMI implementation
'max': scikit method with `average_method='max'`
'min': scikit method with `average_method='min'`
'geometric': scikit method with `average_method='geometric'`
'arithmetic': scikit method with `average_method='arithmetic'`
'Lancichinetti': implementation by <NAME> 2009 et al.
'ONMI': implementation by <NAME> et al. (https://github.com/aaronmcdaid/Overlapping-NMI) Hurley 2011
nmi_dir: directory of compiled C code if 'Lancichinetti' or 'ONMI' are specified as `method`. Compilation should be done as specified in the corresponding README.
return:
normalized mutual information (NMI)
"""
checkAdata(adata)
if isinstance(group1, str):
checkBatch(group1, adata.obs)
group1 = adata.obs[group1].tolist()
elif isinstance(group1, pd.Series):
group1 = group1.tolist()
if isinstance(group2, str):
checkBatch(group2, adata.obs)
group2 = adata.obs[group2].tolist()
elif isinstance(group2, pd.Series):
group2 = group2.tolist()
if len(group1) != len(group2):
raise ValueError(f'different lengths in group1 ({len(group1)}) and group2 ({len(group2)})')
# choose method
if method in ['max', 'min', 'geometric', 'arithmetic']:
nmi_value = sklearn.metrics.normalized_mutual_info_score(group1, group2, average_method=method)
elif method == "Lancichinetti":
nmi_value = nmi_Lanc(group1, group2, nmi_dir=nmi_dir)
elif method == "ONMI":
nmi_value = onmi(group1, group2, nmi_dir=nmi_dir)
else:
raise ValueError(f"Method {method} not valid")
return nmi_value
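# --- Added usage sketch (not part of the original module) ---
# 'louvain' and 'cell_type' are placeholder adata.obs columns; the scikit-learn
# based methods need no external binaries, while 'Lancichinetti'/'ONMI' need nmi_dir.
# score = nmi(adata, 'louvain', 'cell_type', method='arithmetic')   # value in [0, 1]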
def onmi(group1, group2, nmi_dir=None, verbose=True):
"""
Based on implementation https://github.com/aaronmcdaid/Overlapping-NMI
publication: <NAME>, <NAME>, <NAME> 2011
params:
nmi_dir: directory of compiled C code
"""
if nmi_dir is None:
raise FileNotFoundError("Please provide the directory of the compiled C code from https://sites.google.com/site/andrealancichinetti/mutual3.tar.gz")
group1_file = write_tmp_labels(group1, to_int=False)
group2_file = write_tmp_labels(group2, to_int=False)
nmi_call = subprocess.Popen(
[nmi_dir+"onmi", group1_file, group2_file],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = nmi_call.communicate()
if stderr:
print(stderr)
nmi_out = stdout.decode()
if verbose:
print(nmi_out)
nmi_split = [x.strip().split('\t') for x in nmi_out.split('\n')]
nmi_max = float(nmi_split[0][1])
# remove temporary files
remove(group1_file)
remove(group2_file)
return nmi_max
def nmi_Lanc(group1, group2, nmi_dir="external/mutual3/", verbose=True):
"""
paper by <NAME> 2009
https://sites.google.com/site/andrealancichinetti/mutual
recommended by Malte
"""
if nmi_dir is None:
raise FileNotFoundError("Please provide the directory of the compiled C code from https://sites.google.com/site/andrealancichinetti/mutual3.tar.gz")
group1_file = write_tmp_labels(group1, to_int=False)
group2_file = write_tmp_labels(group2, to_int=False)
nmi_call = subprocess.Popen(
[nmi_dir+"mutual", group1_file, group2_file],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = nmi_call.communicate()
if stderr:
print(stderr)
nmi_out = stdout.decode().strip()
return float(nmi_out.split('\t')[1])
def write_tmp_labels(group_assignments, to_int=False, delim='\n'):
"""
write the values of a specific obs column into a temporary file in text format
needed for external C NMI implementations (onmi and nmi_Lanc functions), because they require files as input
params:
to_int: rename the unique column entries by integers in range(1,len(group_assignments)+1)
"""
if to_int:
label_map = {}
i = 1
for label in set(group_assignments):
label_map[label] = i
i += 1
labels = delim.join([str(label_map[name]) for name in group_assignments])
else:
labels = delim.join([str(name) for name in group_assignments])
clusters = {label:[] for label in set(group_assignments)}
for i, label in enumerate(group_assignments):
clusters[label].append(str(i))
output = '\n'.join([' '.join(c) for c in clusters.values()])
output = str.encode(output)
# write to file
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(output)
filename = f.name
return filename
### ARI adjusted rand index
def ari(adata, group1, group2):
"""
params:
adata: anndata object
group1: ground-truth cluster assignments (e.g. cell type labels)
group2: "predicted" cluster assignments
The function is symmetric, so group1 and group2 can be switched
"""
checkAdata(adata)
if isinstance(group1, str):
checkBatch(group1, adata.obs)
group1 = adata.obs[group1].tolist()
elif isinstance(group1, pd.Series):
group1 = group1.tolist()
if isinstance(group2, str):
checkBatch(group2, adata.obs)
group2 = adata.obs[group2].tolist()
elif isinstance(group2, pd.Series):
group2 = group2.tolist()
if len(group1) != len(group2):
raise ValueError(f'different lengths in group1 ({len(group1)}) and group2 ({len(group2)})')
return sklearn.metrics.cluster.adjusted_rand_score(group1, group2)
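# --- Added usage sketch (not part of the original module) ---
# 'cell_type' and 'louvain' are placeholder adata.obs columns; the score is symmetric in its arguments.
# score = ari(adata, group1='cell_type', group2='louvain')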
### Isolated label score
def isolated_labels(adata, label_key, batch_key, cluster_key="iso_cluster",
cluster=True, n=None, all_=False, verbose=True):
"""
score how well isolated labels are distinguished in the dataset by
1. clustering-based approach
2. silhouette score
params:
cluster: if True, use clustering approach, otherwise use silhouette score approach
n: max number of batches per label for label to be considered as isolated.
if n is integer, consider labels that are present for n batches as isolated
if n=None, consider minimum number of batches that labels are present in
all_: return scores for all isolated labels instead of aggregated mean
return:
by default, mean of scores for each isolated label
retrieve dictionary of scores for each label if `all_` is specified
"""
scores = {}
isolated_labels = get_isolated_labels(adata, label_key, batch_key, cluster_key, n=n, verbose=verbose)
for label in isolated_labels:
score = score_isolated_label(adata, label_key, cluster_key, label, cluster=cluster, verbose=verbose)
scores[label] = score
if all_:
return scores
return np.mean(list(scores.values()))
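# --- Illustrative call sketch (not part of the original module) ---
# Typical invocation, assuming an AnnData object whose .obs has the columns
# "cell_type" and "batch"; both column names are placeholders, not fixed by the
# code above.
def _isolated_labels_example(adata):
    return isolated_labels(adata, label_key="cell_type", batch_key="batch",
                           cluster=False, verbose=False)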
def get_isolated_labels(adata, label_key, batch_key, cluster_key, n, verbose):
"""
get labels that are considered isolated by the number of batches
"""
tmp = adata.obs[[label_key, batch_key]].drop_duplicates()
batch_per_lab = tmp.groupby(label_key).agg({batch_key: "count"})
# threshold for determining when label is considered isolated
if n is None:
n = batch_per_lab.min().tolist()[0]
if verbose:
print(f"isolated labels: no more than {n} batches per label")
labels = batch_per_lab[batch_per_lab[batch_key] <= n].index.tolist()
if len(labels) == 0 and verbose:
print(f"no isolated labels with less than {n} batches")
return labels
def score_isolated_label(adata, label_key, cluster_key, label, cluster=True, verbose=False, **kwargs):
"""
compute label score for a single label
params:
cluster: if True, use clustering approach, otherwise use silhouette score approach
"""
adata_tmp = adata.copy()
def max_label_per_batch(adata, label_key, cluster_key, label, argmax=False):
"""cluster optimizing over cluster with largest number of isolated label per batch"""
sub = adata.obs[adata.obs[label_key] == label].copy()
label_counts = sub[cluster_key].value_counts()
if argmax:
return label_counts.index[label_counts.argmax()]
return label_counts.max()
def max_f1(adata, label_key, cluster_key, label, argmax=False):
"""cluster optimizing over largest F1 score of isolated label"""
obs = adata.obs
max_cluster = None
max_f1 = 0
for cluster in obs[cluster_key].unique():
y_pred = obs[cluster_key] == cluster
y_true = obs[label_key] == label
f1 = sklearn.metrics.f1_score(y_pred, y_true)
if f1 > max_f1:
max_f1 = f1
max_cluster = cluster
if argmax:
return max_cluster
return max_f1
if cluster:
opt_louvain(adata_tmp, label_key, cluster_key, function=max_f1, label=label, verbose=False, inplace=True)
score = max_f1(adata_tmp, label_key, cluster_key, label, argmax=False)
else:
adata_tmp.obs['group'] = adata_tmp.obs[label_key] == label
score = silhouette(adata_tmp, group_key='group', **kwargs)
del adata_tmp
if verbose:
print(f"{label}: {score}")
return score
def precompute_hvg_batch(adata, batch, features, n_hvg=500, save_hvg=False):
adata_list = splitBatches(adata, batch, hvg=features)
hvg_dir = {}
for i in adata_list:
sc.pp.filter_genes(i, min_cells=1)
n_hvg_tmp = np.minimum(n_hvg, int(0.5*i.n_vars))
if n_hvg_tmp<n_hvg:
print(i.obs[batch][0]+' has less than the specified number of genes')
print('Number of genes: '+str(i.n_vars))
hvg = sc.pp.highly_variable_genes(i, flavor='cell_ranger', n_top_genes=n_hvg_tmp, inplace=False)
hvg_dir[i.obs[batch][0]] = i.var.index[hvg['highly_variable']]
adata_list=None
if save_hvg:
adata.uns['hvg_before']=hvg_dir
else:
return hvg_dir
### Highly Variable Genes conservation
def hvg_overlap(adata_pre, adata_post, batch, n_hvg=500):
hvg_post = adata_post.var_names
adata_post_list = splitBatches(adata_post, batch)
overlap = []
if ('hvg_before' in adata_pre.uns_keys()) and (set(hvg_post) == set(adata_pre.var_names)):
print('Using precomputed hvgs per batch')
hvg_pre_list = adata_pre.uns['hvg_before']
else:
hvg_pre_list = precompute_hvg_batch(adata_pre, batch, hvg_post)
for i in range(len(adata_post_list)):#range(len(adata_pre_list)):
sc.pp.filter_genes(adata_post_list[i], min_cells=1) # remove genes unexpressed (otherwise hvg might break)
#ov = list(set(adata_pre_list[i].var_names).intersection(set(hvg_pre_list[i])))
#adata_pre_list[i] = adata_pre_list[i][:,ov]
#adata_post_list[i] = adata_post_list[i][:,ov]
batch_var = adata_post_list[i].obs[batch][0]
n_hvg_tmp = len(hvg_pre_list[batch_var])#adata_pre.uns['n_hvg'][hvg_post]#np.minimum(n_hvg, int(0.5*adata_post_list[i].n_vars))
print(n_hvg_tmp)
#if n_hvg_tmp<n_hvg:
# print(adata_post_list[i].obs[batch][0]+' has less than the specified number of genes')
# print('Number of genes: '+str(adata_post_list[i].n_vars))
#hvg_pre = sc.pp.highly_variable_genes(adata_pre_list[i], flavor='cell_ranger', n_top_genes=n_hvg_tmp, inplace=False)
tmp_pre = hvg_pre_list[batch_var] #adata_pre_list[i].var.index[hvg_pre['highly_variable']]
hvg_post = sc.pp.highly_variable_genes(adata_post_list[i], flavor='cell_ranger', n_top_genes=n_hvg_tmp, inplace=False)
tmp_post = adata_post_list[i].var.index[hvg_post['highly_variable']]
n_hvg_real = np.minimum(len(tmp_pre),len(tmp_post))
overlap.append((len(set(tmp_pre).intersection(set(tmp_post))))/n_hvg_real)
return np.mean(overlap)
### Cell cycle effect
def precompute_cc_score(adata, batch_key, organism='mouse',
n_comps=50, verbose=False):
batches = adata.obs[batch_key].cat.categories
scores_before = {}
s_score = []
g2m_score = []
for batch in batches:
raw_sub = adata[adata.obs[batch_key] == batch].copy()
#score cell cycle if not already done
if (np.in1d(['S_score', 'G2M_score'], adata.obs_keys()).sum() < 2):
score_cell_cycle(raw_sub, organism=organism)
s_score.append(raw_sub.obs['S_score'])
g2m_score.append(raw_sub.obs['G2M_score'])
covariate = raw_sub.obs[['S_score', 'G2M_score']]
before = pc_regression(raw_sub.X, covariate, pca_sd=None, n_comps=n_comps, verbose=verbose)
scores_before.update({batch : before})
if (np.in1d(['S_score', 'G2M_score'], adata.obs_keys()).sum() < 2):
adata.obs['S_score'] = pd.concat(s_score)
adata.obs['G2M_score'] = | pd.concat(g2m_score) | pandas.concat |
import datetime as dt
from typing import List
from src.repos.metricsData.metricsDataRepo import MetricsDataRepo
from src.utils.addMonths import addMonths
from src.config.appConfig import getREConstituentsMappings
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
def fetchSection1_11_LoadCurve(appDbConnStr: str, startDt: dt.datetime, endDt: dt.datetime):
mRepo = MetricsDataRepo(appDbConnStr)
pltDataObj:list = []
totalGen = mRepo.getEntityREHourlyData('wr',startDt, endDt)
df = pd.DataFrame(totalGen)
max_total_gen_position = df['val'].idxmax()
max_total_gen_dt = df['time_stamp'].iloc[max_total_gen_position]
max_str_date = dt.datetime(max_total_gen_dt.year,max_total_gen_dt.month,max_total_gen_dt.day)
totalGenOnMaxGenDay = mRepo.getEntityREHourlyData('wr',max_str_date,max_str_date)
wrDemandOnMaxGenDay = mRepo.getEntityMetricHourlyData('wr','Demand(MW)',max_str_date,max_str_date)
pltDataDf = | pd.DataFrame() | pandas.DataFrame |
from T2GEORES import geometry as geomtr
import sqlite3
import os
import pandas as pd
import json
def checktable(table_name,c):
"""It verifies the existance of a table on the sqlite database
Parameters
----------
table_name : str
Table name
c : cursor
Conection to the database
Returns
-------
int
check: if table exists returns 1
Examples
--------
>>> checktable(table_name,c)
"""
query="SELECT COUNT(name) from sqlite_master WHERE type='table' AND name='%s'"%(table_name)
c.execute(query)
if c.fetchone()[0]==1:
check=1
else:
check=0
return check
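# --- Illustrative sketch (not part of the original module) ---
# checktable() against a throwaway in-memory database: it returns 0 before the
# table exists and 1 afterwards.
def _checktable_example():
    conn = sqlite3.connect(':memory:')
    c = conn.cursor()
    before = checktable('wells', c)  # 0
    c.execute('CREATE TABLE wells ([well] TEXT PRIMARY KEY)')
    after = checktable('wells', c)   # 1
    conn.close()
    return before, after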
def db_creation(input_dictionary):
"""It creates a sqlite databa base
Parameters
----------
input_dictionary : dictionary
Dictionary containing the path and name of the database under the keyword 'db_path', usually in '../input/'
Returns
-------
database
name: database created at the desired path
Note
----
The tables: wells, survey, PT, mh, drawdown, cooling, wellfeedzone, t2wellblock, t2wellsource, layers and t2PTout are generated
Examples
--------
>>> db_creation(input_dictionary)
"""
db_path=input_dictionary['db_path']
if not os.path.isfile(db_path):
conn=sqlite3.connect(db_path)
c = conn.cursor()
# Create table - wells
if checktable('wells',c)==0:
c.execute('''CREATE TABLE wells
([well] TEXT PRIMARY KEY,
[type] TEXT,
[east] REAL,
[north] REAL,
[elevation] REAL,
[lnr_init] REAL,
[lnr_end] REAL,
[lnr_D] TEXT,
[ptube_init] REAL,
[ptube_end] REAL,
[ptube_D] TEXT,
[drilldate] datetime)''')
#Create table - survey
if checktable('survey',c)==0:
c.execute('''CREATE TABLE survey
([well] TEXT,
[MeasuredDepth] REAL,
[Delta_east] REAL,
[Delta_north] REAL)''')
#Create table - PT
if checktable('PT',c)==0:
c.execute('''CREATE TABLE PT
([well] TEXT,
[MeasuredDepth] REAL,
[Pressure] REAL,
[Temperature] REAL)''')
#Create table - mh
if checktable('mh',c)==0:
c.execute('''CREATE TABLE mh
([well] TEXT,
[type] TEXT,
[date_time] datetime,
[steam_flow] REAL,
[liquid_flow] REAL,
[flowing_enthalpy] REAL,
[well_head_pressure] REAL)''')
#Create table - drawdown
if checktable('drawdown',c)==0:
c.execute('''CREATE TABLE drawdown
([well] TEXT,
[date_time] datetime,
[TVD] REAL,
[pressure] REAL)''')
#Create table - cooling
if checktable('cooling',c)==0:
c.execute('''CREATE TABLE cooling
([well] TEXT,
[date_time] datetime,
[TVD] REAL,
[temp] REAL)''')
#Create table - wellfeedzone
if checktable('wellfeedzone',c)==0:
c.execute('''CREATE TABLE wellfeedzone
([well] TEXT,
[MeasuredDepth] REAL,
[contribution] REAL)''')
#Create table - TOUGH2 well block(correlative)
if checktable('t2wellblock',c)==0:
c.execute('''CREATE TABLE t2wellblock
([well] TEXT PRIMARY KEY,
[blockcorr] TEXT)''')
#Create table - TOUGH2 well source
if checktable('t2wellsource',c)==0:
c.execute('''CREATE TABLE t2wellsource
([well] TEXT,
[blockcorr] TEXT ,
[source_nickname] TEXT PRIMARY KEY)''')
#Create table - layers levels
if checktable('layers',c)==0:
c.execute('''CREATE TABLE layers
([correlative] TEXT PRIMARY KEY,
[top] REAL,
[middle] REAL,
[bottom] REAL)''')
#Create table - stores ELEME section of mesh
if checktable('ELEME',c)==0:
c.execute('''CREATE TABLE ELEME
([model_version] REAL,
[model_output_timestamp] timestamp,
[ELEME] TEXT,
[NSEQ] REAL,
[NADD] REAL,
[MA1] REAL,
[MA2] REAL,
[VOLX] REAL,
[AHTX] REAL,
[PMX] REAL,
[X] REAL,
[Y] REAL,
[Z] REAL,
[LAYER_N] REAL,
[h] REAL)''')
#Create table - stores CONNE section of mesh
if checktable('CONNE',c)==0:
c.execute('''CREATE TABLE CONNE
([model_version] REAL,
[model_output_timestamp] timestamp,
[ELEME1] TEXT,
[ELEME2] TEXT,
[NSEQ] REAL,
[NAD1] REAL,
[NAD2] REAL,
[ISOT] REAL,
[D1] REAL,
[D2] REAL,
[AREAX] REAL,
[BETAX] REAL,
[SIGX] REAL)''')
#Create table - stores segment
if checktable('segment',c)==0:
c.execute('''CREATE TABLE segment
([model_version] REAL,
[model_output_timestamp] timestamp,
[x1] REAL,
[y1] REAL,
[x2] REAL,
[y2] REAL,
[redundant] REAL,
[ELEME1] TEXT,
[ELEME2] TEXT)''')
#Create table - PT out
if checktable('t2PTout',c)==0:
c.execute('''CREATE TABLE t2PTout
([blockcorr] TEXT PRIMARY KEY,
[x] REAL,
[y] REAL,
[z] REAL,
[index] REAL,
[P] REAL,
[T] REAL,
[SG] REAL,
[SW] REAL,
[X1] REAL,
[X2] REAL,
[PCAP] REAL,
[DG] REAL,
[DW] REAL)''')
#Create table - stores flows TOUGH2 output section
if checktable('t2FLOWSout',c)==0:
c.execute('''CREATE TABLE t2FLOWSout
([model_version] REAL,
[model_output_timestamp] timestamp,
[ELEME1] TEXT,
[ELEME2] TEXT,
[INDEX] INT,
[FHEAT] REAL,
[FLOH] REAL,
[FLOF] REAL,
[FLOG] REAL,
[FLOAQ] REAL,
[FLOWTR2] REAL,
[VELG] REAL,
[VELAQ] REAL,
[TURB_COEFF] REAL,
[model_time] REAL)''')
#Create table - stores flows directions from every block
if checktable('t2FLOWVectors',c)==0:
c.execute('''CREATE TABLE t2FLOWVectors
([model_version] REAL,
[model_output_timestamp] timestamp,
[ELEME] TEXT,
[FHEAT_x] REAL,
[FHEAT_y] REAL,
[FHEAT_z] REAL,
[FLOH_x] REAL,
[FLOH_y] REAL,
[FLOH_z] REAL,
[FLOF_x] REAL,
[FLOF_y] REAL,
[FLOF_z] REAL,
[FLOG_x] REAL,
[FLOG_y] REAL,
[FLOG_z] REAL,
[FLOAQ_x] REAL,
[FLOAQ_y] REAL,
[FLOAQ_z] REAL,
[FLOWTR2_x] REAL,
[FLOWTR2_y] REAL,
[FLOWTR2_z] REAL,
[VELG_x] REAL,
[VELG_y] REAL,
[VELG_z] REAL,
[VELAQ_x] REAL,
[VELAQ_y] REAL,
[VELAQ_z] REAL,
[TURB_COEFF_x] REAL,
[TURB_COEFF_y] REAL,
[TURB_COEFF_z] REAL,
[model_time] REAL)''')
conn.commit()
conn.close()
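# --- Illustrative sketch (not part of the original module) ---
# Minimal shape of the input_dictionary the functions in this module expect;
# both paths below are placeholders.
_example_input_dictionary = {
    'db_path': '../input/model.db',  # sqlite file created by db_creation()
    'source_txt': '../input/',       # folder holding ubication.csv, survey/, PT/, ...
}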
def insert_wells_sqlite(input_dictionary):
"""It stores the data contain on the ubication.csv file and stores it on the database
Parameters
----------
input_dictionary: dictionary
Dictionary containing the path and name of database and the path of the input file
Note
----
The well name is written as the primary key. Thus, if the coordinates in the ubication.csv file change, it is better to
delete the existing records and rerun this function. Some print output is expected.
Examples
--------
>>> insert_wells_sqlite(input_dictionary)
"""
db_path=input_dictionary['db_path']
source_txt=input_dictionary['source_txt']
conn=sqlite3.connect(db_path)
c = conn.cursor()
wells=pd.read_csv(source_txt+'ubication.csv')
wells['drilldate'] = pd.to_datetime(wells['drilldate'],format="%Y%m%d")
for index,row in wells.iterrows():
try:
q="INSERT INTO wells(well,type,east,north,elevation,drilldate) VALUES ('%s','%s',%s,%s,%s,'%s')"%\
(row['well'],row['type'],row['east'],row['north'],row['masl'],row['drilldate'])
c.execute(q)
conn.commit()
except sqlite3.IntegrityError:
print("The well %s is already on the database")
conn.close()
def insert_feedzone_to_sqlite(input_dictionary):
"""It stores the data contain on the ubication.csv file and stores it on the database
Parameters
----------
input_dictionary: dictionary
Dictionary containing the path and name of database and the path of the input file
Examples
--------
>>> insert_feedzone_to_sqlite(input_dictionary)
"""
db_path=input_dictionary['db_path']
source_txt=input_dictionary['source_txt']
conn=sqlite3.connect(db_path)
c=conn.cursor()
feedzones=pd.read_csv(source_txt+'well_feedzone.csv',delimiter=',')
for index,row in feedzones.iterrows():
q="INSERT INTO wellfeedzone(well,MeasuredDepth,contribution) VALUES ('%s',%s,%s)"%\
(row['well'],row['MD'],row['contribution'])
c.execute(q)
conn.commit()
conn.close()
def insert_survey_to_sqlite(input_dictionary):
"""It stores all the data contain on the subfolder survey from the input file folder.
Parameters
----------
input_dictionary: dictionary
Dictionary containing the path and name of database and the path of the input file
Note
----
The survey file for every well must have the following headers: MeasuredDepth, Delta_north, Delta_east
Examples
--------
>>> insert_survey_to_sqlite(input_dictionary)
"""
db_path=input_dictionary['db_path']
source_txt=input_dictionary['source_txt']
conn=sqlite3.connect(db_path)
c=conn.cursor()
for f in os.listdir(source_txt+'survey/'):
if os.path.isfile(os.path.join(source_txt, 'survey/',f)):
well_name=f.replace("'","").replace("_MD.dat","")
well_file=os.path.join(source_txt, 'survey/',f)
survey=pd.read_csv(well_file)
for index, row in survey.iterrows():
q="INSERT INTO survey(well,MeasuredDepth,Delta_north,Delta_east) VALUES ('%s',%s,%s,%s)"%\
(well_name,row['MeasuredDepth'],row['Delta_north'],row['Delta_east'])
c.execute(q)
conn.commit()
conn.close()
def insert_PT_to_sqlite(input_dictionary):
"""It stores all the data contain on the subfolder PT from the input file folder.
Parameters
----------
input_dictionary: dictionary
Dictionary containing the path and name of database and the path of the input file
Note
----
The PT file for every well must have the following headers: MD, P, T. The file name must be well_MDPT.dat
Examples
--------
>>> insert_PT_to_sqlite(input_dictionary)
"""
db_path=input_dictionary['db_path']
source_txt=input_dictionary['source_txt']
conn=sqlite3.connect(db_path)
c=conn.cursor()
for f in os.listdir(source_txt+'PT'):
if os.path.isfile(source_txt+'PT/'+f):
if '_MDPT' in f:
well_name=f.replace("'","").replace("_MDPT.dat","")
if os.path.isfile(source_txt+'PT/'+f):
PT= | pd.read_csv(source_txt+'PT/'+f) | pandas.read_csv |
#######
### Code to run ML models
#######
### imports
import re
import time
import numpy as np
import pandas as pd
import os
import pickle5 as pickle
import matplotlib.pyplot as plt
# ML imports
from sklearn.compose import ColumnTransformer
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GroupShuffleSplit
from sklearn.utils import resample
## modeling functions
def get_metrics(y_test, y_predicted):
'''generate evaluation scores according to the predicted y'''
accuracy = accuracy_score(y_test, y_predicted)
precision = precision_score(y_test, y_predicted, average='binary')
recall = recall_score(y_test, y_predicted, average='binary')
f1 = f1_score(y_test, y_predicted, average='binary')
return accuracy, precision, recall, f1
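# --- Illustrative sketch (not part of the original script) ---
# get_metrics() on a tiny synthetic binary prediction; the values are made up.
def _get_metrics_example():
    y_true = [1, 0, 1, 1, 0, 1]
    y_pred = [1, 0, 0, 1, 0, 1]
    return get_metrics(y_true, y_pred)  # (accuracy, precision, recall, f1)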
def estimate_models(model_list, model_list_names, X_train, y_train, X_test, y_test):
'''takes in a list of models, fits each one, and returns a DataFrame of feature importances/coefficients across all models'''
all_fi = []
for i in range(0, len(model_list)):
## pull out model
one_model = model_list[i]
print("fitting model: " + str(one_model))
## fit the model and evaluate
one_model.fit(X_train, y_train)
if model_list_names[i] == "gb_shallow":
fi = one_model.feature_importances_.reshape(1, -1)  # wrap to 2-D so fi[0] below is the full importance vector, matching coef_
else:
fi = one_model.coef_
fi_df = pd.DataFrame({'value': fi[0],
'coef_name': X_train.columns})
fi_df['model'] = model_list_names[i]
all_fi.append(fi_df)
fi_df = pd.concat(all_fi)
print("concatenated and returned object")
return fi_df
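# --- Illustrative sketch (not part of the original script) ---
# estimate_models() on a small synthetic problem built with the imported
# make_classification helper; feature names and model settings are made up.
def _estimate_models_example():
    X, y = make_classification(n_samples=200, n_features=5, random_state=0)
    X = pd.DataFrame(X, columns=[f"f{i}" for i in range(5)])
    models = [GradientBoostingClassifier(n_estimators=50),
              LogisticRegression(penalty="l1", C=0.1, solver="liblinear")]
    return estimate_models(models, ["gb_shallow", "lasso"], X, y, X, y)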
## shortened list of model objects
model_list = [GradientBoostingClassifier(criterion='friedman_mse', n_estimators=100),
LogisticRegression(penalty = "l1",max_iter=10000, C = 0.01, solver='liblinear')]
model_list_names = ["gb_shallow", "lasso"]
assert len(model_list) == len(model_list_names)
## define paths and read in data
DROPBOX_YOUR_PATH = "Dropbox/qss20_finalproj_rawdata/summerwork/"
MODEL_OUTPUT_PATH = "Dropbox/qss20_s21_proj/output/model_outputs/"
whd_train_init = pd.read_pickle(DROPBOX_YOUR_PATH + "clean/whd_training.pkl")
whd_test = | pd.read_pickle(DROPBOX_YOUR_PATH + "clean/whd_testing.pkl") | pandas.read_pickle |
import streamlit as st
import pandas as pd
from pyvis.network import Network
import networkx as nx
import matplotlib.pyplot as plt
import bz2
import pickle
import _pickle as cPickle
import pydot
import math
import numpy as num
def decompress_pickle(file):
data = bz2.BZ2File(file, 'rb')
data = cPickle.load(data)
return data
uploaded_files = st.sidebar.file_uploader("Choose files", accept_multiple_files=True)
# sidebar for navigating pages
page_nav = st.sidebar.selectbox("Select view:",('Document overviews','Focus concepts','Path views','Active Study view','Study phenomena','Study sets'))
@st.cache
def do_this_first(uploaded_files):
#st.write(st.__version__)
# Load any compressed pickle file
# for uploaded_file in uploaded_files:
# concepts = decompress_pickle(uploaded_file)
# st.write("filename:", uploaded_file.name)
filenames = [file.name for file in uploaded_files] # return this
import pandas as pd
Agg_Conceptdata = pd.DataFrame()
All_Conceptdata = pd.DataFrame()
Agg_np_to_sent = dict()
Agg_sent_to_npflat = dict()
Agg_sent_to_phen = dict()
Agg_phen_to_sent = dict()
Agg_att_to_sent = dict()
Agg_sent_to_att = dict()
Agg_ins_to_sent = dict()
Agg_sent_to_ins = dict()
Agg_set_to_sent = dict()
Agg_sent_to_set = dict()
Agg_np_to_forms = dict()
doc_to_np = dict()
np_to_doc = dict()
Agg_df = pd.DataFrame()
Agg_df = pd.DataFrame()
Agg_np_to_roles = dict()
Agg_sent_to_clt = dict()
Agg_sents = dict()
#Agg_sents_df = pd.DataFrame()
#Agg_docs_df = pd.DataFrame()
All_df = pd.DataFrame()
for uploaded_file in uploaded_files:
concepts = decompress_pickle(uploaded_file)
filename = uploaded_file.name
#st.write("filename:", uploaded_file.name)
Conceptdata = concepts['Conceptdata']
sent_to_npflat = concepts['sent_to_npflat']
np_to_sent = concepts['np_to_sent']
np_to_forms = concepts['np_to_forms']
sent_to_phen = concepts['sent_to_phen']
phen_to_sent = concepts['phen_to_sent']
sent_to_att = concepts['sent_to_att']
att_to_sent = concepts['att_to_sent']
att_to_sent = concepts['att_to_sent']
ins_to_sent = concepts['ins_to_sent']
sent_to_ins = concepts['sent_to_ins']
set_to_sent = concepts['set_to_sent']
sent_to_set = concepts['sent_to_set']
np_to_roles = concepts['np_to_roles']
sent_to_clt = concepts['sent_to_clt']
sents = concepts['sents']
df = concepts['df']
Conceptdata['docname'] = filename
Agg_Conceptdata = Agg_Conceptdata.append(Conceptdata,ignore_index=True)
Agg_sent_to_clt[filename.replace(".pbz2","")] = sent_to_clt
Agg_np_to_sent[filename.replace(".pbz2","")] = np_to_sent
Agg_sents[filename.replace(".pbz2","")] = sents
Agg_sent_to_npflat[filename.replace(".pbz2","")] = sent_to_npflat
Agg_sent_to_set[filename.replace(".pbz2","")] = sent_to_set
Agg_sent_to_att[filename.replace(".pbz2","")] = sent_to_att
Agg_sent_to_phen[filename.replace(".pbz2","")] = sent_to_phen
Agg_sent_to_ins[filename.replace(".pbz2","")] = sent_to_ins
Agg_df = Agg_df.append(df,ignore_index=True)
doc_to_np[filename] = list(np_to_sent.keys()) # return this
for np in np_to_sent:
# if np in Agg_np_to_sent:
# Agg_np_to_sent[np] = Agg_np_to_sent[np] + [(filename,s) for s in np_to_sent[np]]
# else:
# Agg_np_to_sent[np] = [(filename,s) for s in np_to_sent[np]]
if np in np_to_doc:
np_to_doc[np] = np_to_doc[np] + [filename]
else:
np_to_doc[np] = [filename]
for np in np_to_forms:
if np in Agg_np_to_forms:
Agg_np_to_forms[np] = Agg_np_to_forms[np] + np_to_forms[np]
else:
Agg_np_to_forms[np] = np_to_forms[np]
for np in np_to_roles:
if np in Agg_np_to_roles:
Agg_np_to_roles[np] = Agg_np_to_roles[np] + np_to_roles[np]
else:
Agg_np_to_roles[np] = np_to_roles[np]
for np in phen_to_sent:
if np in Agg_phen_to_sent:
Agg_phen_to_sent[np] = Agg_phen_to_sent[np] + [(filename,s) for s in phen_to_sent[np]]
else:
Agg_phen_to_sent[np] = [(filename,s) for s in phen_to_sent[np]]
for np in att_to_sent:
if np in Agg_att_to_sent:
Agg_att_to_sent[np] = Agg_att_to_sent[np] + [(filename,s) for s in att_to_sent[np]]
else:
Agg_att_to_sent[np] = [(filename,s) for s in att_to_sent[np]]
for np in set_to_sent:
if np in Agg_set_to_sent:
Agg_set_to_sent[np] = Agg_set_to_sent[np] + [(filename,s) for s in set_to_sent[np]]
else:
Agg_set_to_sent[np] = [(filename,s) for s in set_to_sent[np]]
for np in ins_to_sent:
if np in Agg_ins_to_sent:
Agg_ins_to_sent[np] = Agg_ins_to_sent[np] + [(filename,s) for s in ins_to_sent[np]]
else:
Agg_ins_to_sent[np] = [(filename,s) for s in ins_to_sent[np]]
#st.write(Agg_Conceptdata.columns)
All_Conceptdata = pd.DataFrame()
def most_common_form(np):
return pd.Series(Agg_np_to_forms[np]).value_counts().sort_values(ascending=False).index[0]
Agg_np_to_mcform = dict()
for np in Agg_np_to_forms:
Agg_np_to_mcform[np] = most_common_form(np)
All_Conceptdata = Agg_Conceptdata.groupby('Concept').agg(doc_Occurence = pd.NamedAgg('docname',lambda x: list(x)),
doc_Frequency = pd.NamedAgg('docname',lambda x: x.shape[0]),
Raw_Frequency = pd.NamedAgg('Frequency','sum'),
Mean = pd.NamedAgg('Mean','mean'),
Median = pd.NamedAgg('Median','mean'),
Sdev = pd.NamedAgg('Sdev','mean'),
Ext_IDF = pd.NamedAgg('IDF',num.nanmin))
All_Conceptdata['Mean_Frequency'] = All_Conceptdata['Raw_Frequency']/All_Conceptdata['doc_Frequency']
All_Conceptdata['normalized_RawFreq'] = All_Conceptdata['Raw_Frequency']/All_Conceptdata['Raw_Frequency'].max()
All_Conceptdata['normalized_MeanFreq'] = All_Conceptdata['Mean_Frequency']/All_Conceptdata['Mean_Frequency'].max()
All_Conceptdata['intIDF'] = All_Conceptdata['doc_Frequency'].apply(lambda x: math.log(len(filenames),2)-abs(math.log(1+x,2)))
All_Conceptdata['intmeanTFIDF'] = All_Conceptdata['normalized_MeanFreq']*All_Conceptdata['intIDF']
for filename in filenames:
colname = filename.replace(".pbz2","")
All_Conceptdata = pd.merge(left = All_Conceptdata,
right = Agg_Conceptdata.loc[Agg_Conceptdata['docname']==filename,['Concept','Frequency']],
how='left',
left_on = 'Concept',
right_on = 'Concept')
All_Conceptdata[colname+'_TF'] = All_Conceptdata['Frequency']
del All_Conceptdata['Frequency']
All_Conceptdata[colname+'_TF'].fillna(0,inplace=True)
All_Conceptdata[colname+'_IntTFIDF'] = All_Conceptdata[colname+'_TF']*All_Conceptdata['intIDF']
All_Conceptdata['MCForm'] = All_Conceptdata['Concept'].apply(lambda x: Agg_np_to_mcform[x])
All_Conceptdata['role_frac'] = All_Conceptdata['Concept'].apply(lambda x: dict( | pd.Series(Agg_np_to_roles[x]) | pandas.Series |
import pytest
import collections
from pathlib import Path
import pandas as pd
from mbf_genomics import DelayedDataFrame
from mbf_genomics.annotator import Constant, Annotator
import pypipegraph as ppg
from pypipegraph.testing import run_pipegraph, force_load
from pandas.testing import assert_frame_equal
from mbf_genomics.util import find_annos_from_column
class LenAnno(Annotator):
def __init__(self, name):
self.columns = [name]
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: ["%s%i" % (self.columns[0], len(df))] * len(df)}
)
@pytest.mark.usefixtures("no_pipegraph")
@pytest.mark.usefixtures("clear_annotators")
class Test_DelayedDataFrameDirect:
def test_create(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_create_from_df(self):
test_df = pd.DataFrame({"A": [1, 2]})
a = DelayedDataFrame("shu", test_df)
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_write(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
assert Path("sha").exists()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
fn = a.write()[1]
assert "/sha" in str(fn.parent.absolute())
assert fn.exists()
assert_frame_equal(pd.read_csv(fn, sep="\t"), test_df)
def test_write_excel(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
assert Path("sha").exists()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
fn = a.write("sha.xls")[1]
assert fn.exists()
assert_frame_equal(pd.read_excel(fn), test_df)
def test_write_excel2(self):
data = {}
for i in range(0, 257):
c = "A%i" % i
d = [1, 1]
data[c] = d
test_df = pd.DataFrame(data)
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
fn = a.write("sha.xls")[1]
assert fn.exists()
assert_frame_equal(pd.read_excel(fn), test_df)
def test_write_mangle(self):
test_df = pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert_frame_equal(a.df, test_df)
assert (a.non_annotator_columns == ["A", "B"]).all()
def mangle(df):
df = df.drop("A", axis=1)
df = df[df.B == "c"]
return df
fn = a.write("test.csv", mangle)[1]
assert fn.exists()
assert_frame_equal(pd.read_csv(fn, sep="\t"), mangle(test_df))
def test_magic(self):
test_df = pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
a = DelayedDataFrame("shu", lambda: test_df)
assert hash(a)
assert a.name in str(a)
assert a.name in repr(a)
def test_annotator(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += Constant("column", "value")
a.annotate()
assert "column" in a.df.columns
assert (a.df["column"] == "value").all()
def test_add_non_anno(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
with pytest.raises(TypeError):
a += 5
def test_annotator_wrong_columns(self):
class WrongConstant(Annotator):
def __init__(self, column_name, value):
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({"shu": self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
with pytest.raises(ValueError):
a += WrongConstant("column", "value")
def test_annotator_minimum_columns(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
assert "Direct" in str(a.load_strategy)
class MissingCalc(Annotator):
column_names = ["shu"]
with pytest.raises(AttributeError):
a += MissingCalc()
class EmptyColumnNames(Annotator):
columns = []
def calc(self, df):
return pd.DataFrame({})
with pytest.raises(IndexError):
a += EmptyColumnNames()
class EmptyColumnNamesButCacheName(Annotator):
cache_name = "shu"
columns = []
def calc(self, df):
return pd.DataFrame({})
with pytest.raises(IndexError):
a += EmptyColumnNamesButCacheName()
class MissingColumnNames(Annotator):
def calc(self, df):
pass
with pytest.raises(AttributeError):
a += MissingColumnNames()
class NonListColumns(Annotator):
columns = "shu"
def calc(self, df):
pass
with pytest.raises(ValueError):
a += NonListColumns()
def test_DynamicColumNames(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
class Dynamic(Annotator):
@property
def columns(self):
return ["a"]
def calc(self, df):
return pd.DataFrame({"a": ["x", "y"]})
a += Dynamic()
a.annotate()
assert_frame_equal(
a.df, pd.DataFrame({"A": [1, 2], "B": ["c", "d"], "a": ["x", "y"]})
)
def test_annos_added_only_once(self):
count = [0]
class CountingConstant(Annotator):
def __init__(self, column_name, value):
count[0] += 1
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({self.columns[0]: self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
c = CountingConstant("hello", "c")
a += c
a.annotate()
assert "hello" in a.df.columns
assert count[0] == 1
a += c # this get's ignored
def test_annos_same_column_different_anno(self):
count = [0]
class CountingConstant(Annotator):
def __init__(self, column_name, value):
count[0] += 1
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({self.columns[0]: self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
c = CountingConstant("hello", "c")
a += c
a.annotate()
assert "hello" in a.df.columns
assert count[0] == 1
c = CountingConstant("hello2", "c")
a += c
a.annotate()
assert "hello2" in a.df.columns
assert count[0] == 2
d = CountingConstant("hello2", "d")
assert c is not d
with pytest.raises(ValueError):
a += d
def test_annos_same_column_different_anno2(self):
class A(Annotator):
cache_name = "hello"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "hello2"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += A()
with pytest.raises(ValueError):
a += B()
def test_annos_dependening(self):
class A(Annotator):
cache_name = "hello"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "hello2"
columns = ["ab"]
def calc(self, df):
return df["aa"] + "b"
def dep_annos(self):
return [A()]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += B()
a.annotate()
assert "ab" in a.df.columns
assert "aa" in a.df.columns
assert (a.df["ab"] == (a.df["aa"] + "b")).all()
def test_annos_dependening_none(self):
class A(Annotator):
cache_name = "hello"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "hello2"
columns = ["ab"]
def calc(self, df):
return df["aa"] + "b"
def dep_annos(self):
return [None, A(), None]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += B()
a.annotate()
assert "ab" in a.df.columns
assert "aa" in a.df.columns
assert (a.df["ab"] == (a.df["aa"] + "b")).all()
def test_filtering(self):
class A(Annotator):
cache_name = "A"
columns = ["aa"]
def calc(self, df):
return | pd.DataFrame({self.columns[0]: "a"}, index=df.index) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from config import START,END,INTERVAL,INIT_H,END_H,SATELLITE,PRODUCT,CHANNEL,TMP,OUTPUT,BBOX,PROJ,INTERP,DQC_THRESHOLD
import s3fs
import pandas as pd
import netCDF4 as nc
import requests
from datetime import datetime,timedelta
import pathlib
import errno
import os
import netCDF4
import logging
import warnings
import re
warnings.filterwarnings("ignore")
server = SATELLITE+'/'+PRODUCT+'/'
aws = s3fs.S3FileSystem(anon=True)
def file_list():
days = pd.date_range(start=START, end=END,freq='D',tz='America/Belem').strftime('%d/%m/%Y')
hours = pd.date_range(start=INIT_H,end=END_H, freq=INTERVAL).strftime('%H:%M')
data_range = []
for d in days:
for h in hours:
tm = datetime.strptime(d+' '+h, '%d/%m/%Y %H:%M')
data_range.append(tm)
return data_range
def regex_strack(x,y):
value = ''
try:
value = re.search(r''+str(y),x).group(0)
except:
pass
return value
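# --- Illustrative sketch (not part of the original script) ---
# regex_strack returns the first match of the pattern, or '' when nothing
# matches; the filename below is a made-up GOES-style name.
def _regex_strack_example():
    return regex_strack('OR_ABI-L2-CMIPF-M6C13_G16', r'C\d{2}')  # 'C13'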
def aws_file_list(list_of_files):
logging.basicConfig(filename='missing.txt', filemode='w', format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
download = | pd.DataFrame(columns=['timestamp','url']) | pandas.DataFrame |
from typing import Any, Dict, Optional, Tuple, Union
from collections import defaultdict
from datetime import datetime
from pathlib import Path
import json
import re
import uuid
from pandas import DataFrame
from schematics.exceptions import ValidationError
import lunchbox.tools as lbt
import numpy as np
from hidebound.core.parser import AssetNameParser
from hidebound.core.specification_base import SpecificationBase
import hidebound.core.tools as tools
# ------------------------------------------------------------------------------
'''
A library of tools for Database to use in construction of its central DataFrame.
'''
def _add_specification(data, specifications):
# type: (DataFrame, Dict[str, SpecificationBase]) -> None
'''
Adds specification data to given DataFrame.
Columns added:
* specification
* specification_class
* file_error
Args:
data (DataFrame): DataFrame.
specifications (dict): Dictionary of specifications.
'''
def get_spec(filename):
# type: (str) -> Dict
output = lbt.try_(
AssetNameParser.parse_specification, filename, 'error'
)
if not isinstance(output, dict):
output = dict(file_error=str(output))
for key in ['specification', 'file_error']:
if key not in output.keys():
output[key] = np.nan
return output
spec = data.filename.apply(get_spec).tolist()
spec = | DataFrame(spec) | pandas.DataFrame |
"""
Map CEMS CC generators to EIA CC units
"""
import logging
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
def method_1(boilers, eia_plants):
"""
Method 1 to map boilers to eia plants
"""
# Create boiler-specific unit (Method 1)
no_eia_plant = boilers.loc[~np.in1d(boilers["Plant Code"], eia_plants), :]
no_eia_plant = no_eia_plant.reset_index()
no_eia_plant["Unit Code"] = no_eia_plant["Boiler ID"]
no_eia_plant["Unit Code Method"] = 1
return no_eia_plant
def method_2_3(boilers23, boilers_generators, generators):
"""
Method 2 and 3
"""
# Get boiler -> generator matches (Methods 2 + 3)
boilers_units = boilers23.join(boilers_generators,
on=["Plant Code", "Boiler ID"], how="inner")
boilers_units = boilers_units.join(generators[["Unit Code"]],
on=["Plant Code", "Generator ID"],
how="inner")
boilers_units = boilers_units.reset_index().drop_duplicates(["CEMSUnit",
"Unit Code"])
gen_missing_unit_code = boilers_units["Unit Code"].isna()
# Assign unit code directly (Method 2)
direct_result = boilers_units.loc[~gen_missing_unit_code, :].copy()
direct_result["Unit Code Method"] = 2
# Create generator-specific unit (Method 3)
direct_nounit_result = boilers_units.loc[gen_missing_unit_code, :].copy()
direct_nounit_result["Unit Code"] = direct_nounit_result["Generator ID"]
direct_nounit_result["Unit Code Method"] = 3
return direct_result, direct_nounit_result
def method_4(boilers4567, generators_cc):
"""
Method 4
"""
# Check for no CA/CTs
boilers_plants = boilers4567.loc[~np.in1d(boilers4567["Plant Code"],
generators_cc["Plant Code"]), :].copy()
# Create boiler-specific unit (Method 4)
boilers_plants["Unit Code"] = boilers_plants["Boiler ID"].astype(str)
boilers_plants["Unit Code Method"] = 4
return boilers_plants.reset_index()
def method_5(boilers4567, generators_cc):
"""
Method 5
"""
# Check for single unit code among all CA/CTs in plant
pos = np.in1d(generators_cc["Plant Code"], boilers4567["Plant Code"])
plants_units = generators_cc.loc[pos, ["Plant Code", "Unit Code"]]
plants_units = plants_units.drop_duplicates().set_index("Plant Code")
plants_units = plants_units["Unit Code"]
unit_code_count = plants_units.groupby(level="Plant Code").nunique()
pos = unit_code_count == 1
single_unit_plants = unit_code_count.loc[pos].index.get_values()
# Assign all boilers in plant to same unit code if single unit code exists
# (Method 5)
single_unit_plants = plants_units.loc[single_unit_plants]
result = boilers4567.join(single_unit_plants, on="Plant Code",
how="right").reset_index()
result["Unit Code Method"] = 5
return result
def method_6_7(boilers4567, generators_cc):
"""
Method 6 and 7
"""
# Check for nonsingle unit code among all CA/CTs in plant
pos = np.in1d(generators_cc["Plant Code"], boilers4567["Plant Code"])
plants_units = generators_cc.loc[pos, ["Plant Code", "Unit Code"]]
plants_units = plants_units.drop_duplicates().set_index("Plant Code")
plants_units = plants_units["Unit Code"]
unit_code_count = plants_units.groupby(level="Plant Code").nunique()
pos = unit_code_count != 1
nonsingle_unit_plants = unit_code_count.loc[pos].index.get_values()
# Group boilers and generators by plant
boiler_groups = boilers4567.loc[
np.in1d(boilers4567["Plant Code"], nonsingle_unit_plants),
:].reset_index().groupby("Plant Code")
gen_groups = generators_cc.loc[
generators_cc["Prime Mover"] == "CT", :].groupby("Plant Code")
colnames = ["Plant Code", "Boiler ID", "Generator ID", "Unit Code"]
result6 = pd.DataFrame(columns=colnames)
result7 = pd.DataFrame(columns=colnames)
# Match boilers and generators by sorting
for plant in nonsingle_unit_plants:
bs = boiler_groups.get_group(plant).sort_values("Boiler ID")
gs = gen_groups.get_group(plant).sort_values("Generator ID")
n_bs = len(bs.index)
n_gs = len(gs.index)
# Match boilers to generator unit codes (Method 6)
if n_bs <= n_gs:
gs = gs.head(n_bs)
result6 = result6.append(pd.DataFrame({
"CEMSUnit": np.array(bs["CEMSUnit"]),
"Plant Code": plant,
"Boiler ID": np.array(bs["Boiler ID"]),
"Generator ID": np.array(gs["Generator ID"]),
"Unit Code": np.array(gs["Unit Code"])}), sort=True)
# Match boilers to generator unit codes,
# creating new units for extra boilers (Method 7)
else:
bs_rem = bs.tail(n_bs - n_gs)
bs = bs.head(n_gs)
df = pd.DataFrame({"CEMSUnit": np.array(bs["CEMSUnit"]),
"Plant Code": plant,
"Boiler ID": np.array(bs["Boiler ID"]),
"Generator ID": np.array(gs["Generator ID"]),
"Unit Code": np.array(gs["Unit Code"])})
result7 = result7.append(df, sort=True)
df = pd.DataFrame({"CEMSUnit": np.array(bs_rem["CEMSUnit"]),
"Plant Code": plant,
"Boiler ID": np.array(bs_rem["Boiler ID"]),
"Unit Code": np.array(bs_rem["Boiler ID"])})
result7 = result7.append(df, sort=True)
result6["Unit Code Method"] = 6
result7["Unit Code Method"] = 7
return result6, result7
if __name__ == "__main__":
# Load CEMS boilers
boilers = pd.read_csv("../bin/emission_01-17-2017.csv",
usecols=[2, 3, 25], header=0,
names=["Plant Code", "Boiler ID", "Unit Type"])
boilers = boilers.loc[["combined cycle" in ut.lower()
for ut in boilers["Unit Type"]], :]
boilers.drop("Unit Type", axis=1, inplace=True)
index = boilers["Plant Code"].astype(str) + "_" + boilers["Boiler ID"]
boilers.index = index
boilers.index.name = "CEMSUnit"
# Load boiler-generator mapping
boilers_generators = pd.read_excel(
"../bin/6_1_EnviroAssoc_Y2017.xlsx", "Boiler Generator",
header=1, usecols=[2, 4, 5],
index_col=[0, 1], skipfooter=1)
def read_generators(f, sheet):
"""
Read generator from excel sheet
"""
return f.parse(sheet, header=1, usecols=[2, 6, 8, 9],
index_col=[0, 1], skipfooter=1)
# Load generator-unit mapping
with | pd.ExcelFile("../bin/3_1_Generator_Y2017.xlsx") | pandas.ExcelFile |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import investpy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# ### Get funds in Germany
funds = investpy.get_etfs(country='germany')
print(f"There are {len(funds)} funds avaliable in Germany")
# ## I am mostly interested in accumulating BlackRock (and later Amundi) ETFs
# So we are searching for iShares with Acc fields in the names.
n = 0
for fund in funds.name:
if (fund.find('iShares') != -1 and fund.find('Acc') != -1) or (fund.find('Lyxor') != -1 and fund.find('Acc') != -1): # or (fund.find('Amundi') != -1)
if n == 0:
df = investpy.etfs.get_etf_information(etf=fund, country='germany', as_json=False)
n += 1
else:
df_2 = investpy.etfs.get_etf_information(etf=fund, country='germany', as_json=False)
df = | pd.concat([df,df_2]) | pandas.concat |
"""Preprocessing data methods."""
import numpy as np
import pandas as pd
from autots.tools.impute import FillNA
def remove_outliers(df, std_threshold: float = 3):
"""Replace outliers with np.nan.
https://stackoverflow.com/questions/23199796/detect-and-exclude-outliers-in-pandas-data-frame
Args:
df (pandas.DataFrame): DataFrame containing numeric data, DatetimeIndex
std_threshold (float): The number of standard deviations away from mean to count as outlier.
"""
df = df[np.abs(df - df.mean()) <= (std_threshold * df.std())]
return df
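# --- Illustrative sketch (not part of the original module) ---
# remove_outliers() on a toy series: with std_threshold=2 the single extreme
# value is replaced by NaN and everything else is left untouched.
def _remove_outliers_example():
    df = pd.DataFrame({'a': [1.0] * 9 + [50.0]},
                      index=pd.date_range('2020-01-01', periods=10))
    return remove_outliers(df, std_threshold=2)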
def clip_outliers(df, std_threshold: float = 3):
"""Replace outliers above threshold with that threshold. Axis = 0.
Args:
df (pandas.DataFrame): DataFrame containing numeric data
std_threshold (float): The number of standard deviations away from mean to count as outlier.
"""
df_std = df.std(axis=0, skipna=True)
df_mean = df.mean(axis=0, skipna=True)
lower = df_mean - (df_std * std_threshold)
upper = df_mean + (df_std * std_threshold)
df2 = df.clip(lower=lower, upper=upper, axis=1)
return df2
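# --- Illustrative sketch (not part of the original module) ---
# clip_outliers() on the same kind of toy series: the 50.0 is clipped down to
# mean + 2 * std (about 36.9 here) instead of being dropped.
def _clip_outliers_example():
    df = pd.DataFrame({'a': [1.0] * 9 + [50.0]},
                      index=pd.date_range('2020-01-01', periods=10))
    return clip_outliers(df, std_threshold=2)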
def simple_context_slicer(df, method: str = 'None', forecast_length: int = 30):
"""Condensed version of context_slicer with more limited options.
Args:
df (pandas.DataFrame): training data frame to slice
method (str): Option to slice dataframe
'None' - return unaltered dataframe
'HalfMax' - return half of dataframe
'ForecastLength' - return dataframe equal to length of forecast
'2ForecastLength' - return dataframe equal to twice length of forecast
(also takes 4, 6, 8, 10 in addition to 2)
"""
if method in [None, "None"]:
return df
df = df.sort_index(ascending=True)
if 'forecastlength' in str(method).lower():
len_int = int([x for x in str(method) if x.isdigit()][0])
return df.tail(len_int * forecast_length)
elif method == 'HalfMax':
return df.tail(int(len(df.index) / 2))
elif str(method).isdigit():
return df.tail(int(method))
else:
print("Context Slicer Method not recognized")
return df
"""
if method == '2ForecastLength':
return df.tail(2 * forecast_length)
elif method == '6ForecastLength':
return df.tail(6 * forecast_length)
elif method == '12ForecastLength':
return df.tail(12 * forecast_length)
elif method == 'ForecastLength':
return df.tail(forecast_length)
elif method == '4ForecastLength':
return df.tail(4 * forecast_length)
elif method == '8ForecastLength':
return df.tail(8 * forecast_length)
elif method == '10ForecastLength':
return df.tail(10 * forecast_length)
"""
class Detrend(object):
"""Remove a linear trend from the data."""
def __init__(self):
self.name = 'Detrend'
def fit(self, df):
"""Fits trend for later detrending.
Args:
df (pandas.DataFrame): input dataframe
"""
from statsmodels.regression.linear_model import GLS
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
# formerly df.index.astype( int ).values
y = df.values
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
# from statsmodels.tools import add_constant
# X = add_constant(X, has_constant='add')
self.model = GLS(y, X, missing='drop').fit()
self.shape = df.shape
return self
def fit_transform(self, df):
"""Fit and Return Detrended DataFrame.
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def transform(self, df):
"""Return detrended data.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
# formerly X = df.index.astype( int ).values
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
# from statsmodels.tools import add_constant
# X = add_constant(X, has_constant='add')
df = df.astype(float) - self.model.predict(X)
return df
def inverse_transform(self, df):
"""Return data to original form.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
# from statsmodels.tools import add_constant
# X = add_constant(X, has_constant='add')
df = df.astype(float) + self.model.predict(X)
return df
class StatsmodelsFilter(object):
"""Irreversible filters."""
def __init__(self, method: str = 'bkfilter'):
self.method = method
def fit(self, df):
"""Fits filter.
Args:
df (pandas.DataFrame): input dataframe
"""
return self
def fit_transform(self, df):
"""Fit and Return Detrended DataFrame.
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def transform(self, df):
"""Return detrended data.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
if self.method == 'bkfilter':
from statsmodels.tsa.filters import bk_filter
cycles = bk_filter.bkfilter(df, K=1)
cycles.columns = df.columns
df = (df - cycles).fillna(method='ffill').fillna(method='bfill')
elif self.method == 'cffilter':
from statsmodels.tsa.filters import cf_filter
cycle, trend = cf_filter.cffilter(df)
cycle.columns = df.columns
df = df - cycle
return df
def inverse_transform(self, df):
"""Return data to original form.
Args:
df (pandas.DataFrame): input dataframe
"""
return df
class SinTrend(object):
"""Modelling sin."""
def __init__(self):
self.name = 'SinTrend'
def fit_sin(self, tt, yy):
"""Fit sin to the input time sequence, and return fitting parameters "amp", "omega", "phase", "offset", "freq", "period" and "fitfunc"
from user unsym @ https://stackoverflow.com/questions/16716302/how-do-i-fit-a-sine-curve-to-my-data-with-pylab-and-numpy
"""
import scipy.optimize
tt = np.array(tt)
yy = np.array(yy)
ff = np.fft.fftfreq(len(tt), (tt[1] - tt[0])) # assume uniform spacing
Fyy = abs(np.fft.fft(yy))
guess_freq = abs(
ff[np.argmax(Fyy[1:]) + 1]
) # excluding the zero frequency "peak", which is related to offset
guess_amp = np.std(yy) * 2.0 ** 0.5
guess_offset = np.mean(yy)
guess = np.array([guess_amp, 2.0 * np.pi * guess_freq, 0.0, guess_offset])
def sinfunc(t, A, w, p, c):
return A * np.sin(w * t + p) + c
popt, pcov = scipy.optimize.curve_fit(sinfunc, tt, yy, p0=guess, maxfev=10000)
A, w, p, c = popt
# f = w/(2.*np.pi)
# fitfunc = lambda t: A * np.sin(w*t + p) + c
return {
"amp": A,
"omega": w,
"phase": p,
"offset": c,
} # , "freq": f, "period": 1./f, "fitfunc": fitfunc, "maxcov": np.max(pcov), "rawres": (guess,popt,pcov)}
def fit(self, df):
"""Fits trend for later detrending
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
self.sin_params = pd.DataFrame()
# make this faster
for column in df.columns:
try:
y = df[column].values
vals = self.fit_sin(X, y)
current_param = pd.DataFrame(vals, index=[column])
except Exception as e:
print(e)
current_param = pd.DataFrame(
{"amp": 0, "omega": 1, "phase": 1, "offset": 1}, index=[column]
)
self.sin_params = pd.concat([self.sin_params, current_param], axis=0)
self.shape = df.shape
return self
def fit_transform(self, df):
"""Fits and Returns Detrended DataFrame
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def transform(self, df):
"""Returns detrended data
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
sin_df = pd.DataFrame()
# make this faster
for index, row in self.sin_params.iterrows():
yy = pd.DataFrame(
row['amp'] * np.sin(row['omega'] * X + row['phase']) + row['offset'],
columns=[index],
)
sin_df = pd.concat([sin_df, yy], axis=1)
df_index = df.index
df = df.astype(float).reset_index(drop=True) - sin_df.reset_index(drop=True)
df.index = df_index
return df
def inverse_transform(self, df):
"""Returns data to original form
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
X = | pd.to_numeric(df.index, errors='coerce', downcast='integer') | pandas.to_numeric |
import ccxt
from datetime import datetime, timedelta, timezone
import math
import argparse
import pandas as pd
def parse_args():
parser = argparse.ArgumentParser(description = 'CCXT Market Downloader')
parser.add_argument('-s','--symbol',
type=str,
required=True,
help='The Symbol of the Instrument/Currency Pair To Download')
parser.add_argument('-e','--exchange',
type=str,
required=True,
help='The exchange to download from')
parser.add_argument('-t','--timeframe',
type=str,
default='1d',
choices=['1m', '5m','15m', '30m','1h', '2h', '3h', '4h', '6h', '12h', '1d', '1M', '1y'],
help='The timeframe to download')
parser.add_argument('--debug',
action ='store_true',
help=('Print Sizer Debugs'))
return parser.parse_args()
args = parse_args()
try:
exchange = getattr (ccxt, args.exchange) ()
except AttributeError:
print('-'*36,' ERROR ','-'*35)
print('Exchange "{}" not found. Please check the exchange is supported.'.format(args.exchange))
print('-'*80)
quit()
if exchange.has["fetchOHLCV"] == False:
print('-'*36,' ERROR ','-'*35)
print('{} does not support fetching OHLC data. Please use another exchange'.format(args.exchange))
print('-'*80)
quit()
if args.timeframe not in exchange.timeframes:
print('-'*36,' ERROR ','-'*35)
print('The requested timeframe ({}) is not available from {}\n'.format(args.timeframe,args.exchange))
print('Available timeframes are:')
for key in exchange.timeframes.keys():
print(' - ' + key)
print('-'*80)
quit()
exchange.load_markets()
if args.symbol not in exchange.symbols:
print('-'*36,' ERROR ','-'*35)
print('The requested symbol ({}) is not available from {}\n'.format(args.symbol,args.exchange))
print('Available symbols are:')
for key in exchange.symbols:
print(' - ' + key)
print('-'*80)
quit()
data = exchange.fetch_ohlcv(args.symbol, args.timeframe)
header = ['TimeStamp', 'Open', 'High', 'Low', 'Close', 'Volume']
df = | pd.DataFrame(data, columns=header) | pandas.DataFrame |
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
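# --- Illustrative sketch (not part of the original test module) ---
# The statements above are looked up per query name and per flavor, e.g.:
def _sql_strings_example():
    return SQL_STRINGS["read_parameters"]["sqlite"]
    # -> 'SELECT * FROM iris WHERE Name=? AND SepalLength=?'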
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
        # reading the query all at once
res1 = | sql.read_sql_query("select * from test_chunksize", self.conn) | pandas.io.sql.read_sql_query |
import unittest
import pandas as pd
from dataprofiler.profilers.unstructured_text_profile import TextProfiler
from dataprofiler.profilers.profiler_options import TextProfilerOptions
class TestUnstructuredTextProfile(unittest.TestCase):
def test_text_profile_update_and_name(self):
text_profile = TextProfiler("Name")
sample = pd.Series(["Hello my name is: Grant.!!!",
"Bob and \"Grant\", 'are' friends"])
text_profile.update(sample)
self.assertEqual("Name", text_profile.name)
def test_vocab(self):
text_profile = TextProfiler("Name")
sample = pd.Series(["Hello my name is: Grant.!!!",
"Bob and \"Grant\", 'are' friends"])
text_profile.update(sample)
profile = text_profile.profile
# Assert vocab is correct
expected_vocab = [' ', '!', '"', "'", ',', '.', ':', 'B', 'G', 'H',
'a', 'b', 'd', 'e', 'f', 'i', 'l', 'm', 'n', 'o',
'r', 's', 't', 'y']
self.assertListEqual(sorted(expected_vocab), sorted(profile['vocab']))
# Update the data again
sample = pd.Series(["Grant knows how to code",
"Grant will code with Bob"])
text_profile.update(sample)
profile = text_profile.profile
# Assert vocab is correct
expected_vocab = [' ', '!', '"', "'", ',', '.', ':', 'B', 'G', 'H',
'a', 'b', 'c', 'd', 'e', 'f', 'h', 'i', 'k', 'l',
'm', 'n', 'o', 'r', 's', 't', 'w', 'y']
self.assertListEqual(sorted(expected_vocab), sorted(profile['vocab']))
def test_words_and_word_count(self):
text_profile = TextProfiler("Name")
sample = pd.Series(["Hello my name is: Grant.!!!",
"Bob and \"Grant\", 'are' friends"])
text_profile.update(sample)
profile = text_profile.profile
# Assert words is correct and stop words are not present
expected_words = ['Hello', 'name', 'Grant', 'Bob', 'friends']
self.assertListEqual(expected_words, profile['words'])
self.assertNotIn("is", profile['words'])
# Assert word counts are correct
expected_word_count = {'Hello': 1, 'name': 1, 'Grant': 2, 'Bob': 1,
'friends': 1}
self.assertDictEqual(expected_word_count, profile['word_count'])
# Update the data again
sample = pd.Series(["Grant knows how to code",
"Grant will code with Bob"])
text_profile.update(sample)
profile = text_profile.profile
# Assert words is correct and stop words are not present
expected_words = ['Hello', 'name', 'Grant', 'Bob', 'friends', 'knows',
'code']
self.assertListEqual(expected_words, profile['words'])
self.assertNotIn("with", profile['words'])
# Assert word counts are correct
expected_word_count = {'Hello': 1, 'name': 1, 'Grant': 4, 'Bob': 2,
'friends': 1, 'knows': 1, 'code': 2}
self.assertDictEqual(expected_word_count, profile['word_count'])
def test_sample_size(self):
text_profile = TextProfiler("Name")
sample = pd.Series(["Hello my name is: Grant.!!!",
"Bob and \"Grant\", 'are' friends"])
text_profile.update(sample)
# Assert sample size is accurate
self.assertEqual(2, text_profile.sample_size)
# Update the data again
sample = pd.Series(["Grant knows how to code",
"Grant will code with Bob"])
text_profile.update(sample)
# Assert sample size is accurate
self.assertEqual(4, text_profile.sample_size)
def test_timing(self):
text_profile = TextProfiler("Name")
sample = pd.Series(["Hello my name is: Grant.!!!",
"Bob and \"Grant\", 'are' friends"])
text_profile.update(sample)
profile = text_profile.profile
# Assert timing is occurring
self.assertIn("vocab", profile["times"])
self.assertIn("words", profile["times"])
def test_merge_profiles(self):
text_profile1 = TextProfiler("Name")
sample = | pd.Series(["Hello my name is: Grant.!!!"]) | pandas.Series |
import numpy as np
import pandas as pd
import pathlib
import operator
class typyDB(object):
''' A database for storing simulation data
Attributes
----------
trial_index: pd.DataFrame, size (ntrials,ntlabels)
Table of trials and parameters describing trials. The index of this
DataFrame should be unique as it identifies the trial in other tables.
data_index: pd.DataFrame, size (ndata,ndlabels)
Table of data for all trials. Each row in the trial_index corresponds
to many rows in this table by the trial_id column.
data: dict of pd.DataFrames
dictionary keyed by data length (i.e. vector length) with DataFrames as
values. Each DataFrame stores vector data where all vectors in a
dataframe are the same length corresponding to the dict key.
'''
def __init__(self,trial_labels=None,data_labels=None,path=None,prefix=None):
'''Constructor
User should specify either both trial_ and data_labels or both path and
prefix. The former creates a new and empty db while the latter attempts
to load a db from disk.
Arguments:
----------
trial_labels: list
column labels for trial_index
data_labels: list
column labels for trial_index
path: str
path to directory containing pickled db
prefix: str
filename prefix of pickled db
'''
if (trial_labels is not None) and (data_labels is not None):
self.trial_labels = trial_labels
self.data_labels = data_labels
self.trial_index = pd.DataFrame(columns=trial_labels)
self.trial_index.name = 'trial_id'
# We add three columns to the data_index
## trial_id is for joining with trial_index
## vector_id is for joining with a data table
        ## length is for selecting the data table that holds vectors of that length
self.data_index= pd.DataFrame(columns=['trial_id','vector_id','length'] + data_labels)
self.data = {}
elif (path is not None) and (prefix is not None):
self.load(path,prefix)
else:
err = 'Incompatible argument to db constructor.'
err+= '\n Either specify both trial_labels and data_labels or'
err+= '\n specify both path and prefix.'
raise ValueError(err)
def save(self,path,prefix):
'''Save db to disk
Arguments
---------
path: str
path to directory containing pickled db
prefix: str
filename prefix of pickled db
'''
path = pathlib.Path(path)
fname = path / (prefix + '-tindex.pkl')
self.trial_index.to_pickle(fname)
fname = path / (prefix + '-dindex.pkl')
self.data_index.to_pickle(fname)
for k,v in self.data.items():
fname = path / (prefix + '-{}-data.pkl'.format(k))
v.to_pickle(fname)
def load(self,path,prefix):
'''load db from disk
Arguments
---------
path: str
path to directory containing pickled db
prefix: str
filename prefix of pickled db
'''
path = pathlib.Path(path)
fname = path / (prefix + '-tindex.pkl')
self.trial_index = pd.read_pickle(fname)
self.trial_labels = list(self.trial_index.columns)
fname = path / (prefix + '-dindex.pkl')
self.data_index = pd.read_pickle(fname)
# Must strip columns added in __init__() so that add_data() can created
# temporary DataFrame without these columns
self.data_labels = list(self.data_index.columns)
self.data_labels.remove('trial_id')
self.data_labels.remove('vector_id')
self.data_labels.remove('length')
self.data = {}
for fname in path.glob(prefix + '-*-data.pkl'):
k = int(str(fname).split('-')[-2])
self.data[k] = pd.read_pickle(fname)
def validate_trial_key(self,trial_key):
'''Try to sanity check passed trial_keys.'''
try:
nkeys = np.shape(trial_key)[1] # list of trial dkeys
except IndexError:
nkeys = np.shape(trial_key)[0] # single trial dkeys
if nkeys != self.trial_index.shape[-1]:
err = 'trial_key shape mismatched with trial_index'
err += '\ntrial_key shape: {}'.format(np.shape(trial_key))
            err += '\ntrial_index shape: {}'.format(self.trial_index.shape)
raise ValueError(err)
def get_trial(self,trial_key):
'''Returns first row matching trial_key
Search through trial_index and return the first row and row_number that
matches trial_key.
Arguments
---------
trial_key: list
row-values to search for in trial_index
Returns
-------
trial: pd.DataFrame or None
If a matching row is found: DataFrame of matching row
If not found: None
trial_id: pd.DataFrame or None
If a matching row is found: row number of matching row
If not found: None
'''
self.validate_trial_key(trial_key)
#all columns must match
match = (self.trial_index==np.asarray(trial_key)).all(1)
if match.any(): #trial found!
#iloc[0] gets first match
trial = self.trial_index.loc[match].iloc[0]
trial_id = self.trial_index.loc[match].index[0]
else: #no trial found!
trial = None
trial_id = None
return trial,trial_id
def add_trials(self,trial_keys):
'''Add several trials to trial_index
.. note::
Duplicates entries are removed from the trial_index using the
DataFrame.drop_duplicates() command.
Arguments
---------
trial_keys: list of lists, or list of dict
List of rows to be added to the trial_index. Each value in the list
is a list of column values either as a sublist or a dictionary.
Example
-------
.. code-block:: python
trial_labels = ['dispersity','mass','conc']
data_labels = ['name','date','notes']
db = typyDB(trial_labels=trial_labels,data_labels=data_labels)
trial_keys = []
trial_keys.append({'dispersity':1.5,'mass':2.25,'conc':0.3})
trial_keys.append([1.25,2.25,0.3])
trial_keys.append({'dispersity':1.45,'mass':2.15,'conc':0.2})
db.add_trials(trial_keys)
db.trial_index #show all trials
'''
self.validate_trial_key(trial_keys)
new_trials = pd.DataFrame(trial_keys,columns=self.trial_labels)
try:
shift_val = max(self.trial_index.index)+1
except ValueError:
shift_val = 0
new_trials.index += shift_val
self.trial_index = pd.concat([self.trial_index,new_trials])
self.trial_index.drop_duplicates(inplace=True)
def add_trial(self,trial_key):
'''Add a single trial to trial_index
.. note::
Duplicates entries are not added to the trial_index
Arguments
---------
        trial_key: list or dict
            Row to be added to the trial_index, given either as a list of
            column values or as a dictionary keyed by column label.
Returns
-------
trial: pd.DataFrame
DataFrame row corresponding to the added trial in the trial_index
trial_id: int
pandas index to the trial_index DataFrame
Example
-------
.. code-block:: python
trial_labels = ['dispersity','mass','conc']
data_labels = ['name','date','notes']
db = typyDB(trial_labels=trial_labels,data_labels=data_labels)
            db.add_trial([1.25, 2.25, 0.3])
db.trial_index #show all trials
'''
self.validate_trial_key(trial_key)
# look for trial in trial_index
trial,trial_id = self.get_trial(trial_key)
if trial_id is None: #need to add
try:
trial_id = max(self.trial_index.index) + 1
except ValueError:
trial_id = 0
self.trial_index.loc[trial_id] = trial_key
trial = self.trial_index.loc[trial_id]
else: # Do nothing because trial is already in trial_index
pass
return trial,trial_id
def add_data(self,data_keys,data,trial_key=None,trial_id=None):
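        '''Add rows of vector data (and their metadata) for a given trial.

        Arguments
        ---------
        data_keys: list of lists, or list of dict
            one metadata row per data vector, with values for the data_labels columns
        data: 2D array-like
            ndata rows of equal length; vectors are stored in self.data keyed by their length
        trial_key, trial_id: list / int
            the trial the data belongs to; at least one of them must identify
            an existing row of trial_index
        '''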
if (trial_key is None) and (trial_id is None):
raise ValueError('Must specify either trial_key or trial_id')
elif trial_id is None:
_,trial_id = self.get_trial(trial_key)
if trial_id is None:
raise ValueError('trial_id not specified or not found in trial_index')
if len(data_keys)!=len(data):
raise ValueError('data_keys and data do not have the same number of entries')
if np.ndim(data)!=2:
raise ValueError('data array must be 2D with ndata rows of the same column length')
data = pd.DataFrame(data)
ndata,ldata = data.shape
if not (ldata in self.data):
istart = 0
self.data[ldata] = data
self.data[ldata].index.name = 'vector_id'
else:
istart = self.data[ldata].shape[0]
self.data[ldata] = pd.concat([self.data[ldata],data],ignore_index=True,sort=True)
self.data[ldata].index.name = 'vector_id'
data_index = pd.DataFrame(data_keys,columns=self.data_labels)
data_index['trial_id'] = trial_id
data_index['vector_id'] = np.arange(istart,istart+ndata,dtype=int)
data_index['length'] = ldata
self.data_index = pd.concat([self.data_index,data_index],ignore_index=True,sort=True)
# need to make sure the trial_id column is mergable with the trial_index index
self.data_index.trial_id = self.data_index.trial_id.astype(int)
def build_mask(self,sel,index):
'''
Arguments
---------
sel: dict
dictionary of specifiers, where the keys of the dictionary match
columns of the index. See below for more information.
            Case 1: Value of sel at a key is a str/int/float.
                Result: rows are kept where the value in that column equals the given value.
            Case 2: Value of sel at a key is a dict with two keys: 'op' and 'val'.
                Result: rows are kept where op(column value, val) is True.
            Case 3: Value of sel at a key is a list of values and/or dictionaries.
                Result: rows matching any entry of the list are kept (logical OR);
                the masks for different keys of sel are combined with logical AND.
'''
mask = np.ones(index.shape[0],dtype=bool)
for k,v1 in sel.items():
# v1 needs to be iterable; hopefully no one passed an ndarray
if not isinstance(v1,list):
v1 = [v1]
sub_mask = np.zeros_like(mask,dtype=bool)
for v2 in v1:
if isinstance(v2,dict):# specialized operator
op = v2['op']
val = v2['val']
else: # assume operature.eq
op = operator.eq
val = v2
sub_mask = np.logical_or(sub_mask,op(index[k],val))
mask &= sub_mask
return mask
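    # Illustrative selection dicts accepted by build_mask()/select(); the column
    # names ('mass', 'name') come from the docstring examples above, while the
    # values are made up for illustration:
    #   {'mass': 2.25}                                      # Case 1: equality
    #   {'mass': {'op': operator.gt, 'val': 2.0}}           # Case 2: custom operator
    #   {'name': ['gr', {'op': operator.ne, 'val': 'sq'}]}  # Case 3: OR over a list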
def select(self,trial_select=None,data_select=None,trial_id=None):
'''
Arguments
---------
        trial_select, data_select: dict
            Selection specifiers keyed by column label; each value may be an
            int/float/str, an {'op': ..., 'val': ...} dict, or a list of these
            (see build_mask() above). trial_id may be passed instead of trial_select.
'''
if all([sel is None for sel in [trial_select,data_select,trial_id]]):
raise ValueError('data_select and (trial_select or trial_id) must be specified.')
elif all([sel is None for sel in [trial_select,trial_id]]):
raise ValueError('(trial_select or trial_id) must be specified.')
elif all([sel is not None for sel in [trial_select,trial_id]]):
raise ValueError('Do not specify both trial_select or trial_id.')
if trial_select is not None:
# boolean mask for trial_index df
trial_mask = self.build_mask(trial_select,self.trial_index)
else:
trial_mask = trial_id
trial_index_sel = self.trial_index.loc[trial_mask]
# left index in trial_index should correspond to trial_id column
data_index_sel1 = trial_index_sel.merge(self.data_index,
left_index=True,
right_on='trial_id')
# boolean mask for data_index
data_index_mask = self.build_mask(data_select,data_index_sel1)
data_index_sel2 = data_index_sel1.loc[data_index_mask]
index = []
data = []
for data_len,group in data_index_sel2.groupby('length'):
index.append(group)
data_mask = group.vector_id
data.append(self.data[data_len].loc[data_mask])
if len(index) == 1:
return index[0],data[0]
else:
return index,data
def select_x(self,indexx,datax,trial_select=None,data_select_y=None,trial_id_y=None):
if indexx.shape[0] != 1:
            raise ValueError('select_x() only works when a single dataset (1 row) is passed to it')
dx = datax.iloc[0]
idx = datax.index[0]
ix = indexx.index[0]
indexy,datay = self.select(trial_select,data_select_y,trial_id_y)
indexxy = []
dataxy = []
for (iy,y),(idy,dy) in zip(indexy.iterrows(),datay.iterrows()):
dy.index = datax.iloc[0]
dataxy.append(dy)
del y['vector_id']
y['vector_id_x'] = idx
y['vector_id_y'] = idy
y['data_id_x'] = ix
y['data_id_y'] = iy
indexxy.append(y)
index = pd.DataFrame(indexxy)
data = pd.DataFrame(dataxy)
return index,data
def select_xy(self,trial_select=None,data_select_x=None,data_select_y=None,trial_id_x=None,trial_id_y=None):
indexx,datax = self.select(trial_select,data_select_x,trial_id_x)
indexy,datay = self.select(trial_select,data_select_y,trial_id_y)
if indexx.shape != indexy.shape:
raise ValueError('X and Y data are not the same shape. Aborting merge.')
indexxy = []
dataxy = []
for (ix,x),(iy,y),(idx,dx),(idy,dy) in zip(indexx.iterrows(),indexy.iterrows(),datax.iterrows(),datay.iterrows()):
dy.index = dx
dataxy.append(dy)
del y['vector_id']
y['vector_id_x'] = idx
y['vector_id_y'] = idy
y['data_id_x'] = ix
y['data_id_y'] = iy
indexxy.append(y)
index = | pd.DataFrame(indexxy) | pandas.DataFrame |
# coding=utf-8
import pandas as pd
import xgboost as xgb
from sklearn.metrics import f1_score
import param
############################ Define the evaluation function ############################
def micro_avg_f1(preds, dtrain):
y_true = dtrain.get_label()
return 'micro_avg_f1', f1_score(y_true, preds, average='micro')
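# Sketch (not in the original script): a metric with this (preds, dtrain) signature is
# typically plugged into xgboost training via the `feval` argument, e.g.
#   bst = xgb.train(params, d_train, num_boost_round=100,
#                   evals=[(d_valid, 'valid')], feval=micro_avg_f1)
# where params/d_train/d_valid are placeholders for the objects built below.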
############################ Load features & labels ############################
df_tfidf_lr = | pd.read_csv(param.data_path + '/output/feature/tfidf/lr_prob_12w.csv') | pandas.read_csv |
import csv,yaml,os
import pandas as pd
import json
with open('player_map.json','r') as fd:
player_map = json.load(fd)
yaml_list = os.listdir()
def make_reg(temp):
if temp[1].islower():
return temp
temp=temp.split()
reg=temp[0][0]+'.*'+temp[-1]
return reg
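# e.g. make_reg('V Kohli') -> 'V.*Kohli', while a name whose second character is
# already lowercase (e.g. 'Sachin') is returned unchanged by the early exit above.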
def find_player(name,df):
if name in player_map:
k = player_map[name]
player = df.filter(regex=k,axis = 0)
return player.iloc[0]
else:
print(name)
return -1
bats_cluster = pd.read_csv('batsman_cluster.csv',index_col='player_name')
bowl_cluster = pd.read_csv('bowler_cluster.csv',index_col='player_name')
with open('decisiontree_ballinfo.csv','w') as csvfile:
fieldnames = ['batsman','nonstrike','bowler','runs','wickets','home','ave_score','sr','bowl_ave','bowl_sr','econ','innings','wickets']
writer = csv.DictWriter(csvfile, fieldnames = fieldnames)
writer.writeheader()
batsman_info = dict()
bowler_info = dict()
for num,yaml_file in enumerate(yaml_list):
if 'yaml' in yaml_file:
with open(yaml_file,"r") as stream:
try:
yaml_dict = yaml.load(stream)
except yaml.YAMLError as err:
print(err)
team1 = yaml_dict['info']['teams'][0]
for number,innings in enumerate(yaml_dict['innings']):
wickets=0
total = 0
innings_info = list(innings.values())[0]
if innings_info['team'] == team1:
home = 1
else:
home = 0
cur_over = 0
for ball in innings_info['deliveries']:
ball_info = list(ball.values())[0]
prev_over = cur_over
cur_over = int(list(ball.keys())[0])
print(cur_over,prev_over)
if prev_over!=cur_over:
print(prev_over,{'batsman':batsman_info[batsman]['prediction'],'nonstrike':ball_info['non_striker'],'bowler':bowler_info[bowler]['prediction'],'out':out,'home':home,'ave_score':batsman_info[batsman]['ave_score'],'sr':batsman_info[batsman]['sr'],'bowl_ave':bowler_info[bowler]['bowl_ave'],'bowl_sr':bowler_info[bowler]['bowl_sr'],'econ':bowler_info[bowler]['econ'],'innings':number%2,'wickets':wickets,'runs':total})
writer.writerow({'batsman':batsman_info[batsman]['prediction'],'nonstrike':batsman_info[nonstrike]['prediction'],'bowler':bowler_info[bowler]['prediction'],'runs':total,'wickets':wickets,'home':home,'ave_score':batsman_info[batsman]['ave_score'],'sr':batsman_info[batsman]['sr'],'bowl_ave':bowler_info[bowler]['bowl_ave'],'bowl_sr':bowler_info[bowler]['bowl_sr'],'econ':bowler_info[bowler]['econ'],'innings':number%2})
total = 0
wickets = 0
batsman = ball_info['batsman']
bowler = ball_info['bowler']
nonstrike=ball_info['non_striker']
if batsman not in batsman_info:
batsman_info[batsman] = find_player(batsman,bats_cluster)
if type(batsman_info[batsman]) == int:
temp_dict = {'prediction':0,'ave_score':18.161195652173927, 'sr':124.90467391304348, 'balls_faced':512.4130434782609, 'hundreds/innings':0.0003079888613018342, 'fifties/innings':0.03316629905487374, 'fours_rate':0.10561940238357537, 'six_rate':0.045928253202445646, 'vulnerability':0.08160455808205533}
batsman_info[batsman] = pd.Series(temp_dict)
if nonstrike not in batsman_info:
batsman_info[nonstrike] = find_player(nonstrike,bats_cluster)
if type(batsman_info[nonstrike]) == int:
temp_dict = {'prediction':0,'ave_score':18.161195652173927, 'sr':124.90467391304348, 'balls_faced':512.4130434782609, 'hundreds/innings':0.0003079888613018342, 'fifties/innings':0.03316629905487374, 'fours_rate':0.10561940238357537, 'six_rate':0.045928253202445646, 'vulnerability':0.08160455808205533}
batsman_info[nonstrike] = pd.Series(temp_dict)
if bowler not in bowler_info:
bowler_info[bowler] = find_player(bowler,bowl_cluster)
if type(bowler_info[bowler]) == int:
temp_dict = {'prediction':3, 'bowl_ave':27.86269230769231, 'econ':7.669653846153847, 'bowl_sr':21.53619230769231, 'balls':430.4730769230769}
bowler_info[bowler] = | pd.Series(temp_dict) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Sat May 25 15:38:06 2019
Template for scraping data with selenium
@author: lei
"""
import time
import re
import requests
import numpy as np
import pandas as pd
from selenium.webdriver.common.by import By
from selenium_chrome import chrome
from selenium.webdriver.support import expected_conditions as EC
class Spyder():
    # file_name is the name of the file the results are saved to
def __init__(self, file_name='数据结果.xlsx'):
self.file_name = file_name
self.driver, self.wait = chrome()
self.run()
def run(self):
        self.defined_var()    # Step 1: define the containers for the data to scrape
        self.generate_urls()  # Step 2: generate the URLs to scrape
        self.main()           # Step 3: main scraping loop
        self.driver.close()   # Step 4: close the Chrome browser
        self.clean_data()     # Step 5: clean the data
        self.save_data()      # Step 6: save the data
    def defined_var(self):  # define the containers for the scraped data
        self.value = []  # raw data
        self.val = []  # cleaned data
def generate_urls(self):
pass
def main(self):
pass
    def clean_data(self):  # clean the data
pass
def save_data(self):
df = | pd.DataFrame(self.val, columns=['爬取结果']) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# ## Generating CSV
# In[81]:
import os
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import IPython.display as ipd
import librosa
import pandas as pd
# In[12]:
root_dir="D:/Sundar/speech/Speaker/Sachin"
for dir_, _, files in os.walk(root_dir):
for file_name in files:
print(file_name)
# In[19]:
root_dir="D:/Sundar/speech/Speaker/Sachin/"
for dir_, _, files in os.walk(root_dir):
for file_name in files:
if not file_name == '.DS_Store':
rel_dir = os.path.relpath(dir_, root_dir)
print(rel_dir)
rel_file = os.path.join(rel_dir, file_name)
file_no_ext,ext=os.path.splitext(file_name)
y, sr = librosa.load(root_dir + rel_file)
np.savetxt(root_dir+rel_dir+"/"+file_no_ext+".csv",y.transpose(),delimiter=",")
# ## Generating RPs
# In[20]:
import os
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from matplotlib import pyplot as plt
#import scipy.io
import pyts
import pandas as pd
from pyts.image import RecurrencePlot
# In[ ]:
root_dir="D:/Sundar/speech/Speaker/Sachin/"
for dir_, _, files in os.walk(root_dir):
for file_name in files:
if not file_name == '.DS_Store' and file_name[-4:]!= '.wav':
rel_dir = os.path.relpath(dir_, root_dir)
rel_file = os.path.join(rel_dir, file_name)
file_no_ext,ext=os.path.splitext(file_name)
df = pd.read_csv(root_dir + rel_file, index_col=None, header=None)
count = 0
end = df.shape[0]
for i in range(0, end, 600):
f = i + 600
frame = df[i:f]
generate_rp(frame, 6, None, 0, file_no_ext, count)
count +=1
generate_rp(df[i:], 6, None, 0, file_no_ext, count)
# In[115]:
def generate_rp(frame, dimension, threshold, percentage, file_name, count):
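    # Build a recurrence plot for one frame of the signal with pyts' RecurrencePlot
    # (dimension/threshold/percentage are forwarded as-is) and save it as a PNG whose
    # name is derived from file_name and count.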
data_rp = []
data_rp.append(frame.values.reshape(1,-1))
data_rp.append(frame.values.reshape(1,-1))
data_rp = np.asarray(data_rp)
# Recurrence plot transformation
#X_rp1 = RecurrencePlot(dimension=3, time_delay=1,threshold=None, percentage=0).fit_transform(data_rp[0])[0]
X_rp1 = RecurrencePlot(dimension=dimension, time_delay=1,threshold=threshold, percentage=percentage).fit_transform(data_rp[0])[0]
imgplot = plt.imshow(X_rp1, cmap='binary', origin='lower')
fig1 = plt.gcf()
#plt.show()
plt.draw()
fig1.savefig('D:/Sundar/speech/Speaker/Threshold/0/Sachin/'+file_name+'_RP'+str(count)+'.png')
# In[ ]:
''''
dir_name = root_dir
#map_signal = {"Wav_csv":"seizure activity"}
all_files = os.listdir(dir_name)
li = []
for i in range(1,2):
df = pd.read_csv(dir_name+"/"+all_files[2], index_col=None, header=None)
li.append(df)
frame = pd.concat(li, axis=0, ignore_index=True)
frame = frame.rename(columns={0:dir_name})
frame.head()'''
#Shape: (409700,1)
#i = 3000
#f = 3600
#i=7700
#f=8300
#print(len(frame[:f]))
#print(len(frame[:]))'
# In[103]:
data_rp = []
data_rp.append(frame.values.reshape(1,-1))
data_rp.append(frame.values.reshape(1,-1))
data_rp = np.asarray(data_rp)
# Recurrence plot transformation
#X_rp1 = RecurrencePlot(dimension=3, time_delay=1,threshold=None, percentage=0).fit_transform(data_rp[0])[0]
X_rp1 = RecurrencePlot(dimension=3, time_delay=1,threshold='point', percentage=5).fit_transform(data_rp[0])[0]
imgplot = plt.imshow(X_rp1, cmap='binary', origin='lower')
fig1 = plt.gcf()
plt.show()
plt.draw()
fig1.savefig('Sound/threshold/0/anil/'+'sample'+'_RP'+'1'+'.png')
# ### Recurrence Plots for A, C & E at None Threshold
# In[ ]:
dir_names = ["/gdrive/My Drive/EEG/S"]
# In[ ]:
for dir_name in dir_names:
all_files = os.listdir(dir_name)
li = []
for i in range(len(all_files)):
df = pd.read_csv(dir_name+"/"+all_files[i], index_col=None, header=None, engine="python")
li.append(df)
frame = pd.concat(li, axis=0, ignore_index=True)
frame = frame.rename(columns={0:dir_name})
for i,f in zip(range(0,409100,600), range(700,409700,600)):
data_rp = []
data_rp.append(frame[dir_name][i:f].values.reshape(1,-1))
data_rp.append(frame[dir_name][i:f].values.reshape(1,-1))
data_rp = np.asarray(data_rp)
# Recurrence plot transformation
rp = RecurrencePlot(threshold=None)
X_rp = rp.fit_transform(data_rp[0])[0]
# Show the results for the first time series
plt.figure(figsize=(5, 5))
plt.imshow(X_rp, cmap='binary', origin='lower')
plt.title('Recurrence Plot', fontsize=16)
plt.tight_layout()
plt.savefig("/gdrive/My Drive/EEG/RP_None/"+dir_name[-1]+"/"+str(i)+"-"+str(f)+".png")
plt.close()
print(dir_name+" done!")
# ### Recurrence Plots for A, C & E at Point Threshold - 5%, 25%, 75%
# In[ ]:
for dir_name in dir_names:
all_files = os.listdir(dir_name)
li = []
for i in range(len(all_files)):
df = | pd.read_csv(dir_name+"/"+all_files[i], index_col=None, header=None, engine="python") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 11:04:14 2021
@author: jose-
"""
import pandas as pd
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
import tkinter.scrolledtext as scrolledtext
def getExcel ():
global df
import_file_path = filedialog.askopenfilename()
data = pd.read_excel (import_file_path)
df = | pd.DataFrame(data,columns=['No.','TIPO DE CONSEJO','CABECERA','MODALIDAD']) | pandas.DataFrame |
import popsims
import numpy as np
import matplotlib.pyplot as plt
import wisps
import pandas as pd
import wisps.simulations as wispsim
from tqdm import tqdm
import astropy.units as u
import numba
from scipy.interpolate import griddata
from popsims import galaxy
def probability_of_selection(spt, snr):
"""
    probability of selection for a given snr and spt
"""
ref_df=wispsim.SELECTION_FUNCTION.dropna()
#self.data['spt']=self.data.spt.apply(splat.typeToNum)
interpoints=np.array([ref_df.spt.values, ref_df.logsnr.values]).T
return griddata(interpoints, ref_df.tot_label.values , (spt, np.log10(snr)), method='linear')
def get_snr(exp_grism, appf110s, appf140s, appf160s):
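    # Estimate the SNR in each of the F110/F140/F160 bands from the grism exposure
    # time and the apparent magnitudes, then return the element-wise minimum SNR
    # across the three bands.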
#print (exp_grism)
snrjs110= 10**(fit_snr_exptime( exp_grism, appf110s, *list(wispsim.MAG_LIMITS['snr_exp']['F110'])))
snrjs140= 10**(fit_snr_exptime( exp_grism, appf140s, *list(wispsim.MAG_LIMITS['snr_exp']['F140'])))
snrjs160= 10**(fit_snr_exptime( exp_grism, appf160s, *list(wispsim.MAG_LIMITS['snr_exp']['F160'])))
#assign upper and lo limits
snr_bool_up= np.logical_or.reduce([ appf110s >25, appf140s >25, appf160s>24])
snr_bool_do= np.logical_or.reduce([ appf110s <15, appf140s <15, appf160s>15])
snrjs= np.nanmin(np.vstack([snrjs110, snrjs140, snrjs160]), axis=0)
return snrjs
def format_maglimits(wisp_limits):
return {'WFC3_F110W':[16, wisp_limits['F110']],\
'WFC3_F140W':[16, wisp_limits['F140']],\
'WFC3_F160W':[16,wisp_limits['F160']]}
def make_cuts(df, dcts, expt):
snr=get_snr(expt, df.WFC3_F110W.values, df.WFC3_F140W.values, df.WFC3_F160W.values)
bools0=np.logical_or.reduce([df[k]< dcts[k][1] for k in dcts.keys()])
return df[np.logical_and(bools0, snr>=3)]
def get_average_distance_limits(p, cuts):
p.mag_limits=cuts
return dict( | pd.DataFrame(p.distance_limits) | pandas.DataFrame |
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| This file contains function templates used by the auto-generation script
"""
# below imports are copied into the auto-generated source file as-is
# for the auto-generation script to work ensure they are not mixed up with code
import numba
import numpy
import operator
import pandas
from numba.core.errors import TypingError
from numba import types
from sdc.utilities.sdc_typing_utils import (TypeChecker, check_index_is_numeric, check_types_comparable,
find_common_dtype_from_numpy_dtypes)
from sdc.datatypes.common_functions import (sdc_join_series_indexes, )
from sdc.hiframes.pd_series_type import SeriesType
from sdc.str_arr_ext import (string_array_type, str_arr_is_na)
from sdc.utilities.utils import sdc_overload, sdc_overload_method
from sdc.functions import numpy_like
from sdc.datatypes.range_index_type import RangeIndexType
def sdc_pandas_series_binop(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.binop
Limitations
-----------
Parameters ``level`` and ``axis`` are currently unsupported by Intel Scalable Dataframe Compiler
Examples
--------
.. literalinclude:: ../../../examples/series/series_binop.py
:language: python
:lines: 27-
:caption:
:name: ex_series_binop
.. command-output:: python ./series/series_binop.py
:cwd: ../../../examples
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.binop` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
_func_name = 'Method binop().'
ty_checker = TypeChecker(_func_name)
self_is_series, other_is_series = isinstance(self, SeriesType), isinstance(other, SeriesType)
if not (self_is_series or other_is_series):
return None
# this overload is not for string series
self_is_string_series = self_is_series and isinstance(self.dtype, types.UnicodeType)
other_is_string_series = other_is_series and isinstance(other.dtype, types.UnicodeType)
if self_is_string_series or other_is_string_series:
return None
if not isinstance(self, (SeriesType, types.Number)):
ty_checker.raise_exc(self, 'pandas.series or scalar', 'self')
if not isinstance(other, (SeriesType, types.Number)):
ty_checker.raise_exc(other, 'pandas.series or scalar', 'other')
operands_are_series = self_is_series and other_is_series
if operands_are_series:
none_or_numeric_indexes = ((isinstance(self.index, types.NoneType) or check_index_is_numeric(self))
and (isinstance(other.index, types.NoneType) or check_index_is_numeric(other)))
series_indexes_comparable = check_types_comparable(self.index, other.index) or none_or_numeric_indexes
if not series_indexes_comparable:
raise TypingError('{} Not implemented for series with not-comparable indexes. \
Given: self.index={}, other.index={}'.format(_func_name, self.index, other.index))
series_data_comparable = check_types_comparable(self, other)
if not series_data_comparable:
raise TypingError('{} Not supported for not-comparable operands. \
Given: self={}, other={}'.format(_func_name, self, other))
if not isinstance(level, types.Omitted) and level is not None:
ty_checker.raise_exc(level, 'None', 'level')
if not isinstance(fill_value, (types.Omitted, types.Number, types.NoneType)) and fill_value is not None:
ty_checker.raise_exc(fill_value, 'number', 'fill_value')
fill_value_is_none = isinstance(fill_value, (types.NoneType, types.Omitted)) or fill_value is None
if not isinstance(axis, types.Omitted) and axis != 0:
ty_checker.raise_exc(axis, 'int', 'axis')
# specializations for numeric series only
if not operands_are_series:
def _series_binop_scalar_impl(self, other, level=None, fill_value=None, axis=0):
if self_is_series == True: # noqa
numpy_like.fillna(self._data, inplace=True, value=fill_value)
result_data = numpy.empty(len(self._data), dtype=numpy.float64)
result_data[:] = self._data + numpy.float64(other)
return pandas.Series(result_data, index=self._index, name=self._name)
else:
numpy_like.fillna(other._data, inplace=True, value=fill_value)
result_data = numpy.empty(len(other._data), dtype=numpy.float64)
result_data[:] = numpy.float64(self) + other._data
return pandas.Series(result_data, index=other._index, name=other._name)
return _series_binop_scalar_impl
else: # both operands are numeric series
# optimization for series with default indexes, that can be aligned differently
if (isinstance(self.index, types.NoneType) and isinstance(other.index, types.NoneType)):
def _series_binop_none_indexes_impl(self, other, level=None, fill_value=None, axis=0):
numpy_like.fillna(self._data, inplace=True, value=fill_value)
numpy_like.fillna(other._data, inplace=True, value=fill_value)
if (len(self._data) == len(other._data)):
result_data = numpy_like.astype(self._data, numpy.float64)
result_data = result_data + other._data
return pandas.Series(result_data)
else:
left_size, right_size = len(self._data), len(other._data)
min_data_size = min(left_size, right_size)
max_data_size = max(left_size, right_size)
result_data = numpy.empty(max_data_size, dtype=numpy.float64)
_fill_value = numpy.nan if fill_value_is_none == True else fill_value # noqa
if (left_size == min_data_size):
result_data[:min_data_size] = self._data
for i in range(min_data_size, len(result_data)):
result_data[i] = _fill_value
result_data = result_data + other._data
else:
result_data[:min_data_size] = other._data
for i in range(min_data_size, len(result_data)):
result_data[i] = _fill_value
result_data = self._data + result_data
return | pandas.Series(result_data) | pandas.Series |
from my_lambdata.lambdata import DataFrameTransmogrifier
import pandas as pd
from pandas.testing import assert_frame_equal
import unittest
# Docstring
'''
DOCSTRING
Tests the functionality of DataFrameTransmogrifier and its
various capabilities
'''
base_df = pd.DataFrame({
'name': ['Anders', 'Charles', 'Bryce'],
'score': [87, 32, 58],
'date': ['09-03-2020', '02-29-2020', '01-15-2019']
})
class LambdataTest(unittest.TestCase):
'''
Tests functionality for DataFrameTransmogrifier
'''
def test_lambdata_init(self):
# test initialization of DataFrameTransmogrifier
df = base_df.copy()
df_t = DataFrameTransmogrifier(df)
assert_frame_equal(df_t.df, df)
def test_lambdata_date_split(self):
# test splitting datetimes
df = base_df.copy()
df['date'] = pd.to_datetime(base_df['date'],
infer_datetime_format=True)
df_t = DataFrameTransmogrifier(df)
# run the function itself
df_t.split_datetime()
# run what I think should happen
df['date_month'] = df['date'].dt.month
df['date_day'] = df['date'].dt.day
df['date_year'] = df['date'].dt.year
# check if i've done this right
assert_frame_equal(df_t.df, df)
def test_lambdata_add_column(self):
# test adding a column of data
df = base_df.copy()
# create some data to test with
data_to_add = [1, 0, 1]
df_t = DataFrameTransmogrifier(df)
# run what I think should happen
df['ones_and_zeroes'] = pd.Series(data_to_add, index=df.index)
# run the function itself
df_t.add_column(data_to_add, 'ones_and_zeroes')
# check if i've done this right
| assert_frame_equal(df_t.df, df) | pandas.testing.assert_frame_equal |
import glob
import math
import brewer2mpl
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.lines import Line2D
from matplotlib.ticker import MultipleLocator
SPINE_COLOR = 'gray'
#####################################################
# Process average from files #
#####################################################
def process_average(folder, scenarios, labels, header):
columns = ['property']
dfs1 = []
for scenario in scenarios:
file = glob.glob(folder + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=header, names=columns)['property']
df.name = labels[scenarios.index(scenario)]
dfs1 += [df]
result = pd.concat(dfs1, axis=1)
return result
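# Illustrative call (folder and scenario names are placeholders): gather the 'a1*'
# file of each scenario into one labelled DataFrame, e.g.
#   df = process_average('results/latency', ['s1', 's2'], ['Sourcey', 'PolKA'], header=None)
#   df.mean()  # per-scenario averages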
class Plotter():
#####################################################
# Latexify #
#####################################################
@staticmethod
def latexify(fig_width=None, fig_height=None, columns=1, fullwidth=False):
"""Set up matplotlib's RC params for LaTeX plotting.
Call this before plotting a figure.
Parameters
----------
fig_width : float, optional, inches
fig_height : float, optional, inches
columns : {1, 2}
"""
# code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
# Width and max height in inches for IEEE journals taken from
# computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
assert(columns in [1,2])
if fig_width is None:
if fullwidth:
fig_width = 3.39*2 if columns==1 else 6.9 # width in inches
else:
fig_width = 3.39 if columns==1 else 6.9 # width in inches
if fig_height is None:
golden_mean = (math.sqrt(5)-1.0)/2.0 # Aesthetic ratio
if fullwidth:
fig_height = fig_width*golden_mean/2.0 # height in inches
else:
fig_height = fig_width*golden_mean # height in inches
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
print("WARNING: fig_height too large:" + fig_height +
"so will reduce to" + MAX_HEIGHT_INCHES + "inches.")
fig_height = MAX_HEIGHT_INCHES
params = {
'backend': 'ps',
'text.latex.preamble': ['\\usepackage{amssymb}'],
'axes.labelsize': 5, # fontsize for x and y labels (was 10)
'axes.titlesize': 5,
'lines.markersize' : 3,
'lines.markeredgewidth': 0.3,
'legend.fontsize': 4, # was 10
'text.usetex': True,
'legend.edgecolor': 'w',
'figure.figsize': [fig_width, fig_height],
'font.family': 'serif',
'grid.linestyle': 'dashed',
'grid.color': 'grey',
'lines.dashed_pattern' : [150, 150],
'xtick.color': 'k',
'ytick.color': 'k',
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.minor.width': 0.05,
'ytick.minor.width': 0.05,
'xtick.major.width': 0.1,
'ytick.major.width': 0.1,
'xtick.labelsize': 4,
'ytick.labelsize': 4,
'lines.linewidth' : 0.2,
'grid.linewidth': 0.01,
'axes.linewidth': 0.2,
'errorbar.capsize' : 1,
'xtick.minor.visible': False, # visibility of minor ticks on x-axis
            # 'ytick.minor.visible': False, # visibility of minor ticks on y-axis
'boxplot.notch': False,
'boxplot.vertical': True,
'boxplot.whiskers': 1.5,
'boxplot.bootstrap': None,
'boxplot.patchartist': False,
'boxplot.showmeans': False,
'boxplot.showcaps': True,
'boxplot.showbox': True,
'boxplot.showfliers': True,
'boxplot.meanline': False,
'boxplot.flierprops.color': 'lightgrey',
'boxplot.flierprops.marker': 'o',
'boxplot.flierprops.markerfacecolor': 'none',
'boxplot.flierprops.markeredgecolor': 'lightgrey',
'boxplot.flierprops.markersize': 1,
'boxplot.flierprops.linestyle': 'none',
'boxplot.flierprops.linewidth': 0.1,
'boxplot.boxprops.color': 'C2',
'boxplot.boxprops.linewidth': 0.2,
'boxplot.boxprops.linestyle': '-',
'boxplot.whiskerprops.color': 'C2',
'boxplot.whiskerprops.linewidth': 0.2,
'boxplot.whiskerprops.linestyle': '-',
'boxplot.capprops.color': 'C2',
'boxplot.capprops.linewidth': 0.2,
'boxplot.capprops.linestyle': '-',
'boxplot.medianprops.color': 'C2',
'boxplot.medianprops.linewidth': 0.20,
'boxplot.medianprops.linestyle': '-',
'boxplot.meanprops.color': 'C2',
'boxplot.meanprops.marker': '^',
'boxplot.meanprops.markerfacecolor': 'C2',
'boxplot.meanprops.markeredgecolor': 'C2',
'boxplot.meanprops.markersize': 6,
'boxplot.meanprops.linestyle': 'none',
'boxplot.meanprops.linewidth': 0.20,
}
matplotlib.rcParams.update(params)
# for spine in ['top', 'right']:
# ax.spines[spine].set_visible(False)
# for spine in ['left', 'bottom']:
# ax.spines[spine].set_color(SPINE_COLOR)
# ax.spines[spine].set_linewidth(0.1)
# ax.xaxis.set_ticks_position('bottom')
# ax.yaxis.set_ticks_position('left')
# # Or if you want different settings for the grids:
# ax.grid(which='minor', alpha=0.2)
# ax.grid(which='major', alpha=0.5)
# for axis in [ax.xaxis, ax.yaxis]:
# axis.set_tick_params(direction='out', color=SPINE_COLOR)
# return ax
#####################################################
# Latency - Mean - 4 methods #
#####################################################
@staticmethod
def latency_avg_4methods(folder1, folder2, folder3, folder4, scenarios, labels, output, title, ylim, fullwidth=False):
plt.figure()
Plotter.latexify(fullwidth=fullwidth)
columns = ['latency']
dfs1 = []
for scenario in scenarios:
file = glob.glob(folder1 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=4, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs1 += [df]
result1 = pd.concat(dfs1, axis=1)
#print('result1\n', result1.describe())
#print('result1\n', result1.to_string())
std1 = result1.std()
ax1 = result1.mean().plot(label="Sourcey", legend = True, yerr=std1, color="red")
ax1.set_xticks([0,1,2,3,4,5,6,7,8,9,10])
dfs2 = []
for scenario in scenarios:
file = glob.glob(folder2 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=None, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs2 += [df]
result2 = pd.concat(dfs2, axis=1)
#print('result2\n', result2.describe())
#print('result2\n', result2.to_string())
std2 = result2.std()
ax2 = result2.mean().plot(label="Sourcey Fabric", legend = True, yerr=std2, color="orange")
ax2.set_xticks([0,1,2,3,4,5,6,7,8,9,10])
dfs3 = []
for scenario in scenarios:
file = glob.glob(folder3 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=None, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs3 += [df]
result3 = pd.concat(dfs3, axis=1)
#print('result3\n', result3.describe())
#print('result3\n', result3.to_string())
std3 = result3.std()
ax3 = result3.mean().plot(label="PolKA", legend = True, yerr=std3, color="blue")
ax3.set_xticks([0,1,2,3,4,5,6,7,8,9,10])
dfs4 = []
for scenario in scenarios:
file = glob.glob(folder4 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=None, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs4 += [df]
result4 = pd.concat(dfs4, axis=1)
#print('result4\n', result4.describe())
#print('result4\n', result4.to_string())
std4 = result4.std()
ax4 = result4.mean().plot(label="PolKA Fabric", legend = True, yerr=std4, color="green")
ax4.set_ylim(0, ylim)
ax4.tick_params(axis='both', which='major', labelsize=5)
ax4.grid(b=True, which='major', linestyle='dashed', axis='x')
ax4.grid(b=True, which='major', linestyle='dashed', axis='y')
ax4.set_xticks([0,1,2,3,4,5,6,7,8,9,10])
plt.title(title)
plt.ylabel('RTT Latency (s)')
plt.xlabel('Number of Hops')
plt.tight_layout()
plt.savefig(output)
#####################################################
# Latency - Mean - 4 methods #
#####################################################
@staticmethod
def latency_avg_4methods_bar(folder1, folder2, folder3, folder4, scenarios, labels, output, title, ylim, fullwidth=False):
plt.figure()
Plotter.latexify(fullwidth=fullwidth)
columns = ['latency']
dfs1 = []
for scenario in scenarios:
file = glob.glob(folder1 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=4, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs1 += [df]
result1 = pd.concat(dfs1, axis=1)
std1 = result1.std()
dfs2 = []
for scenario in scenarios:
file = glob.glob(folder2 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=None, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs2 += [df]
result2 = pd.concat(dfs2, axis=1)
std2 = result2.std()
dfs3 = []
for scenario in scenarios:
file = glob.glob(folder3 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=None, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs3 += [df]
result3 = pd.concat(dfs3, axis=1)
std3 = result3.std()
dfs4 = []
for scenario in scenarios:
file = glob.glob(folder4 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=None, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs4 += [df]
result4 = pd.concat(dfs4, axis=1)
std4 = result4.std()
x=scenarios #since the date are the same in both tables I only have 1 x
aa=dict(result1.mean())
bb=dict(result2.mean())
cc=dict(result3.mean())
dd=dict(result4.mean())
errorbar = [std1, std2, std3, std4]
colors = ['lightpink', 'lightblue', 'red', 'darkblue']
dfbar = pd.DataFrame({'(1)Sourcey': aa, '(3)Sourcey Fabric': bb, '(2)PolKA': cc,'(4)PolKA Fabric': dd}, index=x)
ax4 = dfbar.plot.bar(yerr=errorbar, color=colors, rot=0)
ax4.set_ylim(0, ylim)
ax4.tick_params(axis='both', which='major', labelsize=4)
ax4.grid(b=True, which='major', linestyle='dashed', axis='x')
ax4.grid(b=True, which='major', axis='y')
ax4.set_xticks([0,1,2,3,4,5,6,7,8,9,10])
plt.title(title)
plt.ylabel('RTT Latency (s)')
plt.xlabel('Number of Hops')
plt.tight_layout()
plt.savefig(output)
#####################################################
# Latency - Mean - 3 methods #
#####################################################
@staticmethod
def latency_avg_3methods_bar(folder1, folder2, folder3, scenarios, labels, output, title, ylim, fullwidth=False):
plt.figure()
Plotter.latexify(fullwidth=fullwidth)
columns = ['latency']
dfs1 = []
for scenario in scenarios:
file = glob.glob(folder1 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=4, names=columns)['latency']
df.name = labels[scenarios.index(scenario)]
dfs1 += [df]
result1 = pd.concat(dfs1, axis=1)
std1 = result1.std()
dfs2 = []
for scenario in scenarios:
file = glob.glob(folder2 + '/' + scenario + '/a1*')
df = pd.read_csv(file[0], header=None, names=columns)
"""
Functions for creating GTF databases using gffutils and using those databases
to annotate alternative events.
"""
from collections import Counter
import itertools
import os
import gffutils
from gffutils.helpers import merge_attributes
import pandas as pd
from ..common import SPLICE_TYPE_ISOFORM_EXONS, OUTRIGGER_DE_NOVO, NOVEL_EXON
from ..region import Region, STRANDS
# Annotations from:
# ftp://ftp.sanger.ac.uk/pub/gencode/Gencode_human/release_19/gencode.v19.annotation.gtf.gz
gene_transcript = set(('gene', 'transcript'))
def maybe_analyze(db):
try:
# For gffutils >0.8.7.1
db.analyze()
except AttributeError:
# For compatibility with gffutils<=0.8.7.1
db.execute('ANALYZE features')
def transform(f):
if f.featuretype in gene_transcript:
return f
else:
exon_location = '{}:{}:{}-{}:{}'.format(
f.featuretype, f.seqid, f.start, f.stop, f.strand)
exon_id = exon_location
if f.featuretype == 'CDS':
exon_id += ':' + f.frame
f.attributes['location_id'] = [exon_id]
return f
def create_db(gtf_filename, db_filename=None):
db_filename = ':memory:' if db_filename is None else db_filename
db = gffutils.create_db(
gtf_filename,
db_filename,
merge_strategy='merge',
id_spec={'gene': 'gene_id', 'transcript': 'transcript_id',
'exon': 'location_id', 'CDS': 'location_id',
'start_codon': 'location_id',
'stop_codon': 'location_id', 'UTR': 'location_id'},
transform=transform,
force=True,
verbose=True,
disable_infer_genes=True,
disable_infer_transcripts=True,
force_merge_fields=['source'])
maybe_analyze(db)
return db
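# Usage sketch (not from the original module; the GTF path below is
# hypothetical): build an in-memory gffutils database and look up one exon by
# the location id produced by the `transform` hook above.
# db = create_db('gencode.v19.annotation.gtf')
# exon = db['exon:chr1:11869-12227:+']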
class SplicingAnnotator(object):
"""Annotates basic features of splicing events: gene ids and names"""
def __init__(self, db, events, splice_type):
"""Annotate splicing events with their respective genes
Parameters
----------
db : gffutils.FeatureDB
Database including all the exons found in the events
events : pandas.DataFrame
Table of events, with the event ids as the index
splice_type : 'se' | 'mxe'
The type of alternative splicing, which informs the exon
configurations for different isoforms
"""
self.db = db
self.events = events
self.splice_type = splice_type
self.isoform_exons = SPLICE_TYPE_ISOFORM_EXONS[
self.splice_type.lower()]
self.exon_cols = list(set(itertools.chain(
*self.isoform_exons.values())))
self.exon_cols.sort()
# Make a dataframe with outrigger.Region objects
self.regions = pd.DataFrame(index=self.events.index)
self.region_cols = ['{}_region'.format(x) for x in self.exon_cols]
for exon_col, region_col in zip(self.exon_cols, self.region_cols):
self.regions[region_col] = self.events[exon_col].map(Region)
# Make introns and copy-pastable genome locations for the whole event
intron_regions = self.regions[self.region_cols].apply(
self.event_introns_regions, axis=1)
self.regions = pd.concat([self.regions, intron_regions], axis=1)
self.region_cols.extend(['intron_region', 'event_region'])
# Add the lengths of exons, introns, event region, and the genome
# location ("name") of each intron
self.lengths = self.regions.applymap(len)
self.lengths.columns = [x.replace('_region', '_length')
for x in self.lengths]
self.lengths = self.lengths.astype(int)
intron_names = intron_regions.applymap(lambda x: x.name)
intron_names.columns = [x.replace('_region', '_location')
for x in intron_names]
self.events = pd.concat([self.events, self.lengths, intron_names],
axis=1)
def maybe_get_feature(self, feature_id):
try:
return self.db[feature_id]
except gffutils.FeatureNotFoundError:
r = Region(feature_id)
feature = location_to_feature(self.db, r.chrom, r.start, r.stop,
r.strand, source=OUTRIGGER_DE_NOVO,
featuretype=NOVEL_EXON)
self.db.update([feature], make_backup=False,
id_spec={NOVEL_EXON: 'location_id'},
transform=transform)
return feature
def attributes(self):
"""Retrieve all GTF attributes for each isoform's event"""
ignore_keys = 'location_id', 'exon_id', 'exon_number'
lines = []
for event_id, row in self.events.iterrows():
attributes = pd.Series(name=event_id)
for isoform, exons in self.isoform_exons.items():
for e in exons:
attributes[e] = row[e]
n_exons = len(exons)
exon_ids = row[exons]
exon_features = [self.maybe_get_feature(exon_id) for
exon_id in exon_ids]
keys = set(itertools.chain(
*[exon.attributes.keys() for exon in exon_features]))
for key in keys:
# Skip the location IDs which is specific to the
# outrigger-built database, and the exon ids which will
# never match up across all exons
if key in ignore_keys:
continue
values = Counter()
for exon_id in exon_ids:
try:
values.update(
self.db[exon_id].attributes[key])
except KeyError:
continue
if len(values) > 0:
# Only use attributes that came up in for all exons
# of the isoform
values = [value for value, count in values.items()
if count == n_exons]
new_key = isoform + '_' + key
attributes[new_key] = ','.join(sorted(values))
lines.append(attributes)
event_attributes = pd.concat(lines, axis=1).T
df = pd.concat([self.events, event_attributes], axis=1)
#!/usr/bin/env python3
from DeepJetCore import DataCollection
import os
from argparse import ArgumentParser
import numpy as np
import matplotlib.pyplot as plt
import math
from multiprocessing import Process
import random
from datastructures import TrainData_NanoML
import plotly.express as px
import pandas as pd
import tqdm
from DeepJetCore.dataPipeline import TrainDataGenerator
parser = ArgumentParser('')
parser.add_argument('inputFile')
parser.add_argument('outputDir')
args = parser.parse_args()
outdir = args.outputDir+'/'
### rewrite!
os.system('mkdir -p '+outdir)
#read a file
def invokeGen(infile):
if infile[-6:] == '.djcdc':
dc = DataCollection(infile)
td = dc.dataclass()
dc.setBatchSize(1)
gen = dc.invokeGenerator()
elif infile[-6:] == '.djctd':
td = TrainData_NanoML()
td.readFromFile(infile)
gen = TrainDataGenerator()
gen.setBatchSize(1)
gen.setBuffer(td)
elif infile[-5:] == '.root':
td = TrainData_NanoML()
td.convertFromSourceFile(infile,{},True)
gen = TrainDataGenerator()
gen.setBatchSize(1)
gen.setBuffer(td)
gen.setSkipTooLargeBatches(False)
nevents = gen.getNBatches()
return gen.feedNumpyData,nevents,td
gen,nevents,td = invokeGen(args.inputFile)
def shuffle_truth_colors(df, qualifier="truthHitAssignementIdx"):
ta = df[qualifier]
unta = np.unique(ta)
unta = unta[unta>-0.1]
np.random.shuffle(unta)
out = ta.copy()
dfo = df.copy()
for i in range(len(unta)):
out[ta ==unta[i]]=i
dfo[qualifier] = out
return dfo
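# Minimal illustration (synthetic frame, not from the original script): only
# non-negative truth indices are re-labelled, noise hits (-1) keep their value.
# _toy = pd.DataFrame({'truthHitAssignementIdx': [0., 0., 1., 2., -1.]})
# _shuffled = shuffle_truth_colors(_toy)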
def toDataFrame(thegen, thetd):
def popRSAndSqueeze(df):
for k in df.keys():
if "_rowsplits" in k:
df.pop(k)
else:
df[k] = np.squeeze(df[k])
return df
data,_ = next(thegen())#this is a dict, row splits can be ignored, this is per event
df = thetd.createFeatureDict(data,False)
dftruth = thetd.createTruthDict(data)
df = popRSAndSqueeze(df)
dftruth = popRSAndSqueeze(dftruth)
df['recHitLogEnergy'] = np.log(df['recHitEnergy']+1.+1e-6)
dffeat = pd.DataFrame.from_dict(df)
dftruth = pd.DataFrame.from_dict(dftruth)
df.update(dftruth)
dfall = pd.DataFrame.from_dict(df)
# JSON reader & Panda Creator + Updater
import json
import pandas as pd
import os
from typing import List
if __name__ == "functions.fastfield_json_reader":
from functions.new_daily_handler import event_main
else:
from new_daily_handler import event_main
class open_JSON():
def __init__(self,json_file):
self.json_file_location = json_file
self.json_info = self.open_json(json_file)
# self.job_num = self.get_field("JI_job_number")
# self.daily_date = self.get_field("JI_dailyDate")
# self.job_name = self.get_field("JI_project_name")
@staticmethod
def open_json(file_path):
"""opens .json file"""
with open(file_path,encoding="utf-8") as f:
info = json.load(f)
return info
def get_field(self,job_field):
return self.json_info[job_field]
def print_all_fields(self):
for idx, field in enumerate(self.json_info):
print(f"field {idx}: {field}")
class json_combiner(open_JSON):
def __init__(self,json_file_dir,event_handler_files:List = None):
"""takes in a list of json file names, and their directory, and returns a list of open jsons in a list"""
self.dir = json_file_dir
if event_handler_files == None:
self.files = self.grab_files(self.dir,file_ext=".json")
else:
#code for event_handler updating instead of combining all files
self.files = event_handler_files
# combine list of .jsons into a singular list of jsons
self.complete_paths = [os.path.join(self.dir,f) for f in self.files] #hard path joiner
self.json_records = [single_json.json_info for single_json in [open_JSON(json_obj) for json_obj in self.complete_paths]]
def to_existing_dataframe(self,existing_csv_path):
"""append new json's to an existing dataframe()"""
dataset = pd.read_csv(existing_csv_path)
new_data = self.to_dataframe()
final_dataset = dataset.append(new_data)
# print(f"existing data shape:{dataset.shape}")
# print(f"new data shape: {new_data.shape}")
# print(f"Updated Dataset Shape: {final.shape}")
return final_dataset
def to_dataframe(self):
"""returns pandas dataframe and saves it file inside dir"""
self.dataframe = pd.DataFrame.from_records(self.json_records)
from __future__ import annotations
from typing import List
import pandas as pd
from pathlib import Path
import json
from os import path
import logging
from card_live_dashboard.model.CardLiveData import CardLiveData
from card_live_dashboard.model.RGIParser import RGIParser
from card_live_dashboard.model.data_modifiers.CardLiveDataModifier import CardLiveDataModifier
logger = logging.getLogger(__name__)
class CardLiveDataLoader:
JSON_DATA_FIELDS = [
'rgi_main',
'rgi_kmer',
'mlst',
'lmat',
]
OTHER_TABLE_DROP_FIELDS = JSON_DATA_FIELDS + [
'timestamp',
'geo_area_code',
]
def __init__(self, card_live_data: Path):
self._directory = card_live_data
if self._directory is None:
raise Exception('Invalid value [card_live_data=None]')
self._data_modifiers = []
def add_data_modifiers(self, data_modifiers: List[CardLiveDataModifier]) -> None:
"""
Adds a list of new objects used to apply post modifications to the data.
:param data_modifiers: A list of data modifier objects.
:return: None.
"""
self._data_modifiers.extend(data_modifiers)
def read_or_update_data(self, existing_data: CardLiveData = None) -> CardLiveData:
"""
Given an existing data object, updates the data object with any new files.
:param existing_data: The existing data object (None if all data should be read).
:return: The original (unmodified) data object if no updates, otherwise a new data object with additional data.
"""
input_files = list(Path(self._directory).glob('*'))
if existing_data is None:
return self.read_data(input_files)
elif not self._directory.exists():
raise Exception(f'Data directory [card_live_dir={self._directory}] does not exist')
else:
existing_files = existing_data.files()
input_files_set = {p.name for p in input_files}
files_new = input_files_set - existing_files
# If no new files have been found
if len(files_new) == 0:
logger.debug(f'Data has not changed from {len(input_files_set)} samples, not updating')
return existing_data
else:
logger.info(f'{len(files_new)} additional samples found.')
return self.read_data(input_files)
def read_data(self, input_files: list = None) -> CardLiveData:
"""
Reads in the data and constructs a CardLiveData object.
:param input_files: The (optional) list of input files. Leave as None to read from the configured directory.
The optional list is used so I don't have to re-read the directory after running read_or_update_data().
:return: The CardLiveData object.
"""
if input_files is None:
if not self._directory.exists():
raise Exception(f'Data directory [card_live_dir={self._directory}] does not exist')
else:
input_files = list(Path(self._directory).glob('*'))
json_data = []
for input_file in input_files:
filename = path.basename(input_file)
with open(input_file) as f:
json_obj = json.load(f)
json_obj['filename'] = filename
json_data.append(json_obj)
full_df = pd.json_normalize(json_data).set_index('filename')
full_df = self._replace_empty_list_na(full_df, self.JSON_DATA_FIELDS)
full_df = self._create_analysis_valid_column(full_df, self.JSON_DATA_FIELDS)
full_df['timestamp'] = pd.to_datetime(full_df['timestamp'])
main_df = full_df.drop(columns=self.JSON_DATA_FIELDS)
rgi_df = self._expand_column(full_df, 'rgi_main', na_char='n/a').drop(
columns=self.OTHER_TABLE_DROP_FIELDS)
rgi_kmer_df = self._expand_column(full_df, 'rgi_kmer', na_char='n/a').drop(
columns=self.OTHER_TABLE_DROP_FIELDS)
mlst_df = self._expand_column(full_df, 'mlst', na_char='-').drop(
columns=self.OTHER_TABLE_DROP_FIELDS)
lmat_df = self._expand_column(full_df, 'lmat', na_char='n/a').drop(
columns=self.OTHER_TABLE_DROP_FIELDS)
data = CardLiveData(main_df=main_df,
rgi_parser=RGIParser(rgi_df),
rgi_kmer_df=rgi_kmer_df,
mlst_df=mlst_df,
lmat_df=lmat_df)
# apply data modifiers
for modifier in self._data_modifiers:
data = modifier.modify(data)
return data
def _rows_with_empty_list(self, df: pd.DataFrame, col_name: str):
empty_rows = {}
for index, row in df.iterrows():
empty_rows[index] = (len(row[col_name]) == 0)
return pd.Series(empty_rows)
def _replace_empty_list_na(self, df: pd.DataFrame, cols: List[str]):
dfnew = df.copy()
for column in cols:
empty_rows = self._rows_with_empty_list(df, column)
dfnew.loc[empty_rows, column] = None
return dfnew
def _create_analysis_valid_column(self, df: pd.DataFrame, analysis_cols: List[str]):
df = df.copy()
df['analysis_valid'] = 'None'
for col in analysis_cols:
df.loc[~df[col].isna() & ~(df['analysis_valid'] == 'None'), 'analysis_valid'] = df[
'analysis_valid'] + ' and ' + col
df.loc[~df[col].isna() & (df['analysis_valid'] == 'None'), 'analysis_valid'] = col
return df.replace(' and '.join(self.JSON_DATA_FIELDS), 'all')
def _expand_column(self, df: pd.DataFrame, column: str, na_char: str = None):
"""
Expands a particular column in the dataframe from a list of dictionaries to columns.
That is expands a column like 'col' => [{'key1': 'value1', 'key2': 'value2'}] to a dataframe
with new columns 'col.key1', 'col.key2'.
:param df: The dataframe to use.
:param column: The name of the column to explode.
:param na_char: A character to replace with NA (defaults to no replacement).
:return: A new dataframe with the new columns appended onto it.
"""
exploded_columns = df[column].explode().apply(pd.Series).add_prefix(f'{column}.')
if na_char is not None:
exploded_columns.replace({na_char: None}, inplace=True)
merged_df = pd.merge(df, exploded_columns, how='left', on=df.index.name)
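# Illustration (assumed column content, not from the original module): given a
# column whose cells hold lists of dicts, e.g.
#   df['rgi_main'] = [[{'cut_off': 'Strict', 'best_hit_aro': 'OXA-1'}], ...]
# _expand_column(df, 'rgi_main', na_char='n/a') produces new columns
# 'rgi_main.cut_off' and 'rgi_main.best_hit_aro' merged back onto df by index.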
#!/usr/bin/env python
import os
import pandas as pd
def get_supertwists(qmc_out):
""" read supercell twists from QMCPACK output
Args:
qmc_out (str): QMCPACK output, must contain "Super twist #"
Return:
np.array: an array of twist vectors (ntwist, ndim)
"""
from qharv.reel import ascii_out
mm = ascii_out.read(qmc_out)
idxl = ascii_out.all_lines_with_tag(mm, 'Super twist #')
lines = ascii_out.all_lines_at_idx(mm ,idxl)
data = []
for line in lines:
text = ascii_out.lr_mark(line, '[', ']')
vec = np.array(text.split(), dtype=float)
data.append(vec)
mat = np.array(data)
return mat
def epl_val_err(epl_out):
""" convert epl_out to a pandas DataFrame.
epl_out is expected to be an output of energy.pl from QMCPACK
It simply has to have the format {name:22c}={val:17.3f} +/- {err:26.4f}.
rows with forces will be recognized with 'force_prefix'
Args:
epl_out (str): energy.pl output filename
Returns:
pd.DataFrame: df contains columns ['name','val','err']
"""
tab = pd.read_csv(epl_out, delimiter='=', names=['name', 'text'])
tab = tab.dropna()
def text2val(text):
tokens = text.split('+/-')
if len(tokens) != 2:
raise NotImplementedError('unrecognized value '+text)
val,err = map(float, tokens)
return pd.Series({'val':val, 'err':err})
df = pd.concat([
tab.drop('text', axis=1),
tab['text'].apply(text2val)],
axis=1)
return df
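# Usage sketch (hypothetical filename; the energy.pl line below is illustrative):
#   LocalEnergy           =        -45.768123 +/-          0.012345
# df = epl_val_err('dmc.epl')
# e_row = df[df.name.str.strip() == 'LocalEnergy']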
def epldf_to_entry(df):
names = [name.strip() for name in df.name.values]
ymean = ['%s_mean' % name for name in names]
yerror = ['%s_error' % name for name in names]
names1 = np.concatenate([ymean, yerror])
means = df.val.values
errs = df.err.values
entry = pd.Series(np.concatenate([means, errs]), names1)
return entry
def get_forces(df, natom, prefix='force', ndim=3):
yml = []
yel = []
for iatom in range(natom):
for idim in range(ndim):
col = '%s_%d_%d' % (prefix, iatom, idim)
sel = df.name.apply(lambda x: col in x)
y1m = df.loc[sel].val.squeeze()
y1e = df.loc[sel].err.squeeze()
yml.append(y1m)
yel.append(y1e)
return np.array(yml), np.array(yel)
def sk_from_fs_out(fs_out):
""" extract fluctuating S(k) from qmcfinitesize output
returns: kmag,sk,vk,spk,spsk
kmag: magnitude of kvectors, sk: raw fluc. S(k), vk: long-range potential after break-up
spk: kmags for splined S(k), spsk: splined S(k) """
import reader
bint = reader.BlockInterpreter()
sfile = reader.SearchableFile(fs_out)
# read raw data
block_text = sfile.block_text('#SK_RAW_START#','#SK_RAW_STOP#')
kmag,sk,vk = bint.matrix(block_text[block_text.find('\n')+1:]).T
# read splined S(k)
block_text = sfile.block_text('#SK_SPLINE_START#','#SK_SPLINE_STOP#')
spk,spsk = bint.matrix(block_text[block_text.find('\n')+1:]).T
return kmag,sk,vk,spk,spsk
# end def
# =============== complicated functions ===============
import numpy as np
from copy import deepcopy
def read_jastrows(jas_node):
""" 'jas_node' should be an xml node containing bspline jastrows
put coefficients and attributes into a list of dictionaries """
if (jas_node.attrib["type"] != "Two-Body"): # works for one-body! miracle!
pass#raise TypeError("input is not a two-body Jastrow xml node")
elif (jas_node.attrib["function"].lower() != "bspline"):
raise NotImplementedError("can only handle bspline Jastrows for now")
# end if
data = []
for corr in jas_node.xpath('./correlation'):
coeff = corr.xpath('./coefficients')[0]
entry = deepcopy( corr.attrib )
entry.update(coeff.attrib)
entry['coeff'] = np.array(coeff.text.split(),dtype=float)
entry['type'] = jas_node.attrib['type']
data.append(entry)
# end for corr
return data
# end def read_jastrows
from lxml import etree
def extract_jastrows(qmcpack_input,json_name='jas.json',warn=True,force_refresh=False):
""" given a QMCPACK input that contains linear optimization, extract all printed Jastrows and store in a local database
1. parse 'qmcpack_input' for the qmc[@metho="linear"] section
2. for each *.opt.xml, parse if it exists
3. parse each opt.xml and make local database """
failed = False
subdir = os.path.dirname(qmcpack_input)
target_json = os.path.join(subdir,json_name)
if os.path.isfile(target_json) and (not force_refresh):
if warn:
print("skipping %s" % subdir)
# end if
return 0 # skip this file
# end if
parser = etree.XMLParser(remove_blank_text=True)
# get prefix
xml = etree.parse(qmcpack_input,parser)
proj = xml.xpath("//project")[0]
prefix = proj.attrib['id']
# determine number of optimization loops
all_qmc_sections = xml.xpath('.//qmc[@method="linear"]')
all_iopt = 0 # track multiple 'linear' sections
data = []
for qmc_section in all_qmc_sections:
# for each linear optimization:
# find the number of loops
nopt = 1
loop = qmc_section.getparent()
if loop.tag == 'loop':
nopt = int(loop.attrib['max'])
# end if
# collect all jastrow coefficients
for iopt in range(nopt):
# get optimization file
opt_file = prefix + ".s%s.opt.xml" % str(all_iopt).zfill(3)
opt_xml = os.path.join(subdir,opt_file)
if not os.path.isfile(opt_xml):
if warn:
print("skipping %d in %s" % (all_iopt,subdir))
# end if
continue
# end if
# parse optimization file
opt = etree.parse(opt_xml,parser)
jnodes = opt.xpath('//jastrow')
for jas_node in jnodes:
entries = read_jastrows(jas_node)
for entry in entries:
entry['iopt'] = all_iopt
# end for entry
data.append(entry)
# end for
all_iopt += 1
# end for iopt
# end for qmc_section
if len(data) == 0:
failed = True
else:
df = pd.DataFrame( data )
df.to_json(target_json)
# end if
return failed
# end def extract_jastrows
def extract_best_jastrow_set(opt_input,opt_json='opt_scalar.json',nequil='auto',force_refresh=False):
import nexus_addon as na
subdir = os.path.dirname(opt_input)
# locally create jas.json
extract_jastrows(opt_input,force_refresh=force_refresh)
# locally create opt_scalar.json
scalar_json = os.path.join(subdir,opt_json)
if (not os.path.isfile(scalar_json)) or force_refresh:
# initialize analyzer
from qmca import QBase
options = {"equilibration":nequil}
QBase.options.transfer_from(options)
entry = na.scalars_from_input(opt_input)
pd.DataFrame(entry).to_json(scalar_json)
# end if
# get best jastrow set
best_jas = collect_best_jastrow_set(subdir)
return best_jas
# end def extract_best_jastrow_set
def collect_best_jastrow_set(subdir,jas_json='jas.json',opt_json='opt_scalar.json'
,rval_weight=0.75,rerr_weight=0.25):
""" find best set of jastrows in 'subdir', assume files:
1. jas.json: a database of QMCPACK bspline jastrows with 'iopt' column
2. opt_scalar.json: a database of QMCPACK scalars including 'LocalEnergy_mean', 'LocalEnergy_error', 'Variance_mean', and 'Variance_error' """
from dmc_database_analyzer import div_columns
jfile = os.path.join(subdir,jas_json)
if not os.path.isfile(jfile):
raise RuntimeError('%s not found in %s' % (jfile,subdir))
# end if
ofile = os.path.join(subdir,opt_json)
if not os.path.isfile(ofile):
raise RuntimeError('%s not found in %s' % (ofile,subdir))
# end if
jdf = pd.read_json(jfile)
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from preprocessing import preprocess
from tokenizer import tokenize
class ThaiPreprocessor(BaseEstimator, TransformerMixin):
def fit(self, X, y=None, **fit_params):
return self
def preprocess(self, text: str) -> str:
return preprocess(text)
def transform(self, X) -> pd.Series:
return pd.Series(X).apply(self.preprocess)
class ThaiTokenizer(BaseEstimator, TransformerMixin):
def __init__(self, remove_placeholders: bool = False, min_char: int = 2):
self.remove_placeholders = remove_placeholders
self.min_char = min_char
def fit(self, X, y=None, **fit_params):
return self
def tokenize(self, text):
tokens = tokenize(
text,
min_char=self.min_char,
remove_placeholder=self.remove_placeholders,
)
return tokens
def transform(self, X) -> pd.Series:
return pd.Series(X).apply(self.tokenize)
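# Usage sketch (assumed wiring, not part of the original module): both classes
# follow the sklearn transformer protocol, so they can be chained in a Pipeline.
# from sklearn.pipeline import Pipeline
# pipe = Pipeline([('clean', ThaiPreprocessor()),
#                  ('tokens', ThaiTokenizer(min_char=2))])
# tokens = pipe.fit_transform(['example text'])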
import logging
from abc import abstractmethod
from typing import Union
import dask.dataframe as dd
import pandas as pd
from Bio import SeqIO
import openomics
from openomics.utils.read_gtf import read_gtf
from .base import Database
# from gtfparse import read_gtf
class SequenceDatabase(Database):
"""Provides a series of methods to extract sequence data from
SequenceDataset.
"""
def __init__(self, replace_U2T=False, **kwargs):
"""
Args:
replace_U2T:
**kwargs:
"""
self.replace_U2T = replace_U2T
super(SequenceDatabase, self).__init__(**kwargs)
@abstractmethod
def read_fasta(self, fasta_file:str, replace_U2T:bool, npartitions=None):
"""Returns a pandas DataFrame containing the fasta sequence entries.
With a column named 'sequence'.
Args:
fasta_file (str): path to the fasta file, usually as
self.file_resources[<file_name>]
replace_U2T (bool):
npartitions:
"""
raise NotImplementedError
@abstractmethod
def get_sequences(self, index:str, omic:str, agg_sequences:str, **kwargs):
"""Returns a dictionary where keys are 'index' and values are
sequence(s).
Args:
index (str): {"gene_id", "gene_name", "transcript_id",
"transcript_name"}
omic (str): {"lncRNA", "microRNA", "messengerRNA"}
agg_sequences (str): {"all", "shortest", "longest"}
**kwargs: any additional argument to pass to
SequenceDataset.get_sequences()
"""
raise NotImplementedError
@staticmethod
def get_aggregator(agg:Union[str, callable]=None):
"""Returns a function used aggregate a list of sequences from a groupby
on a given key.
Args:
agg: One of ("all", "shortest", "longest"), default "all". If "all",
then for all
"""
if agg == "all":
agg_func = lambda x: list(x) if not isinstance(x, str) else x
elif agg == "shortest":
agg_func = lambda x: min(x, key=len)
elif agg == "longest":
agg_func = lambda x: max(x, key=len)
elif callable(agg):
return agg
else:
raise Exception(
"agg_sequences argument must be one of {'all', 'shortest', 'longest'}"
)
return agg_func
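# Illustration (not part of the original class): the returned callable is meant
# to be used as a pandas groupby aggregator, e.g.
#   agg = SequenceDatabase.get_aggregator('longest')
#   agg(['ACGT', 'ACGTACGT'])   # -> 'ACGTACGT'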
class GENCODE(SequenceDatabase):
"""Loads the GENCODE database from https://www.gencodegenes.org/ .
Default path: ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_32/ .
Default file_resources: {
"basic.annotation.gtf": "gencode.v32.basic.annotation.gtf.gz",
"long_noncoding_RNAs.gtf": "gencode.v32.long_noncoding_RNAs.gtf.gz",
"lncRNA_transcripts.fa": "gencode.v32.lncRNA_transcripts.fa.gz",
"transcripts.fa": "gencode.v32.transcripts.fa.gz",
}
"""
def __init__(
self,
path="ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_32/",
file_resources=None,
col_rename=None,
npartitions=0,
replace_U2T=False,
remove_version_num=False,
):
"""
Args:
path:
file_resources:
col_rename:
npartitions:
replace_U2T (bool): Whether to replace nucleotides from U to T on
the RNA primary sequences.
remove_version_num (bool): Whether to drop the version number on the
ensembl ID.
"""
if file_resources is None:
file_resources = {
"basic.annotation.gtf": "gencode.v32.basic.annotation.gtf.gz",
"long_noncoding_RNAs.gtf":
"gencode.v32.long_noncoding_RNAs.gtf.gz",
"lncRNA_transcripts.fa":
"gencode.v32.lncRNA_transcripts.fa.gz",
"transcripts.fa": "gencode.v32.transcripts.fa.gz",
}
self.remove_version_num = remove_version_num
super(GENCODE, self).__init__(
path=path,
file_resources=file_resources,
col_rename=col_rename,
replace_U2T=replace_U2T,
npartitions=npartitions,
)
def load_dataframe(self, file_resources, npartitions=None):
"""
Args:
file_resources:
npartitions:
"""
dfs = []
for filename, content in file_resources.items():
if ".gtf" in filename:
df = read_gtf(content,
npartitions=npartitions,
compression="gzip")
dfs.append(df)
if npartitions:
annotation_df = dd.concat(dfs)
else:
annotation_df = pd.concat(dfs)
from datetime import date
from components.transaction_store_block import TransactionStoreInput
from components.transactions_table_block import TransactionTableComponent, TransactionTableInput
from core.transaction_journal import TransactionJournal, TransactionJournalConfig
import piecash
import dash
from dash import html, dash_table as dt
import pandas as pd
from components import ForecastComponent, ForecastComponentInput, TransactionStore
from core.typings import BalanceType
def dash_test():
app = dash.Dash(__name__)
book = piecash.open_book(
"/mnt/c/Users/guilh/Documents/Gnucash/personal-sqlite.gnucash", open_if_lock=True)
config = TransactionJournalConfig(
checkings_parent_guid="3838edd7804247868ebed2d2404d4c26",
liabilities_parent_guid="44a238b52fdd44c6bad26b9eb5efc219"
)
journal = TransactionJournal(book=book, config=config)
transaction_data = journal.get_transaction_data(date(2022, 2, 1), date(2022, 10, 1))
transaction_store = TransactionStore(input=TransactionStoreInput(data=transaction_data))
forecast = ForecastComponent(app=app, input=ForecastComponentInput(store_name=transaction_store.get_name()))
table = TransactionTableComponent(app=app, input=TransactionTableInput(
store_name=transaction_store.get_name(),
graph_name=forecast.get_name()
))
app.layout = html.Div([
html.Div(id="store-container"),
html.Div(id="graph-container"),
html.Div(id="table-container")
])
app.layout["graph-container"] = forecast.layout
app.layout["table-container"] = table.layout
app.layout["store-container"] = transaction_store.layout
app.run_server(debug=True)
def component_test():
app = dash.Dash(__name__)
d = {
'date': [1, 2],
'balance': [100, 111],
'scheduled': [False, True],
'type': [BalanceType.CHECKINGS, BalanceType.LIABILITIES]
}
data = pd.DataFrame(data=d)
# -*- coding: utf-8 -*-
"""
Classes and methods used to implement specific forms of dynamic analysis
@author: rihy
"""
from __init__ import __version__ as currentVersion
import numpy
import timeit
import itertools
import matplotlib.pyplot as plt
import dill
import pandas as pd
#import multiprocessing # does not work with Spyder!
import tstep
import loading
import msd_chain
from common import chunks
#%%
class Dyn_Analysis():
"""
Base class for dynamic analysis implementations
_Inheritance expected_
"""
def __init__(self,name:str,dynsys_obj,loading_obj):
"""
Initialisation function
_All derived classes are expected to run this function_
"""
self.name = name
"""
String identifier for object
"""
self.dynsys_obj = dynsys_obj
"""
Dynamic system to which analysis relates
"""
self.loading_obj = loading_obj
"""
Object defining applied loading
"""
def run(self):
"""
Runs dynamic analysis
_Inheritance expected_
"""
pass
def _pickle_fName(self,fName):
"""
Defines default filename for pickle files
"""
if fName is None:
fName = "{0}".format(self.__class__.__name__)
if self.name is not None:
fName += "{0}".format(self.name)
fName += ".pkl"
return fName
def save(self,fName=None):
"""
Serialises object to file `fName`
"""
fName=self._pickle_fName(fName)
with open('{0}'.format(fName), 'wb') as dill_file:
print("Serialising `{0}` object to `{1}`".format(self.__class__.__name__,fName))
dill.dump(self, dill_file)
print("Serialisation complete!")
class MovingLoadAnalysis(Dyn_Analysis):
"""
Class to implement moving load analysis
***
_Moving load analysis_ involves the determining the response of the system
to groups of point loads moving in a pre-defined manner along a defined
track.
_Moving loads_ include, but are not limited to:
* Train axles, which are usually at pre-defined spacings
* Vehicle axles
* Forces to represent the actions of walkers/joggers per UK NA to
BS EN 1991-2
"""
def __init__(self,
modalsys_obj,
name=None,
loadtrain_obj=None,
loadtrain_fName="loadDefs.csv",
loadVel=5.0,
use_abs_modeshape=False,
tEpilogue=10.0,
dt=None,
dt_loads=0.01,
max_dt=None,
retainDOFTimeSeries=True,
retainResponseTimeSeries=True,
writeResults2File=False,
results_fName="results.csv"):
"""
Initialisation function
***
Required:
* `modalsys_obj`, modal system to which analysis relates
**Important note:** `modalsys_obj` must be a modal system, as the
action of the moving loads is determined by obtaining the
mode-generalised force functions for each mode. _This is checked_.
***
Optional:
* `loadtrain_obj`, load train object defining load pattern. If _None_
then `loadtrain_fName` must be provided (see below)
* `loadtrain_fName`, file containing load train definition
* `loadVel`, constant velocity of load pattern (m/s)
* `loadDefs_fName`, file containing load definitions
"""
# Handle None for loadtrain_obj
if loadtrain_obj is None:
loadtrain_obj = loading.LoadTrain(fName=loadtrain_fName)
# Check class name of modalsys_obj
if modalsys_obj.__class__.__name__ != "ModalSys":
raise ValueError("`modalsys_obj`: instance of `ModalSys` class expected!")
# Check loadtrain_obj is of class 'LoadTrain' or derived class
if loadtrain_obj.__class__.__name__ != "LoadTrain":
base_class_name = loadtrain_obj.__class__.__bases__[0].__name__
if base_class_name != "LoadTrain":
raise ValueError("`loadtrain_obj`: instance of `LoadTrain` "+
"class (or derived classes) expected!\n" +
"`loadtrain_obj` base class: %s"
% base_class_name)
# Run parent init
super().__init__(name,modalsys_obj,loadtrain_obj)
# Save details as attributes
self.loadVel = loadVel
"""
Velocity of load pattern along track
"""
# Define time-stepping analysis
tStart, tEnd = self._CalcSimDuration(loadVel=loadVel,
tEpilogue=tEpilogue)
# Define force function for parent system and subsystems
modalForces_func = modalsys_obj.CalcModalForces(loading_obj=loadtrain_obj,
loadVel=loadVel,
dt=dt_loads,
use_abs_modeshape=use_abs_modeshape)
force_func_dict = {modalsys_obj : modalForces_func}
self.tstep_obj = tstep.TStep(modalsys_obj,
name=name,
tStart=tStart,
tEnd=tEnd,
dt=dt,
max_dt=max_dt,
force_func_dict=force_func_dict,
retainDOFTimeSeries=retainDOFTimeSeries,
retainResponseTimeSeries=retainResponseTimeSeries,
writeResults2File=writeResults2File,
results_fName=results_fName
)
"""
Time-stepping solver object
"""
self.results_obj = self.tstep_obj.results_obj
"""
Results object
"""
# Create relationship to this analysis object
self.results_obj.analysis_obj = self
def run(self,
verbose=True,
saveResults=False,
save_fName=None):
"""
Runs moving load analysis, using `tstep.run()`
_Refer documentation for that function for more details_
"""
if verbose:
print("***** Running `%s`..." % self.__class__.__name__)
print("Dynamic system: '{0}'".format(self.dynsys_obj.name))
print("Load pattern: '{0}'".format(self.loading_obj.name))
print("Load velocity: %.1f" % self.loadVel)
tic=timeit.default_timer()
results_obj = self.tstep_obj.run(verbose=verbose)
toc=timeit.default_timer()
if verbose:
print("***** Analysis complete after %.3f seconds." % (toc-tic))
if saveResults:
self.save(fName=save_fName)
return results_obj
def _CalcSimDuration(self,loadVel=10.0,tEpilogue=5.0):
"""
Calculates the required duration for time-stepping simulation
***
Optional:
* `loadVel`, constant velocity (m/s) of the load pattern
* `tEpilogue`, additional time to run following exit of last load from
track overlying the dynamic system in question.
In the case of `Ltrack` and `loadLength`, if _None_ is provided then
function will attempt to obtain this information from class attributes.
***
Returns:
tStart, `tEnd`: start and end times (secs) for simulation
"""
#print("tEpilogue = %.3f" % tEpilogue)
modalsys_obj = self.dynsys_obj
loadtrain_obj = self.loading_obj
# Get length of track along which loading is running
attr = "Ltrack"
obj = modalsys_obj
if hasattr(obj,attr):
Ltrack = getattr(obj,attr)
else:
raise ValueError("`{0}` not defined!".format(attr))
# Get length of load train
attr = "loadLength"
obj = loadtrain_obj
if hasattr(obj,attr):
loadLength = getattr(obj,attr)
else:
raise ValueError("`{0}` not defined!".format(attr))
# Determine time required for all loads to pass along track
tStart=0
Ltotal = Ltrack + loadLength
tEnd = Ltotal/loadVel + tEpilogue
return tStart, tEnd
def PlotResults(self,dofs2Plot=None):
"""
Plots results using `tstep_results.PlotResults()`.
_Refer documentation from that function for further details_
"""
self.results_obj.PlotResults(dofs2Plot=dofs2Plot)
class Multiple():
"""
Function to run multiple dynamic analyses and provide functionality to
store, plot and analyse results from multiple dynamic analyses in a
systematic manner
"""
def __init__(self,
classDef,
dynsys_obj:object,
writeResults2File:bool=False,
retainDOFTimeSeries:bool=False,
retainResponseTimeSeries:bool=False,
**kwargs):
"""
Initialisation function
****
Required:
* `classDef`, `Dyn_Analysis` class definition (usually inherited)
that implements the required analysis type
* `dynsys_obj`, dynamic system to which analysis relates
***
Optional:
* `retainResponseTimeSeries`, _boolean_, denotes whether detailed
_response_ time series results can be deleted once summary
statistics have been computed
* `retainDOFTimeSeries`, _boolean_, denotes whether detailed _DOF_
time series results can be deleted once summary statistics have been
computed
By default _False_ is assigned to be above (contrary to usual defaults)
as running multiple analyses would otherwise often lead to large
memory demands.
"""
className = classDef.__name__
print("Initialising multiple `{0}`".format(className))
if className == "MovingLoadAnalysis":
kwargs2permute = ["loadVel","loadtrain_obj"]
elif className == "UKNA_BSEN1991_2_walkers_joggers":
kwargs2permute = ["analysis_type","mode_index"]
else:
raise ValueError("Unsupported class name!")
# Get input arguments to permute
vals2permute={}
for key in kwargs2permute:
# Get list as supplied via **kwargs
if key in kwargs:
vals_list = kwargs[key]
del kwargs[key]
# Convert to list if single
if not isinstance(vals_list,list):
vals_list = [vals_list]
vals2permute[key]=vals_list
else:
raise ValueError("'{0}' ".format(key) +
"included in `kwargs2permute` list but " +
"list of values to permute not provided!")
# Populate object array with initialised objects
vals_list = list(itertools.product(*vals2permute.values()))
analysis_list=[]
nAnalysis = len(vals_list)
for i in range(nAnalysis):
# Prepare dict of key arguments to pass
kwargs_vals = vals_list[i]
kwargs_dict = dict(zip(kwargs2permute, kwargs_vals))
# Append any general kwargs provided
results_fName = "results/analysis%04d.csv" % (i+1)
kwargs_dict.update(kwargs)
kwargs_dict.update({"retainDOFTimeSeries":retainDOFTimeSeries,
"retainResponseTimeSeries":retainResponseTimeSeries,
"writeResults2File":writeResults2File,
"results_fName":results_fName})
# Initialisise new analysis object
analysis_list.append(classDef(name="%04d"% i,
modalsys_obj=dynsys_obj,
**kwargs_dict))
self.dynsys_obj = dynsys_obj
"""
`DynSys` object to which analysis relates
"""
self.analysisType = className
"""
Class name of `dyn_analysis` derived class
"""
self.vals2permute = vals2permute
"""
Dictionary of keywords and values to permute in the multiple analyses
defined
"""
self.vals2permute_shape = tuple([len(x) for x in self.vals2permute.values()])
"""
Tuple to denote the shape of lists specified in `vals2permute`
"""
self.results_arr=None
"""
ndarray of `tstep_results` object instances
"""
self.stats_dict=None
"""
Dict of ndarrays containing stats (max, min, std, absmax) for each of
the analyses carried out, for each of the responses defined
"""
self.analysis_list = analysis_list
"""
List of `Dyn_Analysis` object instances, each of which defines a
dynamic analysis to be performed
"""
def run(self,save=True,solveInParallel=False):
"""
Runs multiple dynamic analyses, as defined by `__init__`
***
In principle this can be done in parallel, to efficiently use all
avaliable cores and give improve runtime.
_Parallel processing not yet implemented due to outstanding bug
associated with using `multiprocessing` module from within Spyder_
"""
print("Running multiple `{0}`\n".format(self.analysisType))
tic=timeit.default_timer()
# Run analyses using parallel processing (if possible)
if solveInParallel:
print("Parallel processing not yet implemented due to " +
"outstanding bug associated with using "+
"`multiprocessing` module from within Spyder\n"+
"A single-process analysis will be carried out instead.")
# Run all pre-defined analyses
for i, x in enumerate(self.analysis_list):
print("Analysis #%04d of #%04d" % (i+1, len(self.analysis_list)))
x.run(saveResults=False)
print("")#clear line for emphasis
toc=timeit.default_timer()
print("Multiple `%s` analysis complete after %.3f seconds!\n"
% (self.analysisType,(toc-tic)))
# Reshape results objects in ndarray
results_obj_list = [x.tstep_obj.results_obj for x in self.analysis_list]
reqdShape = self.vals2permute_shape
results_arr = numpy.reshape(results_obj_list,reqdShape)
self.results_arr = results_arr
# Collate statistics
self.collate_stats()
# Pickle results
if save:
self.save()
def plot_stats(self,
stat='absmax',
sys=None,
**kwargs):
"""
Produces a plot of a given statistic, taken across multiple analyses
Optional:
* `stat`, name of statistic to be plotted (e.g. 'max', 'min')
* `sys`, instance of `DynSys` class, or string to denote name of system,
used to select system whose outputs are to be plotted.
If None then seperate figures will be produced for each subsystem.
_See docstring for `plot_stats_for_system()` for other keyword
arguments that may be passed._
"""
# Get list of system names to loop over
if sys is None:
sys_names = [obj.name for obj in self.dynsys_obj.DynSys_list]
else:
# Get name of system
if isinstance(sys,str):
sys_name = sys
else:
sys_name = sys.name # get name from object
sys_names = [sys_name] # list of length 1
# Produce a seperate figure for each sub-system responses
fig_list = []
for sys_name in sys_names:
_fig_list = self.plot_stats_for_system(sys_name=sys_name,
stat=stat,
**kwargs)
fig_list.append(_fig_list)
# Return list of figures, one for each subsystem
return fig_list
def plot_stats_for_system(self,sys_name,stat,
max_responses_per_fig:int=5,
subplot_kwargs={}
):
"""
Produces a plot of a given statistic, taken across multiple analyses
for a specified sub-system
Required:
* `sys_name`, string giving name of system
* `stat`, string to specify statistic to be plotted. E.g. 'absmax'
Optional:
* `max_responses_per_fig`, integer to denote maximum number of
responses to be plotted in each figure. If None, all responses will
be plotted via a single figure. Default value (=5) should give
nice plots in most cases.
_Users are advised to tweak the appearance
of figures, e.g. using the `pyplot.subplots_adjust()` method._
* `subplot_kwargs`, dict of keyword arguments to be passed to
`pyplot.subplots()` method, to customise subplots (e.g. share axes)
"""
# Re-collate statistics as required
if self.stats_df is None:
stats_df = self.collate_stats()
else:
stats_df = self.stats_df
# Slice for requested stat
try:
stats_df = stats_df.xs(stat,level=-1,axis=1)
except KeyError:
raise KeyError("Invalid statistic selected!")
# Get stats for just this system
df_thissys = stats_df.xs(sys_name,level=0,axis=0)
# Obtain responses names for this subsystem
response_names = df_thissys.index.values
nResponses = len(response_names)
if max_responses_per_fig is None:
max_responses_per_fig = nResponses
fig_list = []
for _response_names in chunks(response_names,max_responses_per_fig):
# Create figure, with one subplot per response
fig, axlist = plt.subplots(len(_response_names),
sharex=True,
**subplot_kwargs)
fig_list.append(fig)
for i, (r,ax) in enumerate(zip(_response_names,axlist)):
# Get series for this response
df = df_thissys.loc[r]
# Reshape such that index will be x-variable for plot
df = df.unstack()
# Make plot
ax = df.plot(ax=ax,legend=False)
ax.set_ylabel(r,
rotation=0,
fontsize='small',
horizontalalignment='right',
verticalalignment='center')
if i==0:
# Add legend to figure
fig.legend(ax.lines, df.columns,fontsize='x-small')
fig.subplots_adjust(left=0.15,right=0.95)
fig.align_ylabels()
return fig_list
def _pickle_fName(self,fName):
"""
Defines default filename for pickle files
"""
if fName is None:
fName = "{0}_{1}".format(self.__class__.__name__,self.analysisType)
if self.dynsys_obj.name is not None:
fName += "_{0}".format(self.dynsys_obj.name)
fName += ".pkl"
return fName
def save(self,fName=None):
"""
Serialises object to file `fName`
"""
fName=self._pickle_fName(fName)
with open('{0}'.format(fName), 'wb') as dill_file:
print("\nSerialising `{0}` object to `{1}`".format(self.__class__.__name__,fName))
dill.dump(self, dill_file)
print("Serialisation complete!\n")
def collate_stats(self):
"""
Collates computed statistics into a Pandas DataFrame, as follows:
* Index is a MultiIndex, comprising the following levels:
* 0 : Name of sub-system
* 1 : Name of output / response
* Columns are a MultiIndex comprising the following levels:
* 0 to -2 : Variables of analysis `vals2permute`
* -1 : Statistic name (e.g. 'max', 'min')
"""
print("Collating statistics...")
# Get list of all tstep_results objects associate with Multiple()
if self.results_arr is None:
raise ValueError("self.results_arr=None! No results to collate!")
# Get lists of inputs permuted for analyses
kwargs2permute = list(self.vals2permute.keys())
vals2permute = list(self.vals2permute.values())
# Where list contains objects, get their names
for i in range(len(vals2permute)):
if hasattr(vals2permute[i][0],'name'):
vals2permute[i] = [x.name for x in vals2permute[i]]
# Get list of systems and subsystems
DynSys_list = self.dynsys_obj.DynSys_list
stats_df = None
for i, (index, results_obj) in enumerate(numpy.ndenumerate(self.results_arr)):
# Get combination of vals2permute for results_obj
combination = [vals[i] for i, vals in zip(index, vals2permute)]
# Get stats from results_obj
stats_df_inner = results_obj.get_response_stats_df()
# Loop over all sub-systems
for df, sys in zip(stats_df_inner,DynSys_list):
# Prepend system to index
df = pd.concat([df],
axis=0,
keys=[sys.name],
names=['System','Response'])
# Prepare combination values to column MultiIndex
tuples = [(*combination,col) for col in df.columns]
df.columns = pd.MultiIndex.from_tuples(tuples)
df.columns.names = [*kwargs2permute,'Statistic']
# Append results to DataFrame
stats_df = pd.concat([stats_df, df], axis=1)
import argparse
from pathlib import Path
import numpy as np
import pandas as pd
def get_global_score(npz_path: str):
x = np.load(npz_path)
global_score = np.mean(x['lddt'])
return global_score
def get_df(dir_path: Path) -> pd.DataFrame:
model_name_list = []
global_score_list = []
for npz_path in dir_path.glob('*.npz'):
global_score = get_global_score(str(npz_path))
model_name_list.append(npz_path.stem)
global_score_list.append(global_score)
method_name = dir_path.stem
df = pd.DataFrame({method_name: global_score_list}, index=model_name_list)
return df
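# Usage sketch (hypothetical directory layout): each method directory holds one
# .npz per model with a per-residue 'lddt' array; get_df() averages it per model.
# df_method = get_df(Path('score/CASP13/T0950/DeepAccNet'))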
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('score_dir', type=str, help='directory of local score. ex) ../../../score/dataset/target')
args = parser.parse_args()
score_dir = Path(args.score_dir)
standard_score_dir = score_dir / 'DeepAccNet'
standard_df = get_df(standard_score_dir)
msa_score_dir = score_dir / 'DeepAccNet-Bert'
msa_df = get_df(msa_score_dir)
df = pd.merge(standard_df, msa_df, left_index=True, right_index=True, how='outer')
import json
import math
import random
import numpy as np
import pandas as pd
from scipy.stats import norm
from sklearn import linear_model
from math import sqrt
from DataSynthesizer.lib.utils import read_json_file
from FAIR.FairnessInRankings import FairnessInRankingsTester
def save_uploaded_file(file, current_file):
"""
Save user uploaded data on server.
Attributes:
file: the uploaded dataset.
current_file: file name without the ".csv" suffix
"""
with open(current_file+".csv", 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
def get_score_scatter(current_file,top_K=100):
"""
Generate data for the scatter plot.
Attributes:
current_file: file name that stores the data (without the ".csv" suffix)
top_K: maximum number of rows included in the scatter plot
Return: data for the scatter plot in HighChart format
"""
data = pd.read_csv(current_file+"_weightsum.csv").head(top_K)
scatter_points = []
score_value = data["GeneratedScore"].tolist()
position_value = [x for x in range(1, len(data) + 1)]
for i in range(len(score_value)):
scatter_points.append([position_value[i], score_value[i]])
return scatter_points
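# Shape of the returned list (numbers are illustrative only):
# [[1, 92.3], [2, 90.1], [3, 88.7], ...]   # [rank position, GeneratedScore]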
def getAttValueCountTopAndOverall(input_data, att_name, top_K=10):
"""
Subfunction to count values of input attribute in the data for top 10 and overall pie chart.
Attributes:
input_data: dataframe that stores the input data
att_name: name of the attribute to count
top_K: number of top positions over which to count values, default value is 10
Return: json data with two two-dimensional arrays of [value, count] pairs, one for the top 10 and one overall
"""
counts_all = {}
all_values_count = input_data[att_name].value_counts()
top_data = input_data[0:top_K]
# get overall counts
new_values_all = []
for i in range(len(all_values_count)):
cur_cate = all_values_count.index[i]
# if not a string, then encode it to the type that is JSON serializable
if not isinstance(cur_cate, str):
cur_cate = str(cur_cate)
cur_count = int(all_values_count.values[i])
new_values_all.append([cur_cate,cur_count])
counts_all["overall"] = new_values_all
# get top K counts and make sure list of counts include every value of input attribute for consistent pie chart colors
top_values_count = top_data[att_name].value_counts()
top_cates = top_values_count.index
# generate a dict to store the top k value counts
top_values_count_dic = {}
for i in range(len(top_values_count)):
top_values_count_dic[top_values_count.index[i]] = int(top_values_count.values[i])
# generate a new value list for top K using same order as in over all list
new_values_top = []
for i in range(len(all_values_count)):
cur_cate = all_values_count.index[i]
# if not a string, then encode it to the type that is JSON serializable
if not isinstance(cur_cate, str):
str_cur_cate = str(cur_cate)
else:
str_cur_cate = cur_cate
if cur_cate in top_cates: # exiting in top K
new_values_top.append([str_cur_cate, top_values_count_dic[cur_cate]])
else:
new_values_top.append([str_cur_cate, 0])
counts_all["topTen"] = new_values_top
return counts_all
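# Shape of the returned dict (values and counts are illustrative only):
# {'overall': [['F', 650], ['M', 350]], 'topTen': [['F', 4], ['M', 6]]}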
def get_chart_data(current_file, att_names):
"""
Generate data for the pie charts.
Attributes:
current_file: file name that stores the data (without the ".csv" suffix)
att_names: list of attribute names to compute the chart data
Return: json data for pie chart plot using HighChart format
"""
data = pd.read_csv(current_file + "_weightsum.csv")
pie_data = {}
for ai in att_names:
cur_ai_json = {}
counts_all = getAttValueCountTopAndOverall(data,ai)
cur_ai_json["topTen"] = counts_all["topTen"]
cur_ai_json["overall"] = counts_all["overall"]
pie_data[ai] = cur_ai_json
return pie_data
def computeSlopeOfScores(current_file,top_K, round_default=2):
"""
Compute the slope of the scatter plot.
Attributes:
current_file: file name that stores the data (without the ".csv" suffix)
top_K: number of top rows used to compute the slope
Return: slope of scatter plot of top_K data
"""
data = pd.read_csv(current_file + "_weightsum.csv")
top_data = data[0:top_K]
xd = [i for i in range(1,top_K+1)]
yd = top_data["GeneratedScore"].tolist()
# determine best fit line
par = np.polyfit(xd, yd, 1, full=True)
slope = par[0][0]
return round(slope,round_default)
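# Minimal sketch of the fit used above (synthetic values): np.polyfit with
# degree 1 and full=True returns the [slope, intercept] pair as its first
# element, so par[0][0] is the slope of the best-fit line.
# _slope = np.polyfit([1, 2, 3, 4], [1.0, 0.8, 0.7, 0.55], 1, full=True)[0][0]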
def compute_correlation(current_file,y_col="GeneratedScore",top_threshold=3,round_default=2):
"""
Compute the correlation between attributes and generated scores.
Attributes:
current_file: file name that stores the data (without the ".csv" suffix)
y_col: column name of the Y variable
top_threshold: maximum number of correlated attributes to return
round_default: number of decimal places used to round the returned coefficients
Return: list of correlated attributes and their coefficients
"""
# get the data for generated ranking
ranking_df = pd.read_csv(current_file+"_weightsum.csv")
# get the upload data for correlation computation
upload_df = pd.read_csv(current_file+".csv")
numeric_atts = list(upload_df.describe().columns)
X = upload_df[numeric_atts].values
# no need to standardize data
# scaler = StandardScaler()
# transform_X = scaler.fit_transform(X)
y = ranking_df[y_col].values
regr = linear_model.LinearRegression(normalize=False)
regr.fit(X, y)
# get coeff's, ordered by significance
    # round each coefficient to round_default decimal places
for i in range(len(regr.coef_)):
regr.coef_[i] = round(regr.coef_[i], round_default)
# normalize coefficients to [-1,1]
max_coef = max(regr.coef_)
min_coef = min(regr.coef_)
abs_max = max(abs(max_coef),abs(min_coef))
stand_coef = []
for ci in regr.coef_:
new_ci = round(ci/abs_max,round_default)
stand_coef.append(new_ci)
# coeff_zip = zip(regr.coef_, numeric_atts)
coeff_zip = zip(stand_coef, numeric_atts)
coeff_sorted = sorted(coeff_zip, key=lambda tup: abs(tup[0]), reverse=True)
if len(coeff_sorted) > top_threshold:
coeff_return = coeff_sorted[0:top_threshold]
else:
coeff_return = coeff_sorted
# only return top_threshold most correlated attributes
return coeff_return
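# Hypothetical usage sketch: assumes "toy.csv" (the uploaded data) and "toy_weightsum.csv"
# (the ranked data with a "GeneratedScore" column) exist; the attribute names are made up.
#
#   top_corr = compute_correlation("toy", top_threshold=3)
#   # top_corr is a list such as [(1.0, "gpa"), (-0.42, "age"), (0.1, "income")]:
#   # coefficients normalized to [-1, 1] and sorted by absolute value.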
def compute_statistic_topN(chosed_atts,current_file,top_N,round_default=1):
"""
    Compute the statistics of the input attributes.
    Attributes:
        chosed_atts: list of attributes to be computed
        current_file: file name that stores the data (without ".csv" suffix)
        top_N: number of rows of current_file to be used
        round_default: number of decimal places for the returned statistics
    Return: json data of the computed statistics
"""
# data is sorted by ranking scores from higher to lower
data = pd.read_csv(current_file+"_weightsum.csv").head(top_N)
statistic_data = {}
# get the median data
for atti in chosed_atts:
cur_att_max = max(data[atti])
cur_att_median = np.median(data[atti])
cur_att_min = min(data[atti])
statistic_data[atti] = {"max": round(cur_att_max, round_default),
"median": round(cur_att_median, round_default),
"min": round(cur_att_min, round_default)}
return statistic_data
def mergeUnfairRanking(_px, _sensitive_idx, _fprob): # input is the ranking
"""
Generate a fair ranking.
Attributes:
_px: input ranking (sorted), list of ids
        _sensitive_idx: ids of the protected group members in the input ranking
_fprob: probability to choose the protected group
Return: generated fair ranking, list of ids
"""
# _px=sorted(range(len(_inputrankingscore)), key=lambda k: _inputrankingscore[k],reverse=True)
rx = [x for x in _px if x not in _sensitive_idx]
qx = [x for x in _px if x in _sensitive_idx]
rx.reverse() # prepare for pop function to get the first element
qx.reverse()
res_list = []
while (len(qx) > 0 and len(rx) > 0):
r_cur = random.random()
# r_cur=random.uniform(0,1.1)
if r_cur < _fprob:
res_list.append(qx.pop()) # insert protected group first
else:
res_list.append(rx.pop())
if len(qx) > 0:
qx.reverse()
res_list = res_list + qx
if len(rx) > 0:
rx.reverse()
res_list = res_list + rx
if len(res_list) < len(_px):
print("Error!")
return res_list
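# Minimal sketch of how the fair-merge helper can be called (the ids and probability are
# made-up values, not taken from the original application):
#
#   ranking = [0, 1, 2, 3, 4, 5]    # ids sorted by score, best first
#   protected = [1, 3]              # ids belonging to the protected group
#   fair_ranking = mergeUnfairRanking(ranking, protected, 0.5)
#   # each position is filled by the next protected id with probability 0.5, otherwise by
#   # the next non-protected id; any leftovers are appended at the end.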
def runFairOracles(chosed_atts,current_file,alpha_default=0.05,k_threshold=200,k_percentage=0.5):
"""
    Run all fairness oracles: FA*IR, Pairwise and Proportion.
    Attributes:
        chosed_atts: list of sensitive attributes
        current_file: file name that stores the data (without ".csv" suffix)
        alpha_default: default significance level for each oracle
        k_threshold: threshold on the size of the uploaded data used to decide the top-K in FA*IR and Proportion
        k_percentage: fraction used to decide the top-K in FA*IR and Proportion when the uploaded dataset is smaller than k_threshold
    Return: json data of the fairness results of all oracles
"""
# data is sorted by ranking scores from higher to lower
data = pd.read_csv(current_file+"_weightsum.csv")
total_n = len(data)
# set top K based on the size of input data
# if N > 200, then set top K = 100, else set top K = 0.5*N
if total_n > k_threshold:
top_K = 100
else:
top_K = int(np.ceil(k_percentage* total_n))
fair_res_data = {} # include all details of fairness validation
fair_statement_data = {} # only include the fairness result, i.e. fair or unfair, True represents fair, False represents unfair.
for si in chosed_atts:
# get the unique value of this sensitive attribute
values_si_att = list(data[si].unique())
# for each value, compute the current pairs and estimated fair pairs
si_value_json = {}
si_fair_json = {}
for vi in values_si_att:
# run FAIR oracle to compute its p-value and alpha_c
p_value_fair,alphac_fair = computePvalueFAIR(si,vi,current_file,top_K)
res_fair= p_value_fair > alphac_fair
            # run Pairwise oracle to compute its p-value, alpha uses the default value
p_value_pairwise = computePvaluePairwise(si,vi,current_file)
res_pairwise = p_value_pairwise > alpha_default
# run Proportion oracle to compute its p-value, alpha use the default value
p_value_proportion = computePvalueProportion(si,vi,current_file,top_K)
res_proportion = p_value_proportion > alpha_default
if not isinstance(vi, str):
filled_vi = vi
else:
filled_vi = vi.replace(" ", "")
si_value_json[filled_vi] = [p_value_fair,alphac_fair,p_value_pairwise,alpha_default,p_value_proportion,alpha_default]
si_fair_json[filled_vi] = [res_fair,res_pairwise,res_proportion]
if not isinstance(si, str):
filled_si = si
else:
filled_si = si.replace(" ", "")
fair_res_data[filled_si] = si_value_json
fair_statement_data[filled_si] = si_fair_json
return fair_res_data, fair_statement_data, alpha_default, top_K
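# Hypothetical usage sketch (the file name and attribute/value names are assumptions):
#
#   res, verdicts, alpha, k = runFairOracles(["Gender"], "toy")
#   # res["Gender"]["F"]      -> [p_fair, alpha_c_fair, p_pairwise, 0.05, p_proportion, 0.05]
#   # verdicts["Gender"]["F"] -> [True, False, True]  (fair / unfair verdict per oracle)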
def computePvalueFAIR(att_name,att_value,current_file,top_K,round_default=2):
"""
    Compute the p-value using the FA*IR oracle.
    Attributes:
        att_name: sensitive attribute name
        att_value: value of the protected group of the above attribute
        current_file: file name that stores the data (without ".csv" suffix)
        top_K: top_K value in FA*IR
        round_default: number of decimal places for the returned p-value
    Return: rounded p-value and adjusted significance level in FA*IR
"""
# input checked_atts includes names of checked sensitive attributes
data = | pd.read_csv(current_file + "_weightsum.csv") | pandas.read_csv |
import flask
#import sklearn
from flask import Flask,redirect, url_for, request,render_template,send_file,jsonify
#from bottle import static_file
import werkzeug
import pandas as pd
from werkzeug.utils import secure_filename
from flask import get_flashed_messages
from flask import flash
#from werkzeug import FileWrapper
#from io import BytesIO
from flask import Flask, Response
from flask import send_from_directory
import sys
global d
global d1
global labs,lablist
import warnings
warnings.filterwarnings("ignore")
app=Flask(__name__)
@app.route("/viewresult/<string:name>")
def viewresult(name):
global d
global d1
global nl
global labs ,lablist
import pandas as pd
global th,tl,nl
"""
tl=int(input("enter no of types of labs in the curriculum: "))
for typ in range(tl):
nty=int(input("no of labs of type {}: ".format(typ)))
nl.append(nty)"""
s=pd.read_csv(name)
print(s.head())
#s.columns=["Unnamed: 0","Year","Course Teachers","Section","Course Name","theorey","lab","labavil","xcom","labtype","CBCS"]
#for i in range(len(s)):
co=s["Course Teachers"]
for i in range(len(co)):
co[i]=str(co[i])
if(co[i]=="nan"):
co[i]=co[i-1]
co[i]=str(co[i])
co[i]=co[i].replace(".","")
co[i]=co[i].lower()
co[i]=co[i].replace(" ","")
co[i]=co[i].replace("\n","")
s["Course Teachers"]=co
grouped=[]
data=[]
s["x com"]=s["x com"].astype(float)
s["theorey"]=s["theorey"].astype(float)
s["labtype"]=s["labtype"].astype(float)
s["lab"]=s["lab"]+(s["labavil"]*0.1)
s["lab"]=s["lab"].astype(float)
s.drop(columns=["labavil"],inplace=True)
for i in range(len(s)):
if(s["CBCS"][i]!=0):
g=list(s.iloc[i,:])
g.insert(0,s["CBCS"][i])
grouped.append(g)
else:
data.append(list(s.iloc[i,:]))
print(grouped,"---------------------------------------------------------")
s.drop(s.columns[0],axis=1,inplace=True)
colsn=s.columns
grouped.sort()
classes=[]
cl=[]
fl=[];col=[]
t1=[]
ind=0
import warnings
warnings.filterwarnings("ignore")
for i in range(len(grouped)-1):
if(grouped[i][0]==grouped[i+1][0]):
classes=grouped[i][4].split("/")
for j in range(len(classes)):
cl.append(grouped[i][2]+" "+classes[j])
col.append(grouped[i][5])
fl.append(grouped[i][3])
else:
if("/" in grouped[i][4]):
classes=grouped[i][4].split("/")
for j in range(len(classes)):
cl.append(grouped[i][2]+" "+classes[j])
col.append(grouped[i][5])
fl.append(grouped[i][3])
else:
cl.append(grouped[i][2]+" "+grouped[i][4])
col.append(grouped[i][5])
fl.append(grouped[i][3])
k="/".join(sorted(list(set(cl))));grouped[i][2]=k;grouped[i][4]="CBCS"
print(grouped[i],"***********************************************")
print(fl)
print(col)
k1="/".join(sorted(list(set(fl))));grouped[i][3]=k1
k2="/".join(sorted(list(set(col))));grouped[i][5]=k2
t1.extend(list(set(fl)))
t1.append(k1)
cl=[]
fl=[]
col=[]
ind=i
cl=[]
fl=[]
col=[]
for i in range(ind+1,len(grouped)):
classes=grouped[i][4].split("/")
for j in range(len(classes)):
cl.append(grouped[i][2]+" "+classes[j])
col.append(grouped[i][5])
fl.append(grouped[i][3])
if(len(grouped)!=0):
k1="/".join(sorted(list(set(fl))));grouped[i][3]=k1
k="/".join(sorted(list(set(cl))));grouped[i][2]=k;grouped[i][4]="CBCS"
k2="/".join(sorted(list(set(col))));grouped[i][5]=k2
t1.extend(list(set(fl)))
t1.append(k1)
grp=[]
for i in grouped:
if(i[4]=="CBCS"):
i.pop(0)
grp.append(i)
data.insert(0,i)
dd= | pd.DataFrame(data) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import statistics
from datetime import datetime, timedelta
def _list_build(done_list, total_list, deliveries):
_case = done_list[-1]
_case_list = []
while _case < total_list[-1]:
_case += deliveries
if _case <= total_list[-1]:
_case_list.append(_case)
else:
if _case_list:
_case_list.append(
_case_list[-1]+(total_list[-1] - _case_list[-1]))
else:
_case_list.append(_case+(total_list[-1] - _case))
return _case_list
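# Minimal sketch of the helper above with made-up burndown numbers: 30 of 100 items are
# done so far and the assumed delivery pace is 20 items per cycle.
#
#   _list_build(done_list=[10, 30], total_list=[100, 100], deliveries=20)
#   # -> [50, 70, 90, 100]: projected cumulative totals per future cycle, capped at the backlog size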
def forecasting(df):
done_list = df['done'].tolist()
date_list = df['date'].tolist()
total_list = df['total'].tolist()
deliveries = []
if set(done_list) == {0}:
return
deliveries = [done_list[i] - done_list[i-1]
for i in range(1, len(done_list))]
deliveries.append(done_list[0])
if len(deliveries) == 0:
return
_pencentil50 = int(np.percentile(deliveries, 50))
_pencentil75 = int(np.percentile(deliveries, 75))
deliveries = list(set(filter(lambda a: a != 0, deliveries)))
deliveries.sort()
if len(deliveries) == 0:
return
df_best = pd.DataFrame({'date': [date_list[-1]], 'best': [done_list[-1]]})
df_worst = pd.DataFrame(
{'date': [date_list[-1]], 'worst': [done_list[-1]]})
df_percentile_seventy_five = pd.DataFrame(
{'date': [date_list[-1]], 'seventy_five': [done_list[-1]]})
df_percentile_fifty = pd.DataFrame(
{'date': [date_list[-1]], 'fifty': [done_list[-1]]})
_best_list = _list_build(done_list, total_list, deliveries[-1])
_worst_list = _list_build(done_list, total_list, deliveries[0])
_percentile_seventy_five_list = _list_build(done_list, total_list, _pencentil75)
_percentile_fifty_list = _list_build(done_list, total_list, _pencentil50)
if len(date_list) > 1:
_cycle_delta = datetime.strptime(date_list[1], '%d/%m/%Y') - datetime.strptime(date_list[0], '%d/%m/%Y')
else:
_cycle_delta = datetime.strptime(date_list[0], '%d/%m/%Y')
datetime_object = datetime.strptime(date_list[-1], '%d/%m/%Y')
_datetime_object = datetime_object
for b in _best_list:
if len(date_list) > 1:
_datetime_object = _datetime_object + timedelta(days=_cycle_delta.days)
else:
_datetime_object = _datetime_object + timedelta(days=7)
df_best = df_best.append(pd.DataFrame(
{"date": [_datetime_object.strftime("%d/%m/%Y")], "best": [b], }), ignore_index=True)
_datetime_object = datetime_object
for w in _worst_list:
_datetime_object = _datetime_object + timedelta(days=7)
df_worst = df_worst.append(pd.DataFrame(
{"date": [_datetime_object.strftime("%d/%m/%Y")], "worst": [w], }), ignore_index=True)
_datetime_object = datetime_object
for p in _percentile_seventy_five_list:
if len(date_list) > 1:
_datetime_object = _datetime_object + timedelta(days=_cycle_delta.days)
else:
_datetime_object = _datetime_object + timedelta(days=7)
df_percentile_seventy_five = df_percentile_seventy_five.append(pd.DataFrame(
{"date": [_datetime_object.strftime("%d/%m/%Y")], "seventy_five": [p], }), ignore_index=True)
_datetime_object = datetime_object
for pf in _percentile_fifty_list:
if len(date_list) > 1:
_datetime_object = _datetime_object + timedelta(days=_cycle_delta.days)
else:
_datetime_object = _datetime_object + timedelta(days=7)
df_percentile_fifty = df_percentile_fifty.append(pd.DataFrame(
{"date": [_datetime_object.strftime("%d/%m/%Y")], "fifty": [pf], }), ignore_index=True)
df_bw = pd.merge(df_worst, df_best, how='outer', on='date')
df_p = pd.merge(df_bw, df_percentile_seventy_five, how='outer', on='date')
df_pf = pd.merge(df_p, df_percentile_fifty, how='outer', on='date')
df_final = | pd.merge(df, df_pf, how='outer', on='date') | pandas.merge |
import pandas as pd
import numpy as np
from numpy import float32
import matplotlib as mpl
import matplotlib.pyplot as plt
BUCKET_VOLUME_SIZE = 844  # to be tweaked
WINDOWS_LENGTH = 50  # to be tweaked
sum_v_tau_b_minus_s = 0
v_tau_b_mius_s = [
0
] * WINDOWS_LENGTH # a list to save values of |v_tau_s - v_tau_b|
vpin_num = 0
vpin_df = pd.DataFrame(columns=['bucket_time', 'vpin'])
def new_bucket(buy_volume, sell_volume, bucket_time):
global sum_v_tau_b_minus_s, v_tau_b_mius_s, vpin_num, vpin_df
sum_v_tau_b_minus_s = sum_v_tau_b_minus_s - v_tau_b_mius_s[
vpin_num % WINDOWS_LENGTH] + abs(buy_volume - sell_volume)
v_tau_b_mius_s[vpin_num % WINDOWS_LENGTH] = abs(buy_volume - sell_volume)
vpin_num += 1
if vpin_num >= WINDOWS_LENGTH:
vpin = sum_v_tau_b_minus_s / (WINDOWS_LENGTH * BUCKET_VOLUME_SIZE)
# cumulative_count_vpin = len(vpin_df[vpin >= vpin_df['vpin']])
# len_vpin = len(vpin_df)
vpin_df = vpin_df.append( # add a new timeseries of VPIN (bucket_time, vpin)
{
'bucket_time': bucket_time,
'vpin': vpin
# 'cdf_vpin': cumulative_count_vpin / len_vpin
},
ignore_index=True)
# all_trades = pd.read_csv('./BTCUSDT/BINANCE_BTCUSDT_1808.csv')
all_trades = | pd.read_csv('./BTCUSDT/binance_20180801.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:41:08 2020
@author: jireh.park
"""
# path setup
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
# library
import pandas as pd
from google.cloud import storage
from appointment.appointment import *
from datetime import datetime
def main():
global sub_index, hot4, route, route_uni, place_list, bucket_name, save_path, file_name
    # GCS authentication
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.path.dirname(
os.path.abspath(os.path.dirname(__file__))) + '/key/level-district.json'
    # name settings
bucket_name = 'j-first-bucket'
save_path = 'place/'
# data setting
setting_data()
#print(route_uni)
    # calculate all places and save to GCS
calculate_place()
#print(data)
def list_blob(bucket_name):
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blobs = list(bucket.list_blobs())
return blobs
def blobs_to_dataframe(blobs):
df = pd.DataFrame()
for bl in blobs:
if 'route_all' in bl.name:
with open("tmp.txt", "wb") as file_obj:
bl.download_to_file(file_obj)
df = df.append(pd.read_csv("tmp.txt"))
print(bl.name)
else:
pass
return df
def upload_blob(bucket_name, destination_blob_name, df):
global credentials
"""Uploads a file to the bucket.
bucket_name = "your-bucket-name"
destination_blob_name = "storage-object-name"
df = dataframe to save
"""
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
df.to_csv("tmp.txt", encoding='utf-8', index=False)
blob.upload_from_filename("tmp.txt", content_type='text/csv')
print(
"File uploaded to {}.".format(
destination_blob_name
)
)
def setting_data():
global sub_index, hot4, route, route_uni, place_list
# 역번호 리스트 호출
path = "../data/"
sub_index = pd.read_csv(path + "sub_index.txt",
encoding = 'cp949', sep = '|', engine = 'python', dtype = str)
    # load the hotplace list
hot4 = pd.read_csv(path + "sub_hot4.txt",
sep = '|', encoding = 'cp949', dtype = str)
    # load the route data
blobs = list_blob(bucket_name)
route = blobs_to_dataframe(blobs)
    # remove duplicates
route_uni = route.loc[pd.DataFrame(np.sort(route[['start','destination']],1),index=route.index).drop_duplicates(keep='first').index]
route_uni = route_uni.reset_index(drop = True)
    # list of hotplace station names
base = basic(df = sub_index)
place_list = base.code_to_name(hot4['역번호'].tolist())
def calculate_place():
global sub_index, hot4, route, route_uni, place_list
# 약속장소 산출
st = datetime.now()
s1_ = 0
place_df = pd.DataFrame()
for ii in range(len(route_uni)):
s1 = route_uni.loc[ii, 'start']
s2 = route_uni.loc[ii, 'destination']
print("%s\t%s\t\t%3.2f\t%s" %(s1, s2, round(ii / len(route_uni) * 100, 2), str(datetime.now() - st)))
if s1 != s1_:
if len(place_df) > 0:
file_name = "place_%s_%d" %(s1_, ii)
upload_blob(bucket_name, save_path + file_name, place_df)
place_df = | pd.DataFrame() | pandas.DataFrame |
"""
Copyright 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
class aerodynamicalBalance:
def __init__(self):
        self.F = None  # forces and moments
        self.array = None  # coefficient matrix
self.angle = None
def getDelta(self):
pass
def setLinearArray(self, array):
"""
        Read in the coefficient matrix and invert it.
        [input]
            array: coefficient matrix of the force balance
"""
try:
self.array = np.linalg.inv(array)
except(np.linalg.LinAlgError):
print("Error: Singular matrix.\n")
print("press any key to exit...")
input()
exit()
def reverse(self):
"""
        Reverse the direction of rotation.
"""
self.F = self.F[::-1]
def left_offset(self, offset):
"""
        Apply an offset to the angle of attack.
"""
self.F = np.r_[self.F[offset:], self.F[:offset]]
def solve(self):
self.F = np.dot(self.array, self.F.T)
self.F = self.F.T
def scale(self, coeff):
self.F *= coeff
def readTXTFromDiv5(self, filename):
"""
        [input]
            filename: data file (.txt) output by the force balance. The first line must be the
                initial calibration (zero-wind) row; the remaining rows hold the measurements for
                each angle of attack.
        Example:
            2021-1-28 14:13:54 -155.9 94.2 235.1 124.6 -187.1  # measured values with no wind
            2021-1-28 14:22:53 -67.9 159.9 245.8 147.3 -190.5  # measured values at the 1st angle of attack
            ...
            2021-1-28 14:56:16 -48.6 152.0 279.1 155.3 -192.3  # measured values at the N-th angle of attack
"""
df = []
with open(filename, 'r') as f:
for line in f:
                if len(line.split()) < 7:
                    continue
                # read the five balance channels (columns 2-6, after the date and time fields)
                Fx = float(line.split()[2])  # drag force
                Fy = float(line.split()[3])  # side force
                Mx = float(line.split()[4])  # moment about x
                My = float(line.split()[5])  # moment about y
                Mz = float(line.split()[6])  # torque
df.append([Fx, Fy, Mx, My, Mz])
df = np.array(df)
print(df)
self.F = (df[1:] - df[0])*9.8
print(self.F)
def writeCSV(self, filename):
df = | pd.DataFrame(self.F, columns=['Fx', 'Fy', 'Mx', 'My', 'Mz']) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
def scal():
dataset = pd.read_csv('../train_cuting/train_cutting2_lstm_mean.csv')
dataset['Timestamp'] = pd.to_datetime(dataset['Timestamp'])
dataset = dataset.set_index('Timestamp')
dataset.index.name = 'date'
scaler = MinMaxScaler(feature_range=(0, 1))
values = dataset['Value']
values = values.values
values = values.reshape(-1, 1)
scaler.fit_transform(values)
return scaler
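# Hypothetical usage sketch: the fitted scaler returned above can be reused to map values
# into [0, 1] and back (the value 7000.0 below is made up).
#
#   scaler = scal()
#   scaled = scaler.transform([[7000.0]])        # raw value -> [0, 1] range
#   original = scaler.inverse_transform(scaled)  # back to the raw scale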
def data_process_lstm(path1=None, name1=None, path2=None, name2=None, scaler=None):
if path1 == None:
dataset = | pd.read_csv(name1) | pandas.read_csv |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# File called _pytest for PyCharm compatibility
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_index_equal, assert_series_equal
from tests.common import TestData
class TestGroupbyDataFrame(TestData):
funcs = ["max", "min", "mean", "sum"]
filter_data = [
"AvgTicketPrice",
"Cancelled",
"dayOfWeek",
]
ecommerce_filter_data = [
"total_quantity",
"geoip.region_name",
"day_of_week",
"total_unique_products",
"taxful_total_price",
]
@pytest.mark.parametrize("dropna", [True, False])
@pytest.mark.parametrize("numeric_only", [True])
def test_groupby_aggregate(self, numeric_only, dropna):
# TODO Add tests for numeric_only=False for aggs
# when we support aggregations on text fields
pd_flights = self.pd_flights().filter(self.filter_data)
ed_flights = self.ed_flights().filter(self.filter_data)
pd_groupby = pd_flights.groupby("Cancelled", dropna=dropna).agg(
self.funcs, numeric_only=numeric_only
)
ed_groupby = ed_flights.groupby("Cancelled", dropna=dropna).agg(
self.funcs, numeric_only=numeric_only
)
# checking only values because dtypes are checked in aggs tests
assert_frame_equal(pd_groupby, ed_groupby, check_exact=False, check_dtype=False)
@pytest.mark.parametrize("pd_agg", funcs)
def test_groupby_aggregate_single_aggs(self, pd_agg):
pd_flights = self.pd_flights().filter(self.filter_data)
ed_flights = self.ed_flights().filter(self.filter_data)
pd_groupby = pd_flights.groupby("Cancelled").agg([pd_agg], numeric_only=True)
ed_groupby = ed_flights.groupby("Cancelled").agg([pd_agg], numeric_only=True)
# checking only values because dtypes are checked in aggs tests
assert_frame_equal(pd_groupby, ed_groupby, check_exact=False, check_dtype=False)
@pytest.mark.parametrize("dropna", [True, False])
@pytest.mark.parametrize("pd_agg", ["max", "min", "mean", "sum", "median"])
def test_groupby_aggs_numeric_only_true(self, pd_agg, dropna):
# Pandas has numeric_only applicable for the above aggs with groupby only.
pd_flights = self.pd_flights().filter(self.filter_data)
ed_flights = self.ed_flights().filter(self.filter_data)
pd_groupby = getattr(pd_flights.groupby("Cancelled", dropna=dropna), pd_agg)(
numeric_only=True
)
ed_groupby = getattr(ed_flights.groupby("Cancelled", dropna=dropna), pd_agg)(
numeric_only=True
)
# checking only values because dtypes are checked in aggs tests
assert_frame_equal(
pd_groupby, ed_groupby, check_exact=False, check_dtype=False, rtol=2
)
@pytest.mark.parametrize("dropna", [True, False])
@pytest.mark.parametrize("pd_agg", ["mad", "var", "std"])
def test_groupby_aggs_mad_var_std(self, pd_agg, dropna):
# For these aggs pandas doesn't support numeric_only
pd_flights = self.pd_flights().filter(self.filter_data)
ed_flights = self.ed_flights().filter(self.filter_data)
pd_groupby = getattr(pd_flights.groupby("Cancelled", dropna=dropna), pd_agg)()
ed_groupby = getattr(ed_flights.groupby("Cancelled", dropna=dropna), pd_agg)(
numeric_only=True
)
# checking only values because dtypes are checked in aggs tests
assert_frame_equal(
pd_groupby, ed_groupby, check_exact=False, check_dtype=False, rtol=4
)
@pytest.mark.parametrize("pd_agg", ["nunique"])
def test_groupby_aggs_nunique(self, pd_agg):
pd_flights = self.pd_flights().filter(self.filter_data)
ed_flights = self.ed_flights().filter(self.filter_data)
pd_groupby = getattr(pd_flights.groupby("Cancelled"), pd_agg)()
ed_groupby = getattr(ed_flights.groupby("Cancelled"), pd_agg)()
# checking only values because dtypes are checked in aggs tests
assert_frame_equal(
pd_groupby, ed_groupby, check_exact=False, check_dtype=False, rtol=4
)
@pytest.mark.parametrize("pd_agg", ["max", "min", "mean", "median"])
def test_groupby_aggs_numeric_only_false(self, pd_agg):
pd_flights = self.pd_flights().filter(self.filter_data + ["timestamp"])
ed_flights = self.ed_flights().filter(self.filter_data + ["timestamp"])
# pandas numeric_only=False, matches with Eland numeric_only=None
pd_groupby = getattr(pd_flights.groupby("Cancelled"), pd_agg)(
numeric_only=False
)
ed_groupby = getattr(ed_flights.groupby("Cancelled"), pd_agg)(numeric_only=None)
# sum usually returns NaT for Eland, Nothing is returned from pandas
# we only check timestamp field here, because remaining cols are similar to numeric_only=True tests
# assert_frame_equal doesn't work well for timestamp fields (It converts into int)
# so we convert it into float
pd_timestamp = pd.to_numeric(pd_groupby["timestamp"], downcast="float")
ed_timestamp = | pd.to_numeric(ed_groupby["timestamp"], downcast="float") | pandas.to_numeric |
"""Classes for report generation and add-ons."""
import os
from copy import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from jinja2 import FileSystemLoader, Environment
from json2html import json2html
from sklearn.metrics import roc_auc_score, precision_recall_fscore_support, roc_curve, precision_recall_curve, \
average_precision_score, explained_variance_score, mean_absolute_error, \
mean_squared_error, median_absolute_error, r2_score, f1_score, precision_score, recall_score, confusion_matrix
from ..utils.logging import get_logger
logger = get_logger(__name__)
base_dir = os.path.dirname(__file__)
def extract_params(input_struct):
params = dict()
iterator = input_struct if isinstance(input_struct, dict) else input_struct.__dict__
for key in iterator:
if key.startswith(('_', 'autonlp_params')):
continue
value = iterator[key]
if type(value) in [bool, int, float, str]:
params[key] = value
elif value is None:
params[key] = None
elif hasattr(value, '__dict__') or isinstance(value, dict):
params[key] = extract_params(value)
else:
params[key] = str(type(value))
return params
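# Minimal sketch of extract_params on a hand-made object (the class below is illustrative
# and not part of the original module):
#
#   class _DemoCfg:
#       def __init__(self):
#           self.lr = 0.05
#           self.n_estimators = 100
#           self._cache = object()       # skipped: name starts with "_"
#           self.nested = {"depth": 6}   # recursed into
#   extract_params(_DemoCfg())
#   # -> {"lr": 0.05, "n_estimators": 100, "nested": {"depth": 6}}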
def plot_roc_curve_image(data, path):
sns.set(style="whitegrid", font_scale=1.5)
plt.figure(figsize=(10, 10));
fpr, tpr, _ = roc_curve(data['y_true'], data['y_pred'])
auc_score = roc_auc_score(data['y_true'], data['y_pred'])
lw = 2
plt.plot(fpr, tpr, color='blue', lw=lw, label='Trained model');
plt.plot([0, 1], [0, 1], color='red', lw=lw, linestyle='--', label='Random model');
plt.xlim([-0.05, 1.05]);
plt.ylim([-0.05, 1.05]);
plt.xlabel('False Positive Rate');
plt.ylabel('True Positive Rate');
lgd = plt.legend(bbox_to_anchor=(0.5, -0.15), loc='upper center', ncol=2);
plt.xticks(np.arange(0, 1.01, 0.05), rotation=45);
plt.yticks(np.arange(0, 1.01, 0.05));
plt.grid(color='gray', linestyle='-', linewidth=1);
plt.title('ROC curve (GINI = {:.3f})'.format(2 * auc_score - 1));
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight');
plt.close()
return auc_score
def plot_pr_curve_image(data, path):
sns.set(style="whitegrid", font_scale=1.5)
plt.figure(figsize=(10, 10));
precision, recall, _ = precision_recall_curve(data['y_true'], data['y_pred'])
ap_score = average_precision_score(data['y_true'], data['y_pred'])
lw = 2
plt.plot(recall, precision, color='blue', lw=lw, label='Trained model');
positive_rate = np.sum(data['y_true'] == 1) / data.shape[0]
plt.plot([0, 1], [positive_rate, positive_rate], \
color='red', lw=lw, linestyle='--', label='Random model');
plt.xlim([-0.05, 1.05]);
plt.ylim([0.45, 1.05]);
plt.xlabel('Recall');
plt.ylabel('Precision');
lgd = plt.legend(bbox_to_anchor=(0.5, -0.15), loc='upper center', ncol=2);
plt.xticks(np.arange(0, 1.01, 0.05), rotation=45);
plt.yticks(np.arange(0, 1.01, 0.05));
plt.grid(color='gray', linestyle='-', linewidth=1);
plt.title('PR curve (AP = {:.3f})'.format(ap_score));
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight');
plt.close()
def plot_preds_distribution_by_bins(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, axs = plt.subplots(figsize=(16, 10))
box_plot_data = []
labels = []
for name, group in data.groupby('bin'):
labels.append(name)
box_plot_data.append(group['y_pred'].values)
box = axs.boxplot(box_plot_data, patch_artist=True, labels=labels)
for patch in box['boxes']:
patch.set_facecolor('green')
axs.set_yscale('log')
axs.set_xlabel('Bin number')
axs.set_ylabel('Prediction')
axs.set_title('Distribution of object predictions by bin')
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_distribution_of_logits(data, path):
sns.set(style="whitegrid", font_scale=1.5)
fig, axs = plt.subplots(figsize=(16, 10))
data['proba_logit'] = np.log(data['y_pred'].values / (1 - data['y_pred'].values))
sns.kdeplot(data[data['y_true'] == 0]['proba_logit'], shade=True, color="r", label='Class 0 logits', ax=axs)
sns.kdeplot(data[data['y_true'] == 1]['proba_logit'], shade=True, color="g", label='Class 1 logits', ax=axs)
axs.set_xlabel('Logits')
axs.set_ylabel('Density')
axs.set_title('Logits distribution of object predictions (by classes)');
fig.savefig(path, bbox_inches='tight');
plt.close()
def plot_pie_f1_metric(data, F1_thresh, path):
tn, fp, fn, tp = confusion_matrix(data['y_true'], (data['y_pred'] > F1_thresh).astype(int)).ravel()
(_, prec), (_, rec), (_, F1), (_, _) = precision_recall_fscore_support(data['y_true'],
(data['y_pred'] > F1_thresh).astype(int))
sns.set(style="whitegrid", font_scale=1.5)
fig, ax = plt.subplots(figsize=(20, 10), subplot_kw=dict(aspect="equal"))
recipe = ["{} True Positives".format(tp),
"{} False Positives".format(fp),
"{} False Negatives".format(fn),
"{} True Negatives".format(tn)]
wedges, texts = ax.pie([tp, fp, fn, tn], wedgeprops=dict(width=0.5), startangle=-40)
bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
kw = dict(arrowprops=dict(arrowstyle="-", color='k'),
bbox=bbox_props, zorder=0, va="center")
for i, p in enumerate(wedges):
ang = (p.theta2 - p.theta1) / 2. + p.theta1
y = np.sin(np.deg2rad(ang))
x = np.cos(np.deg2rad(ang))
horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
connectionstyle = "angle,angleA=0,angleB={}".format(ang)
kw["arrowprops"].update({"connectionstyle": connectionstyle})
ax.annotate(recipe[i], xy=(x, y), xytext=(1.35 * np.sign(x), 1.4 * y),
horizontalalignment=horizontalalignment, **kw)
ax.set_title(
"Trained model: Precision = {:.2f}%, Recall = {:.2f}%, F1-Score = {:.2f}%".format(prec * 100, rec * 100, F1 * 100))
plt.savefig(path, bbox_inches='tight');
plt.close()
return prec, rec, F1
def f1_score_w_co(data, min_co=.01, max_co=.99, step=0.01):
data['y_pred'] = np.clip(np.ceil(data['y_pred'].values / step) * step, min_co, max_co)
pos = data['y_true'].sum()
neg = data['y_true'].shape[0] - pos
grp = | pd.DataFrame(data) | pandas.DataFrame |
import sys
from itertools import chain
import pandas as pd
import numpy as np
from sklearn.metrics import log_loss
from sklearn.preprocessing import StandardScaler
from models import EnsembleXGBoostClassifier
from models import EnsembleLightGBMClassifier
from models import EnsembleNNClassifier
np.random.seed(13)
### Parameters of ensembles
num_ensembles = 25
submits_in_ensemble_min = 0.2
submits_in_ensemble_max = 0.8
features_in_ensemble_min = 0.3
features_in_ensemble_max = 0.8
##! Parameters of ensembles
def get_random_model(input_dim):
models = [
EnsembleXGBoostClassifier(),
EnsembleLightGBMClassifier(),
EnsembleNNClassifier(input_dim)
]
probas = [0.5, 0.5, 0.0]
return np.random.choice(models, p=probas)
submits = ['0' + str(i) for i in chain(
range(43, 54),
range(55, 61),
range(62, 66),
range(70, 81),
range(82, 85)
)
]
submits_cv = [pd.read_csv('../submits_cv/submission_cv_' + submit + '.csv') for submit in submits]
submits_lb = [pd.read_csv('../submits/submission_' + submit + '.csv') for submit in submits]
meta_cv = | pd.read_csv('./train_metadata.csv') | pandas.read_csv |
from io import StringIO
import os
import numpy as np
import pandas as pd
from .. import fem_attribute
class ListStringSeries():
def __init__(self, list_string_series):
self._list_string_series = list_string_series
return
def __len__(self):
return len(self._list_string_series)
def __getitem__(self, key):
if isinstance(key, int):
return self._list_string_series[key]
elif isinstance(key, list):
return [self[i] for i in key]
else:
raise ValueError(f"Unexpected key: {key}")
def strip(self):
return [s.strip() for s in self]
def expand_include(self, pattern, base_name):
return [s.expand_include(pattern, base_name) for s in self]
class StringSeries(pd.Series):
def __init__(self, *args, **kw):
if len(args) == 0 or len(args[0]) == 0:
kw['dtype'] = object
super().__init__(*args, **kw)
@property
def _constructor(self):
return StringSeries
@classmethod
def read_file(cls, file_name, *, pattern_ignore=None):
"""Read file and convert to numpy string array.
Args:
file_name: String of file name.
            pattern_ignore: String used to ignore unnecessary lines,
                e.g. comments.
Returns:
StringDataFrame object. Each component corresponds to each line of
the input file.
"""
print(f"Reading file: {file_name}")
s = pd.read_csv(
file_name, header=None, index_col=None, sep='@', dtype=str)[0]
# sep='@' because don't want to separate
if pattern_ignore is None:
return cls(s)
else:
return cls(s).find_match(
pattern_ignore, negative_match=True)
@classmethod
def read_files(cls, file_names, *, pattern_ignore=None, separate=False):
"""Read files.
Args:
file_names: Array of strings indicating file names.
            pattern_ignore: String used to ignore unnecessary lines,
                e.g. comments.
separate: bool
If True, return separated contents, namely, ListStringSeries
object.
Returns:
StringDataFrame object. Each component corresponds to each line of
input files (contents are concatenated).
"""
if separate:
list_string_series = ListStringSeries([
cls.read_file(file_name, pattern_ignore=pattern_ignore)
for file_name in file_names])
if len(list_string_series) == 1:
return list_string_series[0]
else:
return list_string_series
else:
return cls(pd.concat([
cls.read_file(file_name, pattern_ignore=pattern_ignore)
for file_name in file_names]))
@classmethod
def read_array(cls, _array, *, delimiter=',', str_format=None):
"""Read array to make StringSeries object.
Args:
            array: ndarray or list of ndarrays to make the StringSeries object from.
            delimiter: String indicating the delimiter used to connect components in
                a row (default: ',').
str_format: Format string to be passed to numpy.savetxt.
Returns: StringSeries object after reading arrays.
"""
array = np.asarray(_array)
if str_format is None and 'float' in str(array.dtype):
str_format = '%.8E'
if len(array.shape) == 1:
if str_format is None:
try:
str_array = array.astype(str)
return cls(str_array)
except ValueError:
return cls.read_array(
array[:, None], delimiter=delimiter,
str_format=str_format)
else:
sio = StringIO()
np.savetxt(sio, array, fmt=str_format)
return cls(sio.getvalue().split('\n')[:-1])
elif len(array.shape) == 2 and array.shape[1] == 1:
if str_format is None:
try:
converted_array = array.astype(str)
# Array can be converted to other types
return cls(converted_array[:, 0])
except ValueError:
                    # Array is really an object array
return cls(np.array([
'\n'.join(delimiter.join(a) for a in arr.astype(str))
for arr in array[:, 0]
]))
else:
sio = StringIO()
np.savetxt(sio, array[:, 0], fmt=str_format)
return cls(sio.getvalue().split('\n')[:-1])
elif len(array.shape) > 2:
raise ValueError(f"Too high dimensions: {array.shape}")
else:
pass
a0 = array[:, 0]
if str_format is None:
s = cls(a0.astype(str))
for a in array[:, 1:].T:
s = s.connect(a.astype(str))
else:
sio = StringIO()
np.savetxt(sio, a0, fmt=str_format)
s = cls(sio.getvalue().split('\n')[:-1])
for a in array[:, 1:].T:
sio = StringIO()
np.savetxt(sio, a, fmt=str_format)
s = s.connect(sio.getvalue().split('\n')[:-1])
return s
@classmethod
def connect_all(cls, list_data, delimiter=',', str_format=None):
if len(list_data) == 0:
return cls()
if str_format is None:
str_format = [None] * len(list_data)
elif isinstance(str_format, str):
str_format = [str_format] * len(list_data)
if len(list_data) != len(str_format):
raise ValueError(
'When str_format is list, the length should be'
'the same as that of list_data'
f"({len(str_format)} vs {len(list_data)})")
s = cls.read_array(list_data[0], str_format=str_format[0])
for d, f in zip(list_data[1:], str_format[1:]):
s = s.connect(
cls.read_array(d, str_format=f), delimiter=delimiter)
return s
@classmethod
def concat(cls, list_data, axis=0):
return cls(pd.concat(list_data, axis=axis))
def to_header_data(self, pattern):
matches = self.str.match(pattern).values
headers = self[matches]
match_indices = np.concatenate([np.where(matches)[0], [len(self)]])
list_indices = [
range(i1+1, i2) for i1, i2
in zip(match_indices[:-1], match_indices[1:])]
return HeaderData(headers, list_indices, data=self)
# header_dict = {
# header: self[i1+1:i2] for header, i1, i2
# in zip(headers, match_indices[:-1], match_indices[1:])}
# return HeaderData(header_dict)
def strip(self):
return self.str.strip()
def extract_captures(self, pattern, *, convert_values=False):
captures = self.str.extract(pattern, expand=False)
captures = captures[~pd.isnull(captures)]
if convert_values:
return captures.values
else:
return captures
def find_match(self, pattern, *, allow_multiple_matches=True,
convert_values=False, negative_match=False):
"""Find match to the specified pattern.
Args:
pattern: Pattern to be used for matching.
allow_multiple_matches: True to accept several matches.
(Default = True)
            convert_values: Bool, (default: False)
Flag to convert StringSeries to values
Returns:
StringSeries or ndarray of matches.
"""
if negative_match:
match = self[~self.str.contains(pattern)]
else:
match = self[self.str.contains(pattern)]
if not allow_multiple_matches and len(match) > 1:
raise ValueError(f"{len(match)} matches found. Expected 1.")
if convert_values:
return match.values
else:
return match
def expand_include(self, pattern, base_name):
"""Expand data like 'include' statement. Expanded data is concatenated
at the end of the non-expanded data.
Args:
pattern: Pattern showing include statement. Include file should be
captured with the first expression.
base_name: Directory name of the include file location.
Returns:
StringSeries object after expansion.
"""
captures = self.extract_captures(pattern)
include_files = [os.path.join(base_name, c) for c in captures]
if len(include_files) == 0:
return self
include_ss = StringSeries.read_files(include_files)
return | pd.concat([self, include_ss], ignore_index=True) | pandas.concat |
from __future__ import division, print_function
import numpy as np
import pandas as pd
from scipy.stats import skew
from sklearn.preprocessing import RobustScaler
from sklearn.linear_model import LassoCV,RidgeCV
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR
from sklearn.model_selection import cross_val_score, train_test_split
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from keras.layers.core import Dropout
#first attempt at a kaggle competition, experiment with different regressors and follow some of the advice in
#kernels.
def load_and_preprocess():
"""
Load the data (either train.csv or test.csv) and pre-process it with some simple
transformations. Return in the correct form for usage in scikit-learn.
Arguments
---------
filestr: string
string pointing to csv file to load into pandas
Returns
-------
X_train: numpy.array
array containing features of training set
X_test: numpy.array
array containing features of test set
y: numpy.array
array containing labels for training set
test_ID: numpy.array
IDs for test set, for submission
"""
train = | pd.read_csv("data/train.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
####################################################################################################################
# preprocess ACS data between 2012 to 2017
table_dict = {"total_population": {"table": "B01003", "zip": "GEO.id2", "variable":"HD01_VD01"},
"median_household_income": {"table": "B19013", "zip": "GEO.id2", "variable":"HD01_VD01"},
"gini_index": {"table": "B19083", "zip": "GEO.id2", "variable":"HD01_VD01"},
"health_coverage_population": {"table": "B992701", "zip": "GEO.id2", "variable":"HD01_VD02"},
"same_house": {"table": "B07012", "zip": "GEO.id2", "variable":"HD01_VD06"},
"poverty_rate": {"table": "S1701", "zip": "GEO.id2", "variable":"HC02_EST_VC01"}}
unemp_dict = {"unemployment_rate": {"table": "S2301", "zip": "GEO.id2", "variable":"HC04_EST_VC01"},
"unemployment_rate2": {"table": "DP03", "zip": "GEO.id2", "variable":"HC03_VC07"}}
def read_ACS(year_list, table_dict, unemp_dict):
    '''
    Read the ACS csv files listed in table_dict (and the unemployment tables in unemp_dict)
    for every year in year_list.
    Return: dict mapping "<variable><year>" to a [dataframe, table-spec] pair.
    '''
data_dict = {}
for year in year_list:
for t_name, value in table_dict.items():
if (year == 13) and (t_name == "same_house"):
pass
else:
table_name = t_name + str(year)
df = pd.read_csv(r"..\data\ACS_final\ACS_" + str(year) + "_" + \
value["table"] + "_" + t_name + ".csv")
data_dict[table_name] = [df.iloc[1:], value]
if year <= 14:
emp_table_name = "unemployment_rate" + str(year)
df = pd.read_csv(r"..\data\ACS_final\ACS_" + str(year) + "_" + \
unemp_dict["unemployment_rate"]["table"] + "_" + "unemployment_rate" + ".csv")
data_dict[emp_table_name] = [df.iloc[1:], unemp_dict["unemployment_rate"]]
else:
emp_table_name = "unemployment_rate" + str(year)
df = pd.read_csv(r"..\data\ACS_final\ACS_" + str(year) + "_" + \
unemp_dict["unemployment_rate2"]["table"] + "_" + "unemployment_rate" + ".csv")
data_dict[emp_table_name] = [df.iloc[1:], unemp_dict["unemployment_rate2"]]
return data_dict
def ACS_select(df_dict):
'''
'''
new_df_dict = {}
yearly_data = {}
for df_name, df_value in df_dict.items():
variable, year = df_name[:-2], df_name[-2:]
df_v = df_value[1]
df = df_value[0][[df_v["zip"], df_v["variable"]]]
new_df_dict[df_name] = {df_v["zip"]:"zipcode", df_v["variable"]:variable}
df = df.rename(columns=new_df_dict[df_name])
if year not in yearly_data:
yearly_data[year] = df
else:
yearly_data[year] = pd.merge(df, yearly_data[year], left_on="zipcode", right_on="zipcode")
same_home13 = pd.DataFrame({"zipcode": yearly_data["13"]["zipcode"],"same_house":([np.nan] * yearly_data["13"].shape[0])})
yearly_data["13"] = | pd.merge(yearly_data["13"], same_home13, left_on="zipcode", right_on="zipcode") | pandas.merge |
# This file uses RNN model for making predictions about diagnostic labels.
# Note that the output of this file is generated models. File 'sim/rnn_label_pred.py'
# should be used on the output of this file to make predictions about diagnostic labels.
import sys
from multiprocessing.pool import Pool
from actionflow.data.data_process import DataProcess
from actionflow.rnn.lstm_beh import LSTMBeh
from actionflow.rnn.opt_beh import OptBEH
from actionflow.util.helper import get_total_pionts
from actionflow.util.logger import LogFile, DLogger
from BD.data.data_reader import DataReader
from BD.util.paths import Paths
import tensorflow as tf
import pandas as pd
cv_counts = 10
cv_lists_group = {}
data = DataReader.read_BD()
for group in data['diag'].unique().tolist():
gdata = data.loc[data.diag == group]
ids = gdata['id'].unique().tolist()
cv_lists = []
dftr = | pd.DataFrame({'id': ids, 'train': 'train'}) | pandas.DataFrame |
"""hgboost: Hyperoptimized Gradient Boosting library.
Contributors: https://github.com/erdogant/hgboost
"""
import warnings
warnings.filterwarnings("ignore")
import classeval as cle
from df2onehot import df2onehot
import treeplot as tree
import colourmap
import pypickle
import os
import numpy as np
import pandas as pd
import wget
from sklearn.metrics import mean_squared_error, cohen_kappa_score, mean_absolute_error, log_loss, roc_auc_score, f1_score, r2_score
from sklearn.ensemble import VotingClassifier, VotingRegressor
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score
import xgboost as xgb
import catboost as ctb
try:
import lightgbm as lgb
except:
pass
from hyperopt import fmin, tpe, STATUS_OK, Trials, hp
from tqdm import tqdm
import time
import copy
# %%
class hgboost:
"""Create a class hgboost that is instantiated with the desired method."""
def __init__(self, max_eval=250, threshold=0.5, cv=5, test_size=0.2, val_size=0.2, top_cv_evals=10, is_unbalance=True, random_state=None, n_jobs=-1, verbose=3):
"""Initialize hgboost with user-defined parameters.
Parameters
----------
max_eval : int, (default : 250)
Search space is created on the number of evaluations.
threshold : float, (default : 0.5)
Classification threshold. In case of two-class model this is 0.5
cv : int, optional (default : 5)
Cross-validation. Specifying the test size by test_size.
top_cv_evals : int, (default : 10)
Number of top best performing models that is evaluated.
If set to None, each iteration (max_eval) is tested.
If set to 0, cross validation is not performed.
test_size : float, (default : 0.2)
Splitting train/test set with test_size=0.2 and train=1-test_size.
val_size : float, (default : 0.2)
Setup the validation set. This part is kept entirely separate from the test-size.
is_unbalance : Bool, (default: True)
Control the balance of positive and negative weights, useful for unbalanced classes.
xgboost clf : sum(negative instances) / sum(positive instances)
catboost clf : sum(negative instances) / sum(positive instances)
lightgbm clf : balanced
False: grid search
random_state : int, (default : None)
Fix the random state for validation set and test set. Note that is not used for the crossvalidation.
n_jobs : int, (default : -1)
The number of jobs to run in parallel for fit. None means 1 unless in a joblib.parallel_backend context.
-1 means using all processors.
verbose : int, (default : 3)
Print progress to screen.
0: None, 1: ERROR, 2: WARN, 3: INFO, 4: DEBUG, 5: TRACE
Returns
-------
None.
References
----------
* https://github.com/hyperopt/hyperopt
* https://www.districtdatalabs.com/parameter-tuning-with-hyperopt
* https://scikit-learn.org/stable/modules/model_evaluation.html
"""
if (threshold is None) or (threshold <= 0): raise ValueError('[hgboost] >Error: [threshold] must be >0 and not [None]')
if (max_eval is None) or (max_eval <= 0): max_eval=1
if top_cv_evals is None: max_eval=0
if (test_size is None) or (test_size <= 0): raise ValueError('[hgboost] >Error: test_size must be >0 and not [None] Note: the final model is learned on the entire dataset. [test_size] may help you getting a more robust model.')
if (val_size is not None) and (val_size<=0): val_size=None
self.max_eval=max_eval
self.top_cv_evals=top_cv_evals
self.threshold=threshold
self.test_size=test_size
self.val_size=val_size
self.algo=tpe.suggest
self.cv=cv
self.random_state=random_state
self.n_jobs=n_jobs
self.verbose=verbose
self.is_unbalance = is_unbalance
def _fit(self, X, y, pos_label=None):
"""Fit the best performing model.
Description
-----------
Minimize a function over a hyperparameter space.
More realistically: *explore* a function over a hyperparameter space
according to a given algorithm, allowing up to a certain number of
function evaluations. As points are explored, they are accumulated in
"trials".
Parameters
----------
X : pd.DataFrame
Input dataset.
y : array-like.
Response variable.
pos_label : string/int.
In case of classification (_clf), the model will be fitted on the pos_label that is in y.
Returns
-------
results : dict
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
# Check input data
X, y, self.pos_label=_check_input(X, y, pos_label, self.method, verbose=self.verbose)
        # Recalculate the test size. This should be the percentage of the total dataset after removing the validation set.
if (self.val_size is not None) and (self.val_size > 0):
self.test_size = np.round((self.test_size * X.shape[0]) / (X.shape[0] - (self.val_size * X.shape[0])), 2)
# Print to screen
if self.verbose>=3:
print('[hgboost] >method: %s' %(self.method))
print('[hgboost] >eval_metric: %s' %(self.eval_metric))
print('[hgboost] >greater_is_better: %s' %(self.greater_is_better))
# Set validation set
self._set_validation_set(X, y)
# Find best parameters
self.model, self.results=self._HPOpt()
# Fit on all data using best parameters
if self.verbose>=3: print('[hgboost] >Retrain [%s] on the entire dataset with the optimal parameters settings.' %(self.method))
self.model.fit(X, y)
# Return
return self.results
def _classification(self, X, y, eval_metric, greater_is_better, params):
# Gather for method, the default metric and greater is better.
self.eval_metric, self.greater_is_better =_check_eval_metric(self.method, eval_metric, greater_is_better)
# Import search space for the specific function
if params == 'default': params = _get_params(self.method, eval_metric=self.eval_metric, y=y, pos_label=self.pos_label, is_unbalance=self.is_unbalance, verbose=self.verbose)
self.space = params
# Fit model
self.results = self._fit(X, y, pos_label=self.pos_label)
# Fin
if self.verbose>=3: print('[hgboost] >Fin!')
def _regression(self, X, y, eval_metric, greater_is_better, params):
# Gather for method, the default metric and greater is better.
self.eval_metric, self.greater_is_better = _check_eval_metric(self.method, eval_metric, greater_is_better)
# Import search space for the specific function
if params == 'default': params = _get_params(self.method, eval_metric=self.eval_metric, verbose=self.verbose)
self.space = params
# Fit model
self.results = self._fit(X, y)
# Fin
if self.verbose>=3: print('[hgboost] >Fin!')
def xgboost_reg(self, X, y, eval_metric='rmse', greater_is_better=False, params='default'):
"""Xgboost Regression with parameter hyperoptimization.
Parameters
----------
X : pd.DataFrame.
Input dataset.
y : array-like
Response variable.
eval_metric : str, (default : 'rmse').
Evaluation metric for the regressor model.
* 'rmse': root mean squared error.
* 'mse': mean squared error.
* 'mae': mean absolute error.
greater_is_better : bool (default : False).
If a loss, the output of the python function is negated by the scorer object, conforming to the cross validation convention that scorers return higher values for better models.
params : dict, (default : 'default').
Hyper parameters.
Returns
-------
results : dict
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
if self.verbose>=3: print('[hgboost] >Start hgboost regression..')
# Method
self.method='xgb_reg'
# Run method
self._regression(X, y, eval_metric, greater_is_better, params)
# Return
return self.results
def lightboost_reg(self, X, y, eval_metric='rmse', greater_is_better=False, params='default'):
"""Light Regression with parameter hyperoptimization.
Parameters
----------
X : pd.DataFrame.
Input dataset.
y : array-like.
Response variable.
eval_metric : str, (default : 'rmse').
Evaluation metric for the regressor model.
* 'rmse': root mean squared error.
* 'mse': mean squared error.
* 'mae': mean absolute error.
greater_is_better : bool (default : False).
If a loss, the output of the python function is negated by the scorer object, conforming to the cross validation convention that scorers return higher values for better models.
params : dict, (default : 'default').
Hyper parameters.
Returns
-------
results : dict
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
if self.verbose>=3: print('[hgboost] >Start hgboost regression..')
# Method
self.method='lgb_reg'
# Run method
self._regression(X, y, eval_metric, greater_is_better, params)
# Return
return self.results
def catboost_reg(self, X, y, eval_metric='rmse', greater_is_better=False, params='default'):
"""Catboost Regression with parameter hyperoptimization.
Parameters
----------
X : pd.DataFrame.
Input dataset.
y : array-like.
Response variable.
eval_metric : str, (default : 'rmse').
Evaluation metric for the regressor model.
* 'rmse': root mean squared error.
* 'mse': mean squared error.
* 'mae': mean absolute error.
greater_is_better : bool (default : False).
If a loss, the output of the python function is negated by the scorer object, conforming to the cross validation convention that scorers return higher values for better models.
params : dict, (default : 'default').
Hyper parameters.
Returns
-------
results : dict.
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
if self.verbose>=3: print('[hgboost] >Start hgboost regression..')
# Method
self.method='ctb_reg'
# Run method
self._regression(X, y, eval_metric, greater_is_better, params)
# Return
return self.results
def xgboost(self, X, y, pos_label=None, method='xgb_clf', eval_metric=None, greater_is_better=None, params='default'):
"""Xgboost Classification with parameter hyperoptimization.
Parameters
----------
X : pd.DataFrame.
Input dataset.
y : array-like.
Response variable.
pos_label : string/int.
            Fit the model on the pos_label that is in [y].
method : String, (default : 'auto').
* 'xgb_clf': XGboost two-class classifier
* 'xgb_clf_multi': XGboost multi-class classifier
eval_metric : str, (default : None).
Evaluation metric for the regressor of classification model.
* 'auc': area under ROC curve (default for two-class)
* 'kappa': (default for multi-class)
* 'f1': F1-score
* 'logloss'
            * 'auc_cv': Compute the average auc per iteration in each cross-validation fold. This approach is computationally expensive.
greater_is_better : bool.
If a loss, the output of the python function is negated by the scorer object, conforming to the cross validation convention that scorers return higher values for better models.
* auc : True -> two-class
* kappa : True -> multi-class
Returns
-------
results : dict.
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
if self.verbose>=3: print('[hgboost] >Start hgboost classification..')
self.method = method
self.pos_label = pos_label
# Run method
self._classification(X, y, eval_metric, greater_is_better, params)
# Return
return self.results
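    # Hypothetical usage sketch based on the docstrings above (the data X, y and the chosen
    # settings are made up):
    #
    #   hgb = hgboost(max_eval=50, cv=5)
    #   results = hgb.xgboost(X, y, pos_label=1)  # X: pd.DataFrame, y: array-like labels
    #   results['best_params']                    # tuned hyperparameters
    #   results['model']                          # best model refitted on all data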
def catboost(self, X, y, pos_label=None, eval_metric='auc', greater_is_better=True, params='default'):
"""Catboost Classification with parameter hyperoptimization.
Parameters
----------
X : pd.DataFrame.
Input dataset.
y : array-like.
Response variable.
pos_label : string/int.
            Fit the model on the pos_label that is in [y].
eval_metric : str, (default : 'auc').
Evaluation metric for the regressor of classification model.
* 'auc': area under ROC curve (default for two-class)
* 'kappa': (default for multi-class)
* 'f1': F1-score
* 'logloss'
            * 'auc_cv': Compute the average auc per iteration in each cross-validation fold. This approach is computationally expensive.
greater_is_better : bool (default : True).
If a loss, the output of the python function is negated by the scorer object, conforming to the cross validation convention that scorers return higher values for better models.
Returns
-------
results : dict.
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
if self.verbose>=3: print('[hgboost] >Start hgboost classification..')
self.method = 'ctb_clf'
self.pos_label = pos_label
# Run method
self._classification(X, y, eval_metric, greater_is_better, params)
# Return
return self.results
def lightboost(self, X, y, pos_label=None, eval_metric='auc', greater_is_better=True, params='default'):
"""Lightboost Classification with parameter hyperoptimization.
Parameters
----------
X : pd.DataFrame
Input dataset.
y : array-like
Response variable.
pos_label : string/int.
            Fit the model on the pos_label that is in [y].
eval_metric : str, (default : 'auc')
Evaluation metric for the regressor of classification model.
* 'auc': area under ROC curve (default for two-class)
* 'kappa': (default for multi-class)
* 'f1': F1-score
* 'logloss'
            * 'auc_cv': Compute the average auc per iteration in each cross-validation fold. This approach is computationally expensive.
greater_is_better : bool (default : True)
If a loss, the output of the python function is negated by the scorer object, conforming to the cross validation convention that scorers return higher values for better models.
Returns
-------
results : dict
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
if self.verbose>=3: print('[hgboost] >Start hgboost classification..')
self.method = 'lgb_clf'
self.pos_label = pos_label
# Run method
self._classification(X, y, eval_metric, greater_is_better, params)
# Return
return self.results
def ensemble(self, X, y, pos_label=None, methods=['xgb_clf', 'ctb_clf', 'lgb_clf'], eval_metric=None, greater_is_better=None, voting='soft'):
"""Ensemble Classification with parameter hyperoptimization.
Description
-----------
Fit the best model for xgboost, catboost and lightboost, and then combine the individual models into a new one.
Parameters
----------
X : pd.DataFrame
Input dataset.
y : array-like
Response variable.
pos_label : string/int.
Fit the model on the pos_label that is in [y].
methods : list of strings, (default : ['xgb_clf','ctb_clf','lgb_clf']).
The models included for the ensemble classifier or regressor. The clf and reg models cannot be combined.
* ['xgb_clf','ctb_clf','lgb_clf']
* ['xgb_reg','ctb_reg','lgb_reg']
eval_metric : str, (default : 'auc')
Evaluation metric for the regressor of classification model.
* 'auc': area under ROC curve (two-class classification : default)
greater_is_better : bool (default : True)
If a loss, the output of the python function is negated by the scorer object, conforming to the cross validation convention that scorers return higher values for better models.
* auc : True -> two-class
voting : str, (default : 'soft')
Combining classifier using a voting scheme.
* 'hard': using predicted classes.
* 'soft': using the Probabilities.
Returns
-------
results : dict
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* model: Ensemble of the best performing models.
* val_results: Results on independent validation dataset.
"""
# Store parameters in object
self.results = {}
self.voting = voting
self.methods = methods
if np.all(list(map(lambda x: 'clf' in x, methods))):
if self.verbose>=3: print('[hgboost] >Create ensemble classification model..')
self.method = 'ensemble_clf'
elif np.all(list(map(lambda x: 'reg' in x, methods))):
if self.verbose>=3: print('[hgboost] >Create ensemble regression model..')
self.method = 'ensemble_reg'
else:
raise ValueError('[hgboost] >Error: The input [methods] must be of type "_clf" or "_reg" but can not be combined.')
# Check input data
X, y, self.pos_label = _check_input(X, y, pos_label, self.method, verbose=self.verbose)
# Gather for method, the default metric and greater is better.
self.eval_metric, self.greater_is_better = _check_eval_metric(self.method, eval_metric, greater_is_better)
# Store the clean initialization in hgb
hgb = copy.copy(self)
# Create independent validation set.
if self.method == 'ensemble_clf':
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=self.val_size, random_state=self.random_state, shuffle=True, stratify=y)
else:
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=self.val_size, random_state=self.random_state, shuffle=True)
# Hyperparameter optimization for boosting models
models = []
for method in methods:
# Make copy of clean init
hgbM = copy.copy(hgb)
hgbM.method = method
hgbM._classification(X_train, y_train, eval_metric, greater_is_better, 'default')
# Store
models.append((method, copy.copy(hgbM.model)))
self.results[method] = {}
self.results[method]['model'] = copy.copy(hgbM)
# Create the ensemble model
if self.verbose>=3: print('[hgboost] >Fit ensemble model with [%s] voting..' %(self.voting))
if self.method == 'ensemble_clf':
model = VotingClassifier(models, voting=voting, n_jobs=self.n_jobs)
model.fit(X, y == self.pos_label)
else:
model = VotingRegressor(models, n_jobs=self.n_jobs)
model.fit(X, y)
# Store ensemble model
self.model = model
# Validation error for the ensemble model
if self.verbose>=3: print('[hgboost] >Evaluate [ensemble] model on independent validation dataset (%.0f samples, %.2g%%)' %(len(y_val), self.val_size * 100))
# Evaluate results on the same validation set
val_score, val_results = self._eval(X_val, y_val, model, verbose=2)
if self.verbose>=3: print('[hgboost] >[Ensemble] [%s]: %.4g on independent validation dataset' %(self.eval_metric, val_score['loss']))
# Validate each of the independent methods to show differences in loss-scoring
if self.val_size is not None:
self.X_val = X_val
self.y_val = y_val
for method in methods:
# Evaluation
val_score_M, val_results_M = self._eval(X_val, y_val, self.results[method]['model'].model, verbose=2)
# Store
self.results[method]['loss'] = val_score_M['loss']
self.results[method]['val_results'] = val_results_M
if self.verbose>=3: print('[hgboost] >[%s] [%s]: %.4g on independent validation dataset' %(method, self.eval_metric, val_score_M['loss']))
# Store
self.results['val_results'] = val_results
self.results['model'] = model
# self.results['summary'] = pd.concat([hgbX.results['summary'], hgbC.results['summary'], hgbL.results['summary']])
# Return
return self.results
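# Sketch of how the ensemble above could be called (editor-added comment; `hgb` is an assumed
# instance of the enclosing class). Each listed method is hyperoptimized separately and the fitted
# estimators are combined through sklearn's VotingClassifier/VotingRegressor:
#   results = hgb.ensemble(X, y, pos_label=1, methods=['xgb_clf', 'ctb_clf', 'lgb_clf'], voting='soft')
#   results['model'].predict(X_new)    # prediction of the combined (voting) model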
def _set_validation_set(self, X, y):
"""Set the validation set.
Description
-----------
Here we separate a small part of the data as the validation set.
* The new data is stored in self.X and self.y
* The validation X and y are stored in self.X_val and self.y_val
"""
if self.verbose>=3: print('[hgboost] >Total dataset: %s ' %(str(X.shape)))
if (self.val_size is not None):
if '_clf' in self.method:
self.X, self.X_val, self.y, self.y_val = train_test_split(X, y, test_size=self.val_size, random_state=self.random_state, shuffle=True, stratify=y)
elif '_reg' in self.method:
self.X, self.X_val, self.y, self.y_val = train_test_split(X, y, test_size=self.val_size, random_state=self.random_state, shuffle=True)
if self.verbose>=3: print('[hgboost] >Validation set: %s ' %(str(self.X_val.shape)))
else:
self.X = X
self.y = y
self.X_val = None
self.y_val = None
def _HPOpt(self):
"""Hyperoptimization of the search space.
Description
-----------
Minimize a function over a hyperparameter space.
More realistically: *explore* a function over a hyperparameter space
according to a given algorithm, allowing up to a certain number of
function evaluations. As points are explored, they are accumulated in
"trials".
Returns
-------
model : object
Fitted model.
results : dict
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
# Import the desired model-function for the classification/regression
disable = (False if (self.verbose<3) else True)
fn = getattr(self, self.method)
# Split train-test set. This set is used for parameter optimization. Note that the parameters are varied while the train-test split is kept constant.
# This makes the comparison reflect differences across parameters rather than train-test split variance.
if '_clf' in self.method:
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=self.test_size, random_state=self.random_state, shuffle=True, stratify=self.y)
elif '_reg' in self.method:
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=self.test_size, random_state=self.random_state, shuffle=True)
if self.verbose>=3: print('[hgboost] >Train-set: %s ' %(str(self.X_train.shape)))
if self.verbose>=3: print('[hgboost] >Test-set: %s ' %(str(self.X_test.shape)))
if self.verbose>=3: print('[hgboost] >Hyperparameter optimization..')
# Hyperoptimization to find best performing model. Set the trials which is the object where all the HPopt results are stored.
trials=Trials()
best_params = fmin(fn=fn, space=self.space, algo=self.algo, max_evals=self.max_eval, trials=trials, show_progressbar=disable)
# Summary results
results_summary, model = self._to_df(trials, verbose=self.verbose)
# Cross-validation over the top n models. To speed up we can decide to further test only the best performing ones. The best performing model is returned.
if self.cv is not None:
model, results_summary, best_params = self._cv(results_summary, self.space, best_params)
# Create a basic model by using default parameters.
space_basic = {}
space_basic['fit_params'] = {'verbose': 0}
space_basic['model_params'] = {}
model_basic = fn(space_basic)['model']
comparison_results = {}
# Validation error
val_results = None
if (self.val_size is not None):
if self.verbose>=3: print('[hgboost] >Evaluate best [%s] model on validation dataset (%.0f samples, %.2g%%)' %(self.method, len(self.y_val), self.val_size * 100))
# Evaluate results
val_score, val_results = self._eval(self.X_val, self.y_val, model, verbose=2)
val_score_basic, val_results_basic = self._eval(self.X_val, self.y_val, model_basic, verbose=2)
comparison_results['Model with HyperOptimized parameters (validation set)'] = val_results
comparison_results['Model with default parameters (validation set)'] = val_results_basic
if self.verbose>=3: print('[hgboost] >[%s]: %.4g using HyperOptimized parameters on validation set.' %(self.eval_metric, val_score['loss']))
if self.verbose>=3: print('[hgboost] >[%s]: %.4g using default (not optimized) parameters on validation set.' %(self.eval_metric, val_score_basic['loss']))
# Store validation results
results_summary = _store_validation_scores(results_summary, best_params, model_basic, val_score_basic, val_score, self.greater_is_better)
# Remove the model column
del results_summary['model']
# Store
results = {}
results['params'] = best_params
results['summary'] = results_summary
results['trials'] = trials
results['model'] = model
results['val_results'] = val_results
results['comparison_results'] = comparison_results
# Return
return model, results
def _cv(self, results_summary, space, best_params):
ascending = False if self.greater_is_better else True
results_summary['loss_mean'] = np.nan
results_summary['loss_std'] = np.nan
# Determine maximum folds
top_cv_evals = np.minimum(results_summary.shape[0], self.top_cv_evals)
idx = results_summary['loss'].sort_values(ascending=ascending).index[0:top_cv_evals]
if self.verbose>=3: print('[hgboost] >%.0d-fold cross validation for the top %.0d scoring models, Total nr. tests: %.0f' %(self.cv, len(idx), self.cv * len(idx)))
disable = (True if (self.verbose==0 or self.verbose>3) else False)
# Run over the top-scoring models.
for i in tqdm(idx, disable=disable):
scores = []
# Run over the cross-validations
for k in np.arange(0, self.cv):
# Split train-test set
if '_clf' in self.method:
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=self.test_size, random_state=None, shuffle=True, stratify=self.y)
elif '_reg' in self.method:
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=self.test_size, random_state=None, shuffle=True)
# Evaluate model
score, _ = self._train_model(results_summary['model'].iloc[i], space)
score.pop('model')
scores.append(score)
# Store mean and std summary
results_summary['loss_mean'].iloc[i] = pd.DataFrame(scores)['loss'].mean()
results_summary['loss_std'].iloc[i] = pd.DataFrame(scores)['loss'].std()
# Negate scoring if required. The hpopt is optimized for loss functions (lower is better). Therefore we need to set eg the auc to negative and here we need to return.
if self.greater_is_better:
results_summary['loss_mean'] = results_summary['loss_mean'] * -1
idx_best = results_summary['loss_mean'].argmax()
else:
idx_best = results_summary['loss_mean'].argmin()
# Get best performing model based on the mean scores.
model = results_summary['model'].iloc[idx_best]
results_summary['best_cv'] = False
results_summary['best_cv'].iloc[idx_best] = True
# Collect best parameters for this model
best_params = dict(results_summary.iloc[idx_best, np.isin(results_summary.columns, [*best_params.keys()])])
# Return
return model, results_summary, best_params
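# Worked example of the negation above (editor-added comment, assuming eval_metric='auc' with
# greater_is_better=True): hyperopt minimizes, so auc scores such as [0.81, 0.85, 0.79] are stored
# as losses [-0.81, -0.85, -0.79]; multiplying loss_mean by -1 restores [0.81, 0.85, 0.79] and the
# argmax then selects the candidate with mean auc 0.85.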
def _train_model(self, model, space):
verbose = 2 if self.verbose<=3 else 3
# Evaluation is determined for both the training and the testing set. These results can be plotted after finishing.
eval_set = [(self.X_train, self.y_train), (self.X_test, self.y_test)]
# Make fit with stopping-rule to avoid overfitting. Directly perform evaluation with the eval_set.
model.fit(self.X_train, self.y_train, eval_set=eval_set, **space['fit_params'])
# Evaluate results
out, eval_results = self._eval(self.X_test, self.y_test, model, verbose=verbose)
# Return
return out, eval_results
def xgb_reg(self, space):
"""Train Xgboost regression model."""
reg = xgb.XGBRegressor(**space['model_params'], n_jobs=self.n_jobs, verbosity=0)
out, _ = self._train_model(reg, space)
return out
def lgb_reg(self, space):
"""Train lightboost regression model."""
reg = lgb.LGBMRegressor(**space['model_params'], n_jobs=self.n_jobs, verbosity=0)
out, _ = self._train_model(reg, space)
return out
def ctb_reg(self, space):
"""Train catboost regression model."""
reg = ctb.CatBoostRegressor(**space['model_params'])
out, _ = self._train_model(reg, space)
return out
def xgb_clf(self, space):
"""Train xgboost classification model."""
clf = xgb.XGBClassifier(**space['model_params'], n_jobs=self.n_jobs, verbosity=0)
out, _ = self._train_model(clf, space)
return out
def ctb_clf(self, space):
"""Train catboost classification model."""
clf = ctb.CatBoostClassifier(**space['model_params'])
out, _ = self._train_model(clf, space)
return out
def lgb_clf(self, space):
"""Train lightboost classification model."""
clf = lgb.LGBMClassifier(**space['model_params'], n_jobs=self.n_jobs, verbosity=0)
out, _ = self._train_model(clf, space)
return out
def xgb_clf_multi(self, space):
"""Train xgboost multi-class classification model."""
clf = xgb.XGBClassifier(**space['model_params'], n_jobs=self.n_jobs, verbosity=0)
out, _ = self._train_model(clf, space)
return out
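# For reference (editor-added comment): the `space` dict consumed by the wrappers above comes from
# the hyperopt search space; a minimal hand-written example, with values chosen purely for
# illustration, could look like:
#   space = {'model_params': {'n_estimators': 200, 'max_depth': 5, 'learning_rate': 0.1},
#            'fit_params': {'early_stopping_rounds': 25, 'verbose': 0}}
#   out = self.xgb_clf(space)   # returns the scoring dict produced by _train_model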
# Transform results into dataframe
def _to_df(self, trials, verbose=3):
# Combine params with scoring results
df_params = pd.DataFrame(trials.vals)
df_scoring = pd.DataFrame(trials.results)
df = | pd.concat([df_params, df_scoring], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
import copy
import numpy as np
import math
import random
import time
from Point import Point
from AntMain import Ant
import pandas as pd
class ViolatedRecord:
"""Violated record history for one point"""
def __init__(self, marker, vio_time, dep_time, is_check=False):
"""
:param marker: the name of the parking spot
:param vio_time: the violation time of the car in this parking spot
:param dep_time: the departure time of the car in this parking spot
:param is_check: marks whether this record has already been captured
"""
self.marker = marker
self.vioTime = vio_time
self.depTime = dep_time
self.isCheck = is_check
class Officer:
"""Find path """
SA_STOPPING_ITER = 5000 # stop iteration of SA, type(int)
SA_A = 0.995 # Annealing coefficient, type(float)
SA_STOPPING_TEMP = 0.00001 # stop temperature of SA, type(float)
CROSS_RATE = 0.7 # crossover probability of GA, type(float)
MUTATE_RATE = 0.3 # mutation probability of GA, type(float)
POP_SIZE = 500 # population size of GA, type(int)
N_GENERATION = 50 # generation size of GA, type(int)
ANT_COUNT = 10 # ant count of ACO, type(int)
ACO_GENERATION = 20 # generation size of ACO, type(int)
ALPHA = 1.0 # weight of pheromones, type(float)
BETA = 10.0 # weight of visibility, type(float)
RHO = 0.5 # pheromone evaporation coefficient, type(float)
Q = 10 # pheromone enhancement coefficient, type(int)
def __init__(self, distance_data, violation_list, walk_speed, cost_time_data, start_point,
start_time, end_time, interval_time, prob_data, cluster_data, day_state, index):
self.walkingSpeed = walk_speed # walk speed of police, type(int)
self.startTime = start_time # start time of find path, type(int)
self.startPoint = copy.deepcopy(start_point) # start point of find path
self.endTime = end_time # end time of find path, type(int)
self.saveTime = 0 # save time of the final path, type(int)
self.intervalTime = interval_time # time for police to deal with violations, type(int)
self.distanceData = distance_data # distance of two points
self.parkSpotData = distance_data.index.tolist() # park plots
self.costTime = cost_time_data # cost time of two points
self.point = self.get_point(violation_list, self.parkSpotData) # park plots with its violation records
self.unChangePoint = copy.deepcopy(self.point) # unchanging park plots with its violation records
self.m_path = [] # final path
self.arrive = [] # arrive time of each point of the best solution
self.totalProbability = [] # capture probability of each point of the best solution
self.totalDistance = [] # total distance of the best solution
self.benefit = 0 # benefits of the best solution
self.adjust_arrive = [] # arrive time of the adjust solution
self.adjust_candidate = [] # access point of the adjust solution
self.adjust_probability = [] # capture probability of each point of the adjust solution
self.provisionally_time = [] # somewhere need save provisionally arrive time of provisionally solution
self.provisionally_prob = [] # somewhere need save provisionally capture probability of provisionally solution
self.provisionally_path = [] # somewhere need save provisionally solution
self.solution = [] # mutation results of GA or candidate next points of ACO
self.sa_T = 0 # initial temperature of SA
self.cur_path = [] # current solution of SA or GA
self.cur_arrive = [] # current arrive time of each points of current solution of GA or SA
self.ga_cur_benefit = [] # current benefits of population size solution of GA
self.cur_benefits = 0 # current benefits of current solution of SA or ACO
self.cur_save_time = 0 # current save time of current solution of SA
self.cur_probability = [] # current capture probability of current solution of SA
self.iteration = 0 # current iteration of SA
self.pheromone = copy.deepcopy(self.costTime) # initialize the pheromone table of ACO
self.prob_data = prob_data
self.cluster_data = cluster_data
self.day_state = day_state
self.index = str(index)
def get_point(self, violation_list, park_spot_data):
"""Initial points """
m_point = [] # park plots with its violation records
vio = [] # violation time
dep = [] # departure time
# for index in range(len(park_spot_data)):
for index in range(len(park_spot_data)):
# print(index)
point = Point(index, park_spot_data[index])
for index1, row1 in violation_list.iterrows():
if park_spot_data[index] == row1['street_marker']:
violation_time = self.time_transfer(row1['vio_time'])
departure_time = self.time_transfer(row1['departure_time'])
vio.append(violation_time)
dep.append(departure_time)
point.violated = True
vio.sort()
dep.sort()
for i in range(len(vio)):
point.vioRecords.append(ViolatedRecord(park_spot_data[index], vio[i], dep[i]))
m_point.append(point)
vio.clear()
dep.clear()
return m_point
def time_transfer(self, time):
"""get total minutes from a time in a day"""
hour_str, minute_str = time.split(':')
hour = int(hour_str)
minute = int(minute_str)
return hour * 60 + minute
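# Example (editor-added comment): time_transfer('13:45') returns 13 * 60 + 45 = 825 minutes.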
def output(self, a):
"""Output the results of each method """
test = pd.DataFrame(data=self.m_path)
test.to_csv(a + self.index + '_path.csv')
self.totalDistance.clear()
self.calculate_total_distance()
test1 = pd.DataFrame(data=self.totalDistance)
test1.to_csv(a + self.index + '_dis.csv')
output_benefit = []
output_benefit.append(self.benefit)
test2 = pd.DataFrame(data=output_benefit)
test2.to_csv(a + self.index + '_pro.csv')
test3 = | pd.DataFrame(data=self.arrive) | pandas.DataFrame |
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
s2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([s, s2])
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
s = Categorical([0, 1.2, 2], ordered=True)
s2 = Categorical([0, 1.2, 2], ordered=True)
result = union_categoricals([s, s2])
expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
tm.assert_categorical_equal(result, expected)
# must exactly match types
s = Categorical([0, 1.2, 2])
s2 = Categorical([2, 3, 4])
msg = 'dtype of categories must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([s, s2])
msg = 'No Categoricals to union'
with tm.assert_raises_regex(ValueError, msg):
union_categoricals([])
def test_union_categoricals_nan(self):
# GH 13759
res = union_categoricals([pd.Categorical([1, 2, np.nan]),
pd.Categorical([3, 2, np.nan])])
exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical(['A', 'B']),
pd.Categorical(['B', 'B', np.nan])])
exp = | Categorical(['A', 'B', 'B', 'B', np.nan]) | pandas.Categorical |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from sklearn.metrics import homogeneity_score, completeness_score, adjusted_mutual_info_score, adjusted_rand_score
from sklearn.metrics import silhouette_samples
from sklearn.metrics.cluster import contingency_matrix
# For reproducibility
np.random.seed(1000)
# Download from: https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data
# Change <data_folder> with the actual path
bc_dataset_path = '<data_folder>/wdbc.data'
bc_dataset_columns = ['id','diagnosis', 'radius_mean', 'texture_mean', 'perimeter_mean',
'area_mean', 'smoothness_mean', 'compactness_mean', 'concavity_mean',
'concave points_mean', 'symmetry_mean', 'fractal_dimension_mean',
'radius_se','texture_se', 'perimeter_se', 'area_se', 'smoothness_se',
'compactness_se', 'concavity_se', 'concave points_se', 'symmetry_se',
'fractal_dimension_se', 'radius_worst', 'texture_worst', 'perimeter_worst',
'area_worst', 'smoothness_worst', 'compactness_worst', 'concavity_worst',
'concave points_worst', 'symmetry_worst', 'fractal_dimension_worst']
if __name__ == '__main__':
# Load the dataset
df = pd.read_csv(bc_dataset_path, index_col=0, names=bc_dataset_columns).fillna(0.0)
# Show the overall statistical properties
print(df.describe())
# Show the pair-plot
sns.set()
with sns.plotting_context("notebook", font_scale=1.2):
sns.pairplot(df,
vars=['perimeter_mean', 'area_mean', 'smoothness_mean', 'concavity_mean', 'symmetry_mean'],
hue="diagnosis")
plt.show()
# Project the dataset on a bidimensional plane
cdf = df.drop(['diagnosis'], axis=1)
tsne = TSNE(n_components=2, perplexity=10, random_state=1000)
data_tsne = tsne.fit_transform(cdf)
df_tsne = pd.DataFrame(data_tsne, columns=['x', 'y'], index=cdf.index)
dff = pd.concat([df, df_tsne], axis=1)
# Show the diagram
fig, ax = plt.subplots(figsize=(18, 11))
with sns.plotting_context("notebook", font_scale=1.5):
sns.scatterplot(x='x',
y='y',
hue='diagnosis',
size='area_mean',
style='diagnosis',
sizes=(30, 400),
palette=sns.color_palette("husl", 2),
data=dff,
ax=ax)
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
plt.show()
# Perform a K-Means clustering with K=2
km = KMeans(n_clusters=2, max_iter=1000, random_state=1000)
Y_pred = km.fit_predict(cdf)
df_km = | pd.DataFrame(Y_pred, columns=['prediction'], index=cdf.index) | pandas.DataFrame |
import os, glob, sys
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
import re
def load_data(path):
"""Load training and testing datasets based on their path
Parameters
----------
path : relative path to location of data, should be always the same (string)
Returns
-------
Training and testing Dataframes
"""
train = pd.read_csv(os.path.join(path,'train.csv'))
test = pd.read_csv(os.path.join(path,'test.csv'))
return train, test
def modify_fare(df, n: int = 4):
"""Introduce n new intervals (based on quantiles) for the feature fare, such that it is modified from
being continuous to being discrete
Parameters
----------
df : panda dataframe
n: number of new intervals (int)
Returns
-------
Original dataframe with discretized version of the feature 'Fare', categories
"""
df['Fare'] = df['Fare'].fillna(df['Fare'].median())
df['Fare'] = pd.qcut(df['Fare'], n, labels = list(string.ascii_uppercase)[:n])
return df
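# Example of the discretization above (editor-added comment): with n=4, pd.qcut maps each fare to
# one of the quartile-based labels 'A'-'D'; e.g. fares of roughly 7.3, 13, 25 and 512 on the
# Titanic data fall into 'A', 'B', 'C' and 'D' respectively (exact bin edges depend on the
# observed quantiles).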
def get_size_family(df, mod: bool = False):
"""Defines family relations based on the features 'SibSp' (the # of siblings / spouses aboard the Titanic)
and 'Parch' (the # of parents / children aboard the Titanic)
Parameters
----------
df : panda dataframe
Returns
-------
Original dataframe with a new feature called 'FamilySize'
"""
df['FamilySize'] = df['SibSp'] + df['Parch'] + 1
if mod:
bins_ = [0,1,2,12]
df['FamilySize'] = pd.cut(df["FamilySize"], bins = bins_, labels = list(string.ascii_uppercase)[:len(bins_)-1])
return df
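# Example of the mod=True binning above (editor-added comment): with bins [0, 1, 2, 12] a passenger
# travelling alone (FamilySize=1) falls into 'A', a couple (FamilySize=2) into 'B', and any larger
# family (3-12) into 'C'.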
def get_title(name):
Search for an individual title in a string, assuming it consists of ASCII letters A-Z followed by a period
Parameters
----------
name : The name from which a title wants to be extracted (string)
Returns
-------
String associated to a found title
"""
title_search = re.search(r' ([A-Za-z]+)\.', name)
# If the title exists, extract and return it.
if title_search:
return title_search.group(1)
return ""
def get_titles(df, mod: bool = True):
"""Search for all titles inside a dataframe, given the feature 'Name'
Parameters
----------
df : panda dataframe
mod : simplify the extend of titles available (boolean)
Returns
-------
Original dataframe with a new feature called 'Title'
"""
df['Title'] = df['Name'].apply(get_title)
if mod:
# perform modifications
df['Title'] = df['Title'].replace('Mlle', 'Miss')
df['Title'] = df['Title'].replace('Ms', 'Miss')
df['Title'] = df['Title'].replace('Mme', 'Mrs')
return df
def get_all_ages(df, n: int = 5):
"""Fills in empty Ages based on the Title of a person, and then introduces n intervals for the feature 'Ages',
such that it is modified from being continuous to be discrete
Parameters
----------
df : panda dataframe
n: number of new intervals (int)
Returns
-------
Discretized version of the feature 'Age', categories
"""
emb = []
for i, row in df.iterrows():
if pd.isnull(row['Age']):
title = row['Title']
age_avg = df['Age'][df['Title'] == title].mean()
age_std = df['Age'][df['Title'] == title].std()
emb.append(np.random.randint(age_avg - age_std, age_avg + age_std, size=1)[0])
else:
emb.append(row['Age'])
# Update column
df['Age'] = emb
# Create new column
df["Age"] = pd.cut(df["Age"], n, labels = list(string.ascii_uppercase)[:n])
return df
def get_age2(df):
"""Fills in empty Ages based on the Title of a person. DR
Parameters
----------
df : panda dataframe
Returns
-------
Dataframe with missing values for age filled.
"""
ages_mean = df[['Title', 'Age']].groupby(['Title'],
as_index=False).mean().set_index('Title').rename(columns={'Age': 'mean'})
ages_std = df[['Title', 'Age']].groupby(['Title'], as_index=False).std().set_index('Title').rename(columns={'Age': 'std'})
ages_title = pd.merge(ages_mean,ages_std, how='inner', left_index=True, right_index=True)
age = []
for i, Port in df.iterrows():
if pd.isnull(Port['Age']):
age.append(np.random.normal(ages_title.loc[Port['Title'],'mean'],ages_title.loc[Port['Title'],'std']))
else:
age.append(Port['Age'])
# Update column
df['Age'] = age
return df
def get_age_group(df,n: int=10):
"""Assigns a category to the age DR
Parameters
----------
df : Dataframe
n : number of categories
Returns
-------
Dataset with Age_group column
"""
df["Age_group"] = pd.cut(df["Age"], n, labels = list(string.ascii_uppercase)[:n])
return df
def modify_titles(df):
"""Concatenates titles found to be similar or considered to be simplified in one category
Parameters
----------
df : panda dataframe
Returns
-------
Simplified categories in the features 'Title'
"""
# join less representative cotegories
df['Title'] = df['Title'].replace(['Lady', 'Countess',
'Capt', 'Col', 'Don', 'Dr', 'Major',
'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
return df
def get_deck(name):
"""Search for individual Capital letter inside a string associated to the cabin of a person, from A-Z
Parameters
----------
name : The name from which a deck wants to be extracted (string)
Returns
-------
Letter associated with the deck that a person has
"""
if pd.isnull(name):
return 'None'
else:
title_search = re.findall(r"^\w", name)
# If the title exists, extract and return it.
if title_search:
return title_search[0]
else:
return 'None'
def get_decks(df):
"""Search for the information of all decks inside a dataframe, given the feature 'Cabin'
Parameters
----------
df : panda dataframe
Returns
-------
Original dataframe with a new feature called 'Deck'
"""
df['Deck'] = df['Cabin'].apply(get_deck)
# Modifications
df['Deck'] = df['Deck'].replace('T', 'None')
return df
def embarked_bayes(df, i):
"""Using Bayes Theorem, and based on 'Pclass', determine the probability of 'Embarked' for a person
given the possibilities S, C or Q.
Parameters
----------
df : panda dataframe
Returns
-------
String associated to the most likely port from where a passenger Embarked, given its Pclass
"""
pclass_ = df['Pclass'].iloc[i]
# Bayes rule: P(S|class) = P(S)*P(class|S) / [P(S)*P(class|S) + P(C)*P(class|C) + P(Q)*P(class|Q)]  # probability that, given the passenger's class, the person embarked from port S
P_S, P_C, P_Q = df['Embarked'].value_counts()['S'], df['Embarked'].value_counts()['C'], \
df['Embarked'].value_counts()['Q']
P_class_S = df['Embarked'][df['Pclass'] == pclass_].value_counts()['S']
P_class_C = df['Embarked'][df['Pclass'] == pclass_].value_counts()['C']
P_class_Q = df['Embarked'][df['Pclass'] == pclass_].value_counts()['Q']
res = []
P_S_class = (P_S * P_class_S) / ((P_S * P_class_S) + (P_C * P_class_C) + (P_Q * P_class_Q))
res.append(P_S_class)
P_C_class = (P_C * P_class_C) / ((P_S * P_class_S) + (P_C * P_class_C) + (P_Q * P_class_Q))
res.append(P_C_class)
P_Q_class = (P_Q * P_class_Q) / ((P_S * P_class_S) + (P_C * P_class_C) + (P_Q * P_class_Q))
res.append(P_Q_class)
if sorted(res, reverse=True)[0] == P_S_class:
return 'S'
elif sorted(res, reverse=True)[0] == P_C_class:
return 'C'
elif sorted(res, reverse=True)[0] == P_Q_class:
return 'Q'
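# Worked numeric sketch of the Bayes rule above (editor-added comment, using approximate counts
# from the classic Titanic training set): with P_S=644, P_C=168, P_Q=77 and first-class counts
# P_class_S=127, P_class_C=85, P_class_Q=2, the unnormalized scores are 644*127=81788,
# 168*85=14280 and 77*2=154, so a first-class passenger with a missing 'Embarked' value is
# assigned 'S'.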
def get_embarked_bayes(df):
"""Search for the Embarked information of passengers missing this data, based on its 'Pclass'
Parameters
----------
df : panda dataframe
Returns
-------
Original dataframe with all missing values of the feature 'Embarked' filled in
"""
emb = []
for i, Port in df.iterrows():
if pd.isnull(Port['Embarked']):
emb.append(embarked_bayes(df, i))
else:
emb.append(Port['Embarked'])
# Update column
df['Embarked'] = emb
return df
def get_if_cabin(df):
"""Indicate if a person has a 'Cabin'
Parameters
----------
df : panda dataframe
Returns
-------
String with a Yes or No
"""
# Feature that tells whether a passenger had a cabin on the Titanic
df['Has_Cabin'] = df["Cabin"].apply(lambda x: 'No' if type(x) == float else 'Yes')
return df
def get_type_ticket(df):
"""Indicate if a person has a 'Ticket'
Parameters
----------
df : panda dataframe
Returns
-------
Categorical unique code
"""
# Feature holding the first three characters of the ticket, then ordinally encoded
df['Type_Ticket'] = df['Ticket'].apply(lambda x: x[0:3])
df['Type_Ticket'] = df['Type_Ticket'].astype('category').cat.codes # ordinal encoding
df['Type_Ticket'] = df['Type_Ticket'].astype(int)
return df
def get_count_name(df):
"""Indicate if a person has a 'Name'
Parameters
----------
df : panda dataframe
Returns
-------
Integer count of words in the name
"""
# Feature counting the number of words in the passenger's name
df['Words_Count'] = df['Name'].apply(lambda x: len(x.split())).astype(int)
return df
def drop_features(df, to_drop):
"""Drop unwanted features
Parameters
----------
df : panda dataframe
to_drop : array with name of features to be dropped
Returns
-------
Original dataframe with all original features but those in to_drop
"""
return df.drop(to_drop, axis=1)
def bookkeeping_results(frame, name, model, cv_):
"""Dataframe to save results from experiment
Parameters
----------
frame : empty dataframe with desired features
name : name model to save, string
model : model used, object
cv_ : results from cross validation, array
Returns
-------
Updated original dataframe with the new added result from model
"""
# Initialize a dictionary to save the results.
res = {}
res['Model'] = name
res['best_params'] = [model.get_params()]
res['cv_acc'] = cv_.mean() * 100
res['cv_acc_std'] = cv_.std() * 100
return pd.concat([frame, | pd.DataFrame(res) | pandas.DataFrame |
import os
import warnings
import argparse
from pathlib import Path
import netCDF4
import pandas as pd
import numpy as np
from geotiff import GeoTiff
from tqdm import tqdm
from sklearn.metrics import pairwise_distances
from sklearn.model_selection import GroupShuffleSplit
from tools.settings import CLIMATE_OPT, CAT_OPT, FEATURES_COLS, START_VAL_COLS, TARGET_COLS
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(prog='Data preparation',
description =
"""
The script builds the data for training a Neural ODE.
According to the hypothesis, the inputs are:
- t2m (temperature at 2 m)
- td2m (dew point at 2 m)
- ff (wind speed)
- R (precipitation over 6, 12, 24 hours, optional)
- phi(t) (periodic function of time)
- climate (temp, soil, precip) (climatic characteristics of temperature, soil moisture storage and precipitation)
- soil type
- cover type (type of the underlying surface)
- kult type (type of cultivated crop)
- val_1, val_2 (soil moisture storage at time t0)
The output is the derivative of the soil moisture storage:
- new val_1, val_2 (soil moisture storage at time t1)
""",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-d', '--dist', type=float, default=1, help='Drop stations with a large distance')
parser.add_argument('-ts', '--test_size', type=float, default=0.1, help='Fraction of the total data used for validation')
opt = parser.parse_args()
def load_syn(path: str) -> pd.DataFrame:
syn = pd.read_csv(path, usecols=['s_ind', 'datetime', 't2m', 'td2m', 'ff', 'R12'])
syn.loc[syn.datetime.astype(str).str.len() == 7, 'datetime'] = '0'+\
syn[syn.datetime.astype(str).str.len() == 7].datetime.astype(str)
syn.loc[:, 'datetime'] = | pd.to_datetime(syn.datetime, format='%y%m%d%H') | pandas.to_datetime |
import os, glob
import pandas as pd
from datetime import datetime as dt
from pathlib import Path
from emotion_recognition import EmotionRecognizer
from pylab import *
import numpy as np
import seaborn as sn
from progressbar import *
import pickle
import ntpath
from pathlib import Path
import shutil
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
class CordioESP_ToolBox:
"""
Expressive Speech Processing
This class manages emotion detection from speech.
* Supported emotion classes:
'neutral', 'calm', 'happy', 'sad', 'angry', 'fear', 'disgust', 'ps', 'boredom'
* CordioESP_ToolBox supports all scikit-learn models, for example:
'SVC', 'AdaBoostClassifier', 'RandomForestClassifier', 'GradientBoostingClassifier', 'DecisionTreeClassifier'
* examples of usage can be found in:
Main_test.py or in Main_create_emotion_label_table_and_emotion_labaled_scalarDist_table.py
Cordio Medical - Confidential
Version: 0.1 2020-04-27
Revision History:
| Ver | Author | Date | Change Description
|----------|-----------|----------------|--------------------
| 0.1 | Or | 2020-04-27 | Initial
| x.x | xxxx | xxxx-xx-xx | x
"""
def __init__(self):
self.version = 0.1
self.suported_emotions = ['neutral', 'calm', 'happy', 'sad', 'angry', 'fear', 'disgust', 'ps', 'boredom']
self.supported_models = ['SVC', 'AdaBoostClassifier', 'RandomForestClassifier', 'GradientBoostingClassifier',
'DecisionTreeClassifier', 'KNeighborsClassifier', 'MLPClassifier']
self.model_emotion_dict = {'SVC': ['angry', 'sad', 'neutral'],
'AdaBoostClassifier': ['sad', 'fear', 'boredom', 'neutral'],
'RandomForestClassifier': ['sad', 'fear', 'boredom', 'neutral'],
'KNeighborsClassifier': ['sad', 'fear', 'boredom', 'neutral']}
self.model_list = [SVC(probability=True), AdaBoostClassifier(), RandomForestClassifier(), KNeighborsClassifier()]
def save_object(self, obj, save_url_path, filename):
# create folders in path if not exist:
Path(save_url_path).mkdir(parents=True, exist_ok=True)
with open(save_url_path+'\\'+filename, 'wb') as output: # Overwrites any existing file.
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
def modelTrain(self, model, emotions_list):
# my_model probability attribute needs to be Truth!
save_url_path = 'trained_models'
filename = 'trained_' + type(model).__name__ + '_ESPVer' + str(self.version) + '.pkl'
# check if model exist:
if os.path.exists(save_url_path+'\\'+filename):
with open(save_url_path+'\\'+filename, 'rb') as input:
rec = pickle.load(input)
else:
# train the model
# pass my model to EmotionRecognizer instance
# and balance the dataset
rec = EmotionRecognizer(model=model, emotions=emotions_list, balance=True, verbose=0, probability=True)
rec.train()
self.save_object(rec, save_url_path, filename)
return rec
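# Illustrative usage sketch (editor-added comment; the wav path is a placeholder): train or load a
# cached recognizer for one of the supported models and score a single recording:
#   esp = CordioESP_ToolBox()
#   rec = esp.modelTrain(SVC(probability=True), esp.model_emotion_dict['SVC'])
#   scores = esp.modelPredict(rec, r'path\to\recording.wav')   # soft-decision scores per emotion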
def modelPredict(self, rec, wav_url):
try:
out = rec.predict_proba(wav_url)
# wav_url == '\\\\192.168.55.210\\f$\\db\\BSV\\BSV-0009\\BSV-0009_200403_105407_S0007_he_1.54_SMJ400F_Android26.wav'
except ValueError:
wavPath = Path(wav_url)
print('\nempty file skipped: '+wavPath.name)
out = 'empty file'
except RuntimeError:
wavPath = Path(wav_url)
print("\nFixing header: "+wavPath.name)
LOCAL_PATH = os.getcwd()
shutil.copyfile(wav_url, LOCAL_PATH+'\\'+wavPath.name)
os.system("ffmpeg -nostats -loglevel 0 -i " + LOCAL_PATH+'\\'+wavPath.name + " -f s16le -acodec pcm_s16le -y temp.pcm")
os.system("ffmpeg -nostats -loglevel 0 -f s16le -ar 48.0k -ac 1 -i temp.pcm " + LOCAL_PATH+'\\'+wavPath.name + " -y")
print("\nDone fixing the header")
out = rec.predict_proba( LOCAL_PATH+'\\'+wavPath.name )
# remove temp files:
os.remove(LOCAL_PATH+'\\'+wavPath.name)
os.remove(LOCAL_PATH + '\\temp.pcm')
return out
def predict_all_proba_for_patientNmodel(self, model, fileHandle, clinicalInformation, patient_info_column_names,
emotions_list, all_wavs):
df_len = len(all_wavs)
patientNmodel_df = pd.DataFrame(index=np.arange(df_len),
columns=patient_info_column_names + ['Model'] + emotions_list)
model_name = model if type(model)==str else type(model).__name__
rec = self.modelTrain(model, self.model_emotion_dict[model_name])
# progress bar initialization:
p = Path(str(all_wavs[0]))
# fileHandle = CordioFile
patient_ID = fileHandle.CordioExtractPatient(p)
patient_model = type(model).__name__
widgets = [FormatLabel('<patient: ' + patient_ID + '; model: ' + patient_model + '>'), ' ', Percentage(), ' ',
Bar('#'), ' ', RotatingMarker()]
progressbar = ProgressBar(widgets=widgets, maxval=df_len)
progressbar.start()
# fill df:
for (i, wav) in zip(range(df_len), all_wavs):
# progress bar update:
widgets[0] = FormatLabel('<filename-{0}>'.format(i))
progressbar.update(i)
# add soft decision score for each emotion
patientNmodel_df.loc[i] = self.modelPredict(rec, wav)
# insert basic information:
p = Path(str(wav))
# fileHandle = CordioFile
patientNmodel_df.at[i, "PatientName"] = fileHandle.CordioExtractPatient(p)
patientNmodel_df.at[i, "Date"] = fileHandle.CordioExtractRecordingDateTime(p).strftime("%d/%m/%Y")
patientNmodel_df.at[i, "Time"] = fileHandle.CordioExtractRecordingDateTime(p).strftime("%H:%M:%S")
patientNmodel_df.at[i, "sentence"] = fileHandle.CordioExtractSentence(p)
patientNmodel_df.at[i, "Language"] = fileHandle.CordioExtractLanguage(p)
# TODO: add App version, Device identifier and OS version columns
# setting clinical status:
clinicalStatus = self.get_clinical_info(clinicalInformation, fileHandle.CordioExtractRecordingDateTime(p),
patientNmodel_df.at[i, "PatientName"])
patientNmodel_df.at[i, "ClinicalStatus"] = clinicalStatus
# setting model:
patientNmodel_df.at[i, "Model"] = type(model).__name__
progressbar.finish()
return patientNmodel_df
# def predict_all_proba_for_patient(self, patientDir_path, clinicalInformation, fileHandle, model_list,
# emotions_list):
# # get all wavs:
# all_wavs = glob.glob(os.path.join(patientDir_path, '*.wav'))
# num_of_wav = len(all_wavs) * len(model_list)
#
# # create basic information table for patient:
# patient_info_column_names = ["PatientName", "Date", "Time", "sentence", "Language", "ClinicalStatus"]
# patient_df = pd.DataFrame(columns=patient_info_column_names + ['Model'] + emotions_list)
#
# # progress bar initialization:
# p = Path(str(all_wavs[0]))
# # fileHandle = CordioFile
# patient_ID = fileHandle.CordioExtractPatient(p)
# widgets = [FormatLabel('<<patient: ' + patient_ID + '; all models process>>'), ' ', Percentage(), ' ',
# Bar('#'), ' ', RotatingMarker()]
# progressbar = ProgressBar(widgets=widgets, maxval=len(model_list))
# progressbar.start()
#
# # calculating for all models:
# for i, model in zip(range(len(model_list)), model_list):
# # progress bar update:
# widgets[0] = FormatLabel('<filename-{0}>'.format(i))
# progressbar.update(i)
#
# # --for debug:
# sentence = 'S0007'
# tmp = self.create_ESP_labeled_table(patientDir_path, model, sentence, emotions_list, clinicalInformation,
# fileHandle)
# # --
# tmp = self.predict_all_proba_for_patientNmodel(model, fileHandle, clinicalInformation,
# patient_info_column_names, all_wavs)
#
# patient_df = patient_df.append(tmp)
# progressbar.finish()
#
# return patient_df
def get_clinical_info(self, clinicalInformation, recording_datetime, patient_id):
clinicalInfo = clinicalInformation(patient_id, '')
clinicalStatusCode = clinicalInfo(recording_datetime)
clinicalStatus = "dry"
if clinicalStatusCode == -1:
# recording is not valid (before patient registration)
clinicalStatus = 'recording is not valid (before patient registration)'
elif clinicalStatusCode == clinicalInfo.CLINICAL_STATUS_UNKNOWN:
clinicalStatus = "unknown"
elif clinicalStatusCode == clinicalInfo.CLINICAL_STATUS_WET:
clinicalStatus = "wet"
return clinicalStatus
def SaveFig(self, fig, save_url_path, save_file_name, add_datetime, close_fig):
# from pathlib import Path
# create folders in path if not exist:
Path(save_url_path).mkdir(parents=True, exist_ok=True)
# remove old file with the same name if exist:
if os.path.isfile(save_url_path + "\\" + save_file_name + ".png"):
os.remove(save_url_path + "\\" + save_file_name + ".png")
plt.ioff()
# save file:
# datetime object containing current date and time
now = dt.now()
if (add_datetime == []) or (add_datetime == True):
dt_string = now.strftime("%d%m%y_%H%M%S")
fig.savefig(save_url_path + "\\" + save_file_name + dt_string + ".png", bbox_inches='tight')
else:
fig.savefig(save_url_path + "\\" + save_file_name + ".png", bbox_inches='tight')
if close_fig:
plt.close(fig)
def SaveTable(self, table, save_url_path, save_file_name, add_datetime, is_index_col=True):
# from pathlib import Path
# create folders in path if not exist:
Path(save_url_path).mkdir(parents=True, exist_ok=True)
# remove old file with the same name if exist:
if os.path.isfile(save_url_path + "\\" + save_file_name + ".png"):
os.remove(save_url_path + "\\" + save_file_name + ".png")
plt.ioff()
# save file:
# datetime object containing current date and time
now = dt.now()
if (add_datetime == []) or (add_datetime == True):
dt_string = now.strftime("%d%m%y_%H%M%S")
table.to_csv(save_url_path + "\\" + save_file_name + "_" + dt_string + '.csv', index=is_index_col)
else:
table.to_csv(save_url_path + "\\" + save_file_name + '.csv', index=is_index_col)
def get_table_by_session(self, prob_table, session_hour_range, session_action, emotions_list):
# TODO: add description
# prob_table check: check necessary columns existence
prob_table_col_names = list(prob_table.columns)
if 'Unnamed: 0' in prob_table_col_names:
prob_table = prob_table.drop('Unnamed: 0', axis=1)
prob_table['Date'] = pd.to_datetime(prob_table['Date'], format="%d/%m/%Y")
prob_table['Time'] = pd.to_datetime(prob_table['Time'], format="%H:%M:%S")
# initial graphs df:
emotions_in_prob_table_idx = [idx for idx, val in enumerate(self.suported_emotions) if val in prob_table_col_names]
emotions_in_prob_table = [self.suported_emotions[i] for i in emotions_in_prob_table_idx]
graphs_df_col_names = ['Patient_id', 'SessionIdx', 'Date', 'FirstSessionRecTime',
'LastSessionRecTime', 'Model', 'IsWet'] + emotions_in_prob_table
graphs_df = pd.DataFrame(columns=graphs_df_col_names)
# fill graphs_df:
unique_dates = prob_table.Date.dt.strftime("%d/%m/%Y").unique()
unique_dates = [x for x in unique_dates if str(x) != 'nan'] # remove nans
prob_table = prob_table.sort_values(['Date', 'Time'], ascending=[True, True])
session_idx = 0
for date in unique_dates:
# get current date sub-df
dt_date = dt.strptime(date, "%d/%m/%Y")
# mean probabilities for each model type:
unique_model_types = prob_table.Model.unique()
# remove unsupported models:
unique_model_types = [val for idx, val in enumerate(self.supported_models) if val in unique_model_types]
for model in unique_model_types:
prob_table_dateNmodel_sub_df = prob_table[(prob_table['Model'] == model) & (prob_table['Date'] == dt_date)]
curr_time_idx = prob_table_dateNmodel_sub_df.index.values[0] # first index of prob_table_dateNmodel_sub_df
curr_time = | pd.to_datetime(prob_table_dateNmodel_sub_df['Time'].loc[curr_time_idx], format="%H:%M:%S") | pandas.to_datetime |
import pandas as pd
import numpy as np
attr = pd.read_csv('GTEx_Analysis_v8_Annotations_SampleAttributesDS.txt', sep='\t')
reader = pd.read_csv('GTEx_Analysis_2017-06-05_v8_RSEMv1.3.0_transcript_tpm.gct', sep='\t', chunksize=10000)
chunks = []
for chunk in reader:
chunks.append(chunk)
expressions = pd.concat(chunks)
sample_names = attr['SAMPID']
terms = ['SMTS', 'SMTSD']
for term in terms:
samples_sum = 0
groups = np.unique(attr[term])
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import time
import os
def symbol_to_path(symbol, basedir='data'):
return os.path.join(basedir, '{}.csv'.format(symbol))
def get_data(symbols, dates):
df = pd.DataFrame(index=dates)
if 'SPY' not in symbols:
symbols.insert(0, 'SPY')
for symbol in symbols:
dftmp = pd.read_csv(symbol_to_path(symbol),
index_col='Date',
parse_dates=True,
usecols=['Date', 'Adj Close'],
na_values='nan')
dftmp = dftmp.rename(columns={'Adj Close': symbol})
df = df.join(dftmp, how='left')
if 'SPY' == symbol:
df = df.dropna(subset=['SPY'])
return df
def plot_data(df, title='Stock Prices'):
ax = df.plot(title=title, fontsize=12, linewidth=0.7)
ax.set_xlabel('Date')
ax.set_ylabel('Price')
plt.show()
def fill_missing_values(df):
df.fillna(method='ffill', inplace=True)
df.fillna(method='bfill', inplace=True)
def p06_using_fillna():
dates = | pd.date_range(start='2010-01-01', end='2012-12-31') | pandas.date_range |
#from matplotlib.pyplot import title
import streamlit as st
import streamlit.components.v1 as components
import pandas as pd
import plotly.express as px
from modzy import ApiClient
from modzy._util import file_to_bytes
import json
from sklearn.manifold import TSNE
import numpy as np
from pyvis.network import Network
from sklearn.cluster import KMeans
from wordcloud import WordCloud
import matplotlib.pyplot as plt
st.set_option('deprecation.showPyplotGlobalUse', False)
st.sidebar.image('text.png')
#col1,col2 = st.columns([1,6])
st.image('subtext.png')
#df=pd.read_csv("https://raw.githubusercontent.com/pupimvictor/NetworkOfThrones/master/stormofswords.csv")
df = pd.read_csv("https://raw.githubusercontent.com/napoles-uach/Data/main/got1.csv")
df=df[['Source','Target','weight']]
#st.write(df)
#weigths=df['weight'].tolist()
def got_func():
got_net = Network(height="600px", width="100%", heading='A song of Ice and Fire (Book 1) Graph')#,bgcolor='#222222', font_color='white')
# set the physics layout of the network
#got_net.barnes_hut()
got_net.force_atlas_2based()
#got_net.show_buttons(filter_=True)
#got_data = pd.read_csv("https://www.macalester.edu/~abeverid/data/stormofswords.csv")
got_data = pd.read_csv("https://raw.githubusercontent.com/napoles-uach/Data/main/got1.csv")
#got_data = pd.read_csv("stormofswords.csv")
#got_data.rename(index={0: "Source", 1: "Target", 2: "Weight"})
sources = got_data['Source']
targets = got_data['Target']
weights = got_data['weight']
edge_data = zip(sources, targets, weights)
for e in edge_data:
src = e[0]
dst = e[1]
w = e[2]
got_net.add_node(src, src, title=src, color='red')
got_net.add_node(dst, dst, title=dst,color='red')
got_net.add_edge(src, dst, value=w)
neighbor_map = got_net.get_adj_list()
# add neighbor data to node hover data
for node in got_net.nodes:
node["title"] += " Neighbors:<br>" + "<br>".join(neighbor_map[node["id"]])
node["value"] = len(neighbor_map[node["id"]])
got_net.show("gameofthrones.html")
got_func()
HtmlFile = open("gameofthrones.html", 'r', encoding='utf-8')
source_code = HtmlFile.read()
#check_graph = st.sidebar.checkbox('Show Graph')
#if check_graph:
with st.expander('Show Graph'):
components.html(source_code, width=670,height=700)
text = open("edges.txt","w")
text.write('graph')
for i in range(len(df)):
text.write('\n%s' % str(df.iloc[i][0]).replace(" ", "")+" "+str(df.iloc[i][1]).replace(" ", "")+" "+str(df.iloc[i][2]))
text.close()
f = open('edges.txt','r',encoding='utf-8')
client = ApiClient(base_url="https://app.modzy.com/api", api_key="<KEY>")
sources = {}
sources["my-input"] = {
"edges.txt": f.read(),
}
@st.cache()
def res(sources):
job = client.jobs.submit_text("sixvdaywy0", "0.0.1", sources)
result = client.results.block_until_complete(job, timeout=None)
return result
#job = client.jobs.submit_text("sixvdaywy0", "0.0.1", sources)
#result = client.results.block_until_complete(job, timeout=None)
result = res(sources)
#st.button('Download')
#st.balloons()
#st.stop()
results_json = result.get_first_outputs()['results.json']
x = results_json['Node Embeddings']
names_dict = []
vec_dict = []
for names in x:
names_dict.append(names)
v=x[names].split()
vec_dict.append(v)
# convert a list of string numbers to a list of float numbers
def convert_to_float(l):
return [float(i) for i in l]
vec_dict = [convert_to_float(i) for i in vec_dict]
chart_data= | pd.DataFrame(vec_dict) | pandas.DataFrame |
import random
import timeit
import string
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Categorical, date_range, read_csv
from pandas.compat import PY2
from pandas.compat import cStringIO as StringIO
from ..pandas_vb_common import setup, BaseIO # noqa
class ToCSV(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
params = ['wide', 'long', 'mixed']
param_names = ['kind']
def setup(self, kind):
wide_frame = DataFrame(np.random.randn(3000, 30))
long_frame = DataFrame({'A': np.arange(50000),
'B': np.arange(50000) + 1.,
'C': np.arange(50000) + 2.,
'D': np.arange(50000) + 3.})
mixed_frame = DataFrame({'float': np.random.randn(5000),
'int': np.random.randn(5000).astype(int),
'bool': (np.arange(5000) % 2) == 0,
'datetime': date_range('2001',
freq='s',
periods=5000),
'object': ['foo'] * 5000})
mixed_frame.loc[30:500, 'float'] = np.nan
data = {'wide': wide_frame,
'long': long_frame,
'mixed': mixed_frame}
self.df = data[kind]
def time_frame(self, kind):
self.df.to_csv(self.fname)
class ToCSVDatetime(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
def setup(self):
rng = date_range('1/1/2000', periods=1000)
self.data = DataFrame(rng, index=rng)
def time_frame_date_formatting(self):
self.data.to_csv(self.fname, date_format='%Y%m%d')
class ReadCSVDInferDatetimeFormat(object):
goal_time = 0.2
params = ([True, False], ['custom', 'iso8601', 'ymd'])
param_names = ['infer_datetime_format', 'format']
def setup(self, infer_datetime_format, format):
rng = date_range('1/1/2000', periods=1000)
formats = {'custom': '%m/%d/%Y %H:%M:%S.%f',
'iso8601': '%Y-%m-%d %H:%M:%S',
'ymd': '%Y%m%d'}
dt_format = formats[format]
self.data = StringIO('\n'.join(rng.strftime(dt_format).tolist()))
def time_read_csv(self, infer_datetime_format, format):
read_csv(self.data, header=None, names=['foo'], parse_dates=['foo'],
infer_datetime_format=infer_datetime_format)
class ReadCSVSkipRows(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
params = [None, 10000]
param_names = ['skiprows']
def setup(self, skiprows):
N = 20000
index = tm.makeStringIndex(N)
df = DataFrame({'float1': np.random.randn(N),
'float2': np.random.randn(N),
'string1': ['foo'] * N,
'bool1': [True] * N,
'int1': np.random.randint(0, N, size=N)},
index=index)
df.to_csv(self.fname)
def time_skipprows(self, skiprows):
read_csv(self.fname, skiprows=skiprows)
class ReadUint64Integers(object):
goal_time = 0.2
def setup(self):
self.na_values = [2**63 + 500]
arr = np.arange(10000).astype('uint64') + 2**63
self.data1 = StringIO('\n'.join(arr.astype(str).tolist()))
arr = arr.astype(object)
arr[500] = -1
self.data2 = StringIO('\n'.join(arr.astype(str).tolist()))
def time_read_uint64(self):
| read_csv(self.data1, header=None, names=['foo']) | pandas.read_csv |
from root.config.db_config import SQLDatabase #importing Database class
import os
from flask import Flask, render_template, url_for, json
import re
import pandas as pd
from flask import request, jsonify
import numpy as np
import random
import string
import json
from root import app
conn = SQLDatabase() #creating Database class object
#cur = conn.getConnection()
home_directory = "C:\\workings\\Python\\testFolder\\"
UPLOAD_FOLDER = "C:\\workings\\Python\\testFolder\\"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def getFileDetails(ac_token, user_id):
try:
dbconn = conn.getConnection() #creating Database connection parameter
sql = "SELECT * FROM C_SYS_SRC_GEN WHERE AC_TOKEN = '" + str(ac_token) + "' AND USER_ID ='"+ str(user_id) +"'"
cursor = conn.query(sql, dbconn)
rv = cursor.fetchone()
return {'FILE_NAME': rv.TARGET_FILE_NAME,'FILE_PATH': rv.TARGET_FILE_PATH,'FILE_ID':rv.PKEY_SRC_GEN}
del dbconn
# except IOError as e:
# result = "I/O error"+str(e)
# except ValueError as e:
# result = "ValueError error"+str(e)
# except:
# raise
#
# return result
except Exception as e:
return {'error': str(e)}
def getProjectDetails(project_ac_token, user_id):
try:
dbconn = conn.getConnection() # creating Database connection parameter
sql = "SELECT * FROM C_SYS_PROJECTS WHERE AC_TOKEN = '" + str(project_ac_token) + "' AND USER_ID ='" + str(
user_id) + "'"
cursor = conn.query(sql, dbconn)
rv = cursor.fetchone()
del dbconn
return rv
# except IOError as e:
# result = "I/O error" + str(e)
# except ValueError as e:
# result = "ValueError error" + str(e)
# except:
# raise
#
# return result
except Exception as e:
return {'error': str(e)}
def getFileList(project_id, user_id):
try:
dbconn = conn.getConnection() # creating Database connection parameter
sql = "SELECT * FROM C_SYS_SRC_GEN WHERE PROJECT_ID = '" + str(project_id) + "' AND USER_ID ='" + str(user_id) + "' ORDER BY PKEY_SRC_GEN DESC"
cursor = conn.query(sql, dbconn)
rv = cursor.fetchall()
rowarray_list =[]
for value in rv:
t = {'PKEY_SRC_GEN': value.PKEY_SRC_GEN,
'FILE_NAME': value.TARGET_FILE_NAME,
'AC_TOKEN': value.AC_TOKEN
}
rowarray_list.append(t)
del dbconn
return rowarray_list
# except IOError as e:
# result = "I/O error" + str(e)
# except ValueError as e:
# result = "ValueError error" + str(e)
# except:
# raise
#
# return result
except Exception as e:
return {'error': str(e)}
def ran_gen(size, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
def saveConsolidation(project_ac_token,user_id,actual_file, files ,con_file_name):
try:
dbconn = conn.getConnection() # creating Database connection parameter
project_details = getProjectDetails(project_ac_token, user_id)
project_id = project_details.PKEY_PROJECTS
getData = getFileDetails(actual_file, user_id)
json_url = os.path.join(os.path.abspath(getData['FILE_PATH']), "", getData['FILE_NAME'])
foo = open(json_url)
file1 = json.load(foo)
foo.close()
column1 = list()
result_data = list();
### get columns from 1st Array
for i in file1[0].keys():
column1.append(i)
for value in files:
if actual_file!=value:
column2 = list()
common_col = list()
### Read 2nd File
getData2 = getFileDetails(value, user_id)
json_url2 = os.path.join(os.path.abspath(getData2['FILE_PATH']), "", getData2['FILE_NAME'])
foo2 = open(json_url2)
file2 = json.load(foo2)
foo2.close()
### get columns from 2nd Array
for i in file2[0].keys():
column2.append(i)
new_array = np.intersect1d(column1, column2)
for i in new_array:
common_col.append(i)
### Merge Both array
A = | pd.DataFrame(file1) | pandas.DataFrame |
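# --- Illustrative sketch (hypothetical helper, not taken from the Flask app
# above): one plausible way to consolidate two JSON record lists on their shared
# columns, mirroring the np.intersect1d + pd.DataFrame pattern in saveConsolidation.
# The app's actual merge strategy (join type, duplicate handling) is not visible
# in this excerpt, so the 'outer' join here is only an assumption.
import numpy as np
import pandas as pd

def consolidate_records(file1, file2, how='outer'):
    a = pd.DataFrame(file1)   # list of dicts -> DataFrame
    b = pd.DataFrame(file2)
    common_cols = list(np.intersect1d(a.columns, b.columns))
    if not common_cols:
        raise ValueError('no common columns to consolidate on')
    return pd.merge(a, b, on=common_cols, how=how)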
import requests
import glob
import os
import json
import zipfile
import shutil
import pandas as pd
import time
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'datasets')
api_adress = 'http://18.180.27.178:5050'
# api_adress = 'http://54.249.71.234:5050'
address_test_file = '/api/process'
address_upload_gt_dir = '/api/upload_gtdir'
address_current_synDir = "/api/current_synDir"
address_list_data_dir = "/api/list_data_dir"
address_view_data = "/api/view_data"
address_del_data = "/api/del_data"
address_down_data = "/api/down_data"
address_train_status = "/api/train_status"
address_trainning_history = "/api/trainning_history"
address_trainning_log = "/api/trainning_log"
address_stop_training = "/api/stop_training"
address_save_notes = '/api/save_notes'
address_train = '/api/train'
address_checkpoint_chose = '/api/checkpoint_chose'
address_down_checkpoint = '/api/down_checkpoint2'
address_get_hint = '/api/get_hint'
download_Server_Data = '/api/download_Server_Data'
address_upload_charlist = '/api/upload_charlist'
allowExtend = ['.jpg', '.JPG', '.png', '.PNG', '.jpeg', '.JPEG']
def zipdirImgs(imgpath, zipout):
# allowExtend = ['.jpg', '.JPG', '.png', '.PNG', '.jpeg', '.JPEG']
ziph = zipfile.ZipFile(zipout, "w", zipfile.ZIP_DEFLATED)
for f in os.listdir(imgpath):
if os.path.splitext(f)[-1] in allowExtend:
ziph.write(os.path.join(imgpath,f), f)
ziph.close()
def upload_file(filename):
files = {'file': open(filename, 'rb')}
r = requests.post(api_adress
+ address_test_file
, files=files, )
return r
def zipdir(imgpath,gtpath, zipout):
global allowExtend
ziph = zipfile.ZipFile(zipout, "w", zipfile.ZIP_DEFLATED)
gts = os.listdir(gtpath)
for f in os.listdir(imgpath):
if os.path.splitext(f)[-1] in allowExtend and '{}.txt'.format(os.path.splitext(f)[0]) in gts:
ziph.write(os.path.join(imgpath,f), os.path.join('imgs',f))
ziph.write(os.path.join(gtpath, '{}.txt'.format(os.path.splitext(f)[0])), os.path.join('gts', '{}.txt'.format(os.path.splitext(f)[0])))
ziph.close()
def request_current_synDir():
data = {'request': True}
r = requests.post(api_adress + address_current_synDir, data=data )
print(r.json())
print('request_current_synDir done',api_adress, r.status_code)
return r.json(), r.status_code
def upload_gt_dir(dir_imgs, dir_gts, newName):
import time
begin = time.time()
zipPath = os.path.join(DATA_DIR, '{}.zip'.format(newName))
zipdir(dir_imgs, dir_gts, zipPath)
files = {'file': open(zipPath, 'rb')}
res = requests.post(
url=api_adress + address_upload_gt_dir,
files=files
)
num = len(os.listdir(dir_gts))
print('upload {} files time: {}'.format(num * 2, time.time() - begin), api_adress, res.status_code)
# os.remove(zipPath)
mess = None if res.status_code == 200 else res.json()['mess']
return mess, res.status_code
def request_train(synDirs_chose, pretrain, numEpoch, prefixName, charlistName):
data = {'chose': synDirs_chose, 'pretrain':str(pretrain), 'numEpoch':numEpoch,\
'prefixName': str(prefixName), 'charlist': charlistName, 'syn_Ratio': 0.6, 'wiki_Ratio': 0.1, 'learning_rate':2e-5, 'use_hard_syn_gen':True}
print(data)
r = requests.post(api_adress + address_train, data=json.dumps(data), )
print('request_train done', api_adress, r.status_code)
mess = None if r.status_code==200 else r.json()['mess']
return mess , r.status_code
def upload_charlist(filename):
files = {'file': open(filename, 'rb')}
r = requests.post(api_adress
+ address_upload_charlist
, files=files, )
mess = None if r.status_code == 200 else r.json()['mess']
return mess, r.status_code
def request_train_status():
data = {'request': True}
r = requests.post(api_adress + address_train_status, data=data)
if r.status_code == 200 or r.status_code==201:
ret = r.json()
df = json.loads(ret['df'])
training_log = ret['training_log']
df = | pd.DataFrame.from_dict(df) | pandas.DataFrame.from_dict |
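# --- Illustrative sketch (assumes the server serialized the frame with
# DataFrame.to_json(); the real payload format of /api/train_status is not shown
# in this excerpt): round-tripping a training-history table the way
# request_train_status rebuilds it with json.loads + DataFrame.from_dict.
import json
import pandas as pd

history = pd.DataFrame({'epoch': [1, 2], 'loss': [0.9, 0.7]})
payload = {'df': history.to_json(), 'training_log': 'epoch 2 done'}

ret = json.loads(json.dumps(payload))              # stand-in for r.json()
df = pd.DataFrame.from_dict(json.loads(ret['df']))
print(df)                                          # index comes back as strings '0', '1'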
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
# Test dataframe to datframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
            # Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused but there so there won't be confusing list comprehension
# stuff in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = | pandas.DataFrame(data) | pandas.DataFrame |
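# --- Illustrative sketch (a minimal distillation of the parity pattern repeated
# throughout the test class above, reusing the pytest/df_equals helpers the
# dumped file already imports): run one operation on a modin frame and a pandas
# frame, then require either matching results or matching exception types.
def parity_check(modin_df, pandas_df, op, *args, **kwargs):
    try:
        pandas_result = getattr(pandas_df, op)(*args, **kwargs)
    except Exception as e:
        with pytest.raises(type(e)):
            getattr(modin_df, op)(*args, **kwargs)
    else:
        modin_result = getattr(modin_df, op)(*args, **kwargs)
        df_equals(modin_result, pandas_result)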
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import greater_than
from brightics.common.validation import greater_than_or_equal_to
from brightics.common.classify_input_type import check_col_type
def string_split(table, **params):
check_required_parameters(_string_split, params, ['table'])
return _string_split(table, **params)
def _string_split(table, input_col, hold_cols=None, delimiter=',', output_col_name='split', output_col_cnt=3, output_col_type='double', start_pos=0, end_pos=0):
out_table = pd.DataFrame()
output_arr = [x[start_pos:-end_pos].split(delimiter, output_col_cnt) \
if not pd.isnull(x) \
else [x] * output_col_cnt for x in list(table[input_col])] \
if end_pos > 0 \
else [x[start_pos:].split(delimiter, output_col_cnt) \
if not pd.isnull(x) \
else [x] * output_col_cnt for x in list(table[input_col])]
head_arr = [x[:start_pos] \
if not pd.isnull(x) \
else '' for x in list(table[input_col])]
tail_arr = [x[-end_pos:] \
if not | pd.isnull(x) | pandas.isnull |