prompt | completion | api
---|---|---
"""
# ---- script header ----
script name: functions.py
purpose of script: python functions required for running shellcast analysis
author: <NAME>
email: <EMAIL>
date created: 20200923
"""
import pandas # for data mgmt
import numpy # for data mgmt
import datetime as dt # for datetime mgmt
from pydap.client import open_url # to convert bin file
import requests # to check if website exists
from csv import writer
def aggregate_sco_ndfd_var_data(ndfd_var_data, var_period_index, var_period_vals, ndfd_var) :
"""
Description: Returns a tidy dataframe of SCO NDFD variable data (qpf or pop12) for a specified date
Parameters:
ndfd_var_data (pydap Dataset): Pydap dataset object for the specified datetime, returned by get_sco_ndfd_data(); a specific variable must be selected from this raw dataset before calling this function
var_period_index (array of int): Indices locating the subperiods of interest (e.g., 6 hr, 12 hr) within ndfd_var_data
var_period_vals (array): An array of subperiod values (e.g., 6hr, 12hr) for the full period of interest (e.g., 24hr)
ndfd_var (str): either "qpf" or "pop12", the SCO NDFD variable of interest
Returns:
var_agg_data_pd (data frame): A pandas dataframe with variable data aggregated to the full period of interest (e.g., 24hr)
Required:
import pandas
ndfd_var_data must be produced by running convert_sco_ndfd_datetime_str() and get_sco_ndfd_data() before calling this function
Source: none, custom function
Note: Subperiods are the reporting intervals within a full period. For example, qpf is reported in 6-hour subperiods, so the 6, 12, 18, and 24 hour subperiods must be summed to obtain a full 24-hour period.
"""
# all data for 1-day forecast (24 hrs)
var_period_raw_data = ndfd_var_data.data[0][var_period_index[0]:(var_period_index[-1]+1)]
# bind the four subperiod arrays (6, 12, 18, 24 hr) into one dataframe covering the full 24 hrs
var_period_full_df =
| pandas.DataFrame() | pandas.DataFrame |
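# Hedged sketch (not part of the original functions.py): the aggregation described in the
# docstring above sums the 6, 12, 18, and 24 hr subperiod grids into one 24-hour total.
# The arrays below are made-up placeholders for the real NDFD grids.
import numpy
import pandas
subperiod_grids = [numpy.full((2, 2), 1.5 * k) for k in range(1, 5)]  # stand-ins for the 6/12/18/24 hr grids
qpf_24hr = numpy.sum(subperiod_grids, axis=0)  # elementwise sum across the four subperiods
qpf_24hr_df = pandas.DataFrame(qpf_24hr)  # comparable to building var_period_full_df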
"""
Download, transform and simulate various datasets.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: MIT
from os.path import join
from re import sub
from collections import Counter
from itertools import product
from urllib.parse import urljoin
from string import ascii_lowercase
from zipfile import ZipFile
from io import BytesIO, StringIO
from sqlite3 import connect
from scipy.io import loadmat
import io
from rich.progress import track
import requests
import numpy as np
import pandas as pd
from sklearn.utils import check_X_y
from imblearn.datasets import make_imbalance
from research.utils import img_array_to_pandas
UCI_URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/"
KEEL_URL = "http://sci2s.ugr.es/keel/keel-dataset/datasets/imbalanced/"
GIC_URL = "http://www.ehu.eus/ccwintco/uploads/"
OPENML_URL = "https://www.openml.org/data/get_csv/"
FETCH_URLS = {
"breast_tissue": urljoin(UCI_URL, "00192/BreastTissue.xls"),
"ecoli": urljoin(UCI_URL, "ecoli/ecoli.data"),
"eucalyptus": urljoin(OPENML_URL, "3625/dataset_194_eucalyptus.arff"),
"glass": urljoin(UCI_URL, "glass/glass.data"),
"haberman": urljoin(UCI_URL, "haberman/haberman.data"),
"heart": urljoin(UCI_URL, "statlog/heart/heart.dat"),
"iris": urljoin(UCI_URL, "iris/bezdekIris.data"),
"libras": urljoin(UCI_URL, "libras/movement_libras.data"),
"liver": urljoin(UCI_URL, "liver-disorders/bupa.data"),
"pima": "https://gist.githubusercontent.com/ktisha/c21e73a1bd1700294ef790c56c8aec1f"
"/raw/819b69b5736821ccee93d05b51de0510bea00294/pima-indians-diabetes.csv",
"vehicle": urljoin(UCI_URL, "statlog/vehicle/"),
"wine": urljoin(UCI_URL, "wine/wine.data"),
"new_thyroid_1": urljoin(
urljoin(KEEL_URL, "imb_IRlowerThan9/"), "new-thyroid1.zip"
),
"new_thyroid_2": urljoin(
urljoin(KEEL_URL, "imb_IRlowerThan9/"), "new-thyroid2.zip"
),
"cleveland": urljoin(
urljoin(KEEL_URL, "imb_IRhigherThan9p2/"), "cleveland-0_vs_4.zip"
),
"led": urljoin(
urljoin(KEEL_URL, "imb_IRhigherThan9p2/"), "led7digit-0-2-4-5-6-7-8-9_vs_1.zip"
),
"page_blocks_1_3": urljoin(
urljoin(KEEL_URL, "imb_IRhigherThan9p1/"), "page-blocks-1-3_vs_4.zip"
),
"vowel": urljoin(urljoin(KEEL_URL, "imb_IRhigherThan9p1/"), "vowel0.zip"),
"yeast_1": urljoin(urljoin(KEEL_URL, "imb_IRlowerThan9/"), "yeast1.zip"),
"banknote_authentication": urljoin(
UCI_URL, "00267/data_banknote_authentication.txt"
),
"arcene": urljoin(UCI_URL, "arcene/"),
"audit": urljoin(UCI_URL, "00475/audit_data.zip"),
"spambase": urljoin(UCI_URL, "spambase/spambase.data"),
"parkinsons": urljoin(UCI_URL, "parkinsons/parkinsons.data"),
"ionosphere": urljoin(UCI_URL, "ionosphere/ionosphere.data"),
"breast_cancer": urljoin(UCI_URL, "breast-cancer-wisconsin/wdbc.data"),
"adult": urljoin(UCI_URL, "adult/adult.data"),
"abalone": urljoin(UCI_URL, "abalone/abalone.data"),
"acute": urljoin(UCI_URL, "acute/diagnosis.data"),
"annealing": urljoin(UCI_URL, "annealing/anneal.data"),
"census": urljoin(UCI_URL, "census-income-mld/census-income.data.gz"),
"contraceptive": urljoin(UCI_URL, "cmc/cmc.data"),
"covertype": urljoin(UCI_URL, "covtype/covtype.data.gz"),
"credit_approval": urljoin(UCI_URL, "credit-screening/crx.data"),
"dermatology": urljoin(UCI_URL, "dermatology/dermatology.data"),
"echocardiogram": urljoin(UCI_URL, "echocardiogram/echocardiogram.data"),
"flags": urljoin(UCI_URL, "flags/flag.data"),
"heart_disease": [
urljoin(UCI_URL, "heart-disease/processed.cleveland.data"),
urljoin(UCI_URL, "heart-disease/processed.hungarian.data"),
urljoin(UCI_URL, "heart-disease/processed.switzerland.data"),
urljoin(UCI_URL, "heart-disease/processed.va.data"),
],
"hepatitis": urljoin(UCI_URL, "hepatitis/hepatitis.data"),
"german_credit": urljoin(UCI_URL, "statlog/german/german.data"),
"thyroid": urljoin(UCI_URL, "thyroid-disease/thyroid0387.data"),
"first_order_theorem": urljoin(OPENML_URL, "1587932/phpPbCMyg"),
"gas_drift": urljoin(OPENML_URL, "1588715/phpbL6t4U"),
"autouniv_au7": urljoin(OPENML_URL, "1593748/phpmRPvKy"),
"autouniv_au4": urljoin(OPENML_URL, "1593744/phpiubDlf"),
"mice_protein": urljoin(OPENML_URL, "17928620/phpchCuL5"),
"steel_plates": urljoin(OPENML_URL, "18151921/php5s7Ep8"),
"cardiotocography": urljoin(OPENML_URL, "1593756/phpW0AXSQ"),
"waveform": urljoin(OPENML_URL, "60/dataset_60_waveform-5000.arff"),
"volkert": urljoin(OPENML_URL, "19335689/file1c556e3db171.arff"),
"asp_potassco": urljoin(OPENML_URL, "21377447/file18547f421393.arff"),
"wine_quality": urljoin(OPENML_URL, "4965268/wine-quality-red.arff"),
"mfeat_zernike": urljoin(OPENML_URL, "22/dataset_22_mfeat-zernike.arff"),
"gesture_segmentation": urljoin(OPENML_URL, "1798765/phpYLeydd"),
"texture": urljoin(OPENML_URL, "4535764/phpBDgUyY"),
"usps": urljoin(OPENML_URL, "19329737/usps.arff"),
"japanese_vowels": urljoin(OPENML_URL, "52415/JapaneseVowels.arff"),
"pendigits": urljoin(OPENML_URL, "32/dataset_32_pendigits.arff"),
"image_segmentation": urljoin(OPENML_URL, "18151937/phpyM5ND4"),
"baseball": urljoin(OPENML_URL, "3622/dataset_189_baseball.arff"),
"indian_pines": [
urljoin(GIC_URL, "2/22/Indian_pines.mat"),
urljoin(GIC_URL, "c/c4/Indian_pines_gt.mat"),
],
"salinas": [
urljoin(GIC_URL, "f/f1/Salinas.mat"),
urljoin(GIC_URL, "f/fa/Salinas_gt.mat"),
],
"salinas_a": [
urljoin(GIC_URL, "d/df/SalinasA.mat"),
urljoin(GIC_URL, "a/aa/SalinasA_gt.mat"),
],
"pavia_centre": [
urljoin(GIC_URL, "e/e3/Pavia.mat"),
urljoin(GIC_URL, "5/53/Pavia_gt.mat"),
],
"pavia_university": [
urljoin(GIC_URL, "e/ee/PaviaU.mat"),
urljoin(GIC_URL, "5/50/PaviaU_gt.mat"),
],
"kennedy_space_center": [
urljoin(GIC_URL, "2/26/KSC.mat"),
urljoin(GIC_URL, "a/a6/KSC_gt.mat"),
],
"botswana": [
urljoin(GIC_URL, "7/72/Botswana.mat"),
urljoin(GIC_URL, "5/58/Botswana_gt.mat"),
],
}
RANDOM_STATE = 0
class Datasets:
"""Base class to download and save datasets."""
def __init__(self, names="all"):
self.names = names
@staticmethod
def _modify_columns(data):
"""Rename and reorder columns of dataframe."""
X, y = data.drop(columns="target"), data.target
X.columns = range(len(X.columns))
return pd.concat([X, y], axis=1)
def download(self):
"""Download the datasets."""
if self.names == "all":
func_names = [func_name for func_name in dir(self) if "fetch_" in func_name]
else:
func_names = [
f"fetch_{name}".lower().replace(" ", "_") for name in self.names
]
self.content_ = []
for func_name in track(func_names, description="Datasets"):
name = func_name.replace("fetch_", "").upper().replace("_", " ")
fetch_data = getattr(self, func_name)
data = self._modify_columns(fetch_data())
self.content_.append((name, data))
return self
def save(self, path, db_name):
"""Save datasets."""
with connect(join(path, f"{db_name}.db")) as connection:
for name, data in self.content_:
data.to_sql(name, connection, index=False, if_exists="replace")
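# Small sketch (not in the original module) of how Datasets.download maps user-facing
# dataset names to fetch_* methods; the names below are examples only.
_example_names = ["breast tissue", "ecoli"]
_example_func_names = [f"fetch_{name}".lower().replace(" ", "_") for name in _example_names]
# _example_func_names == ["fetch_breast_tissue", "fetch_ecoli"]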
class ImbalancedBinaryDatasets(Datasets):
"""Class to download, transform and save binary class imbalanced
datasets."""
MULTIPLICATION_FACTORS = [2, 3]
@staticmethod
def _calculate_ratio(multiplication_factor, y):
"""Calculate ratio based on IRs multiplication factor."""
ratio = Counter(y).copy()
ratio[1] = int(ratio[1] / multiplication_factor)
return ratio
def _make_imbalance(self, data, multiplication_factor):
"""Undersample the minority class."""
X_columns = [col for col in data.columns if col != "target"]
X, y = check_X_y(data.loc[:, X_columns], data.target)
if multiplication_factor > 1.0:
sampling_strategy = self._calculate_ratio(multiplication_factor, y)
X, y = make_imbalance(
X, y, sampling_strategy=sampling_strategy, random_state=RANDOM_STATE
)
data = pd.DataFrame(np.column_stack((X, y)))
data.iloc[:, -1] = data.iloc[:, -1].astype(int)
return data
def download(self):
"""Download the datasets and append undersampled versions of them."""
super(ImbalancedBinaryDatasets, self).download()
undersampled_datasets = []
for (name, data), factor in list(
product(self.content_, self.MULTIPLICATION_FACTORS)
):
ratio = self._calculate_ratio(factor, data.target)
if ratio[1] >= 15:
data = self._make_imbalance(data, factor)
undersampled_datasets.append((f"{name} ({factor})", data))
self.content_ += undersampled_datasets
return self
def fetch_breast_tissue(self):
"""Download and transform the Breast Tissue Data Set.
The minority class is identified as the `car` and `fad`
labels and the majority class as the rest of the labels.
http://archive.ics.uci.edu/ml/datasets/breast+tissue
"""
data = pd.read_excel(FETCH_URLS["breast_tissue"], sheet_name="Data")
data = data.drop(columns="Case #").rename(columns={"Class": "target"})
data["target"] = data["target"].isin(["car", "fad"]).astype(int)
return data
def fetch_ecoli(self):
"""Download and transform the Ecoli Data Set.
The minority class is identified as the `pp` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/ecoli
"""
data = pd.read_csv(FETCH_URLS["ecoli"], header=None, delim_whitespace=True)
data = data.drop(columns=0).rename(columns={8: "target"})
data["target"] = data["target"].isin(["pp"]).astype(int)
return data
def fetch_eucalyptus(self):
"""Download and transform the Eucalyptus Data Set.
The minority class is identified as the `best` label
and the majority class as the rest of the labels.
https://www.openml.org/d/188
"""
data = pd.read_csv(FETCH_URLS["eucalyptus"])
data = data.iloc[:, -9:].rename(columns={"Utility": "target"})
data = data[data != "?"].dropna()
data["target"] = data["target"].isin(["best"]).astype(int)
return data
def fetch_glass(self):
"""Download and transform the Glass Identification Data Set.
The minority class is identified as the `1` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/glass+identification
"""
data = pd.read_csv(FETCH_URLS["glass"], header=None)
data = data.drop(columns=0).rename(columns={10: "target"})
data["target"] = data["target"].isin([1]).astype(int)
return data
def fetch_haberman(self):
"""Download and transform the Haberman's Survival Data Set.
The minority class is identified as the `1` label
and the majority class as the `0` label.
https://archive.ics.uci.edu/ml/datasets/Haberman's+Survival
"""
data = pd.read_csv(FETCH_URLS["haberman"], header=None)
data.rename(columns={3: "target"}, inplace=True)
data["target"] = data["target"].isin([2]).astype(int)
return data
def fetch_heart(self):
"""Download and transform the Heart Data Set.
The minority class is identified as the `2` label
and the majority class as the `1` label.
http://archive.ics.uci.edu/ml/datasets/statlog+(heart)
"""
data = pd.read_csv(FETCH_URLS["heart"], header=None, delim_whitespace=True)
data.rename(columns={13: "target"}, inplace=True)
data["target"] = data["target"].isin([2]).astype(int)
return data
def fetch_iris(self):
"""Download and transform the Iris Data Set.
The minority class is identified as the `1` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/iris
"""
data = pd.read_csv(FETCH_URLS["iris"], header=None)
data.rename(columns={4: "target"}, inplace=True)
data["target"] = data["target"].isin(["Iris-setosa"]).astype(int)
return data
def fetch_libras(self):
"""Download and transform the Libras Movement Data Set.
The minority class is identified as the `1` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/Libras+Movement
"""
data = pd.read_csv(FETCH_URLS["libras"], header=None)
data.rename(columns={90: "target"}, inplace=True)
data["target"] = data["target"].isin([1]).astype(int)
return data
def fetch_liver(self):
"""Download and transform the Liver Disorders Data Set.
The minority class is identified as the `1` label
and the majority class as the '2' label.
https://archive.ics.uci.edu/ml/datasets/liver+disorders
"""
data = pd.read_csv(FETCH_URLS["liver"], header=None)
data.rename(columns={6: "target"}, inplace=True)
data["target"] = data["target"].isin([1]).astype(int)
return data
def fetch_pima(self):
"""Download and transform the Pima Indians Diabetes Data Set.
The minority class is identified as the `1` label
and the majority class as the '0' label.
https://www.kaggle.com/uciml/pima-indians-diabetes-database
"""
data = pd.read_csv(FETCH_URLS["pima"], header=None, skiprows=9)
data.rename(columns={8: "target"}, inplace=True)
return data
def fetch_vehicle(self):
"""Download and transform the Vehicle Silhouettes Data Set.
The minority class is identified as the `1` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/Statlog+(Vehicle+Silhouettes)
"""
data = pd.DataFrame()
for letter in ascii_lowercase[0:9]:
partial_data = pd.read_csv(
urljoin(FETCH_URLS["vehicle"], "xa%s.dat" % letter),
header=None,
delim_whitespace=True,
)
partial_data = partial_data.rename(columns={18: "target"})
partial_data["target"] = partial_data["target"].isin(["van"]).astype(int)
data = pd.concat([data, partial_data])  # DataFrame.append was removed in recent pandas
return data
def fetch_wine(self):
"""Download and transform the Wine Data Set.
The minority class is identified as the `2` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/wine
"""
data = pd.read_csv(FETCH_URLS["wine"], header=None)
data.rename(columns={0: "target"}, inplace=True)
data["target"] = data["target"].isin([2]).astype(int)
return data
def fetch_new_thyroid_1(self):
"""Download and transform the Thyroid 1 Disease Data Set.
The minority class is identified as the `positive`
label and the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=145
"""
zipped_data = requests.get(FETCH_URLS["new_thyroid_1"]).content
unzipped_data = (
ZipFile(BytesIO(zipped_data)).read("new-thyroid1.dat").decode("utf-8")
)
data = pd.read_csv(
StringIO(sub(r"@.+\n+", "", unzipped_data)),
header=None,
sep=", ",
engine="python",
)
data.rename(columns={5: "target"}, inplace=True)
data["target"] = data["target"].isin(["positive"]).astype(int)
return data
def fetch_new_thyroid_2(self):
"""Download and transform the Thyroid 2 Disease Data Set.
The minority class is identified as the `positive`
label and the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=146
"""
zipped_data = requests.get(FETCH_URLS["new_thyroid_2"]).content
unzipped_data = (
ZipFile(BytesIO(zipped_data)).read("newthyroid2.dat").decode("utf-8")
)
data = pd.read_csv(
StringIO(sub(r"@.+\n+", "", unzipped_data)),
header=None,
sep=", ",
engine="python",
)
data.rename(columns={5: "target"}, inplace=True)
data["target"] = data["target"].isin(["positive"]).astype(int)
return data
def fetch_cleveland(self):
"""Download and transform the Heart Disease Cleveland Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=980
"""
zipped_data = requests.get(FETCH_URLS["cleveland"]).content
unzipped_data = (
ZipFile(BytesIO(zipped_data)).read("cleveland-0_vs_4.dat").decode("utf-8")
)
data = pd.read_csv(StringIO(sub(r"@.+\n+", "", unzipped_data)), header=None)
data.rename(columns={13: "target"}, inplace=True)
data["target"] = data["target"].isin(["positive"]).astype(int)
return data
def fetch_dermatology(self):
"""Download and transform the Dermatology Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=1330
"""
data = pd.read_csv(FETCH_URLS["dermatology"], header=None)
data.rename(columns={34: "target"}, inplace=True)
data.drop(columns=33, inplace=True)
data["target"] = data["target"].isin(["positive"]).astype(int)
return data
def fetch_led(self):
"""Download and transform the LED Display Domain Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=998
"""
zipped_data = requests.get(FETCH_URLS["led"]).content
unzipped_data = (
ZipFile(BytesIO(zipped_data))
.read("led7digit-0-2-4-5-6-7-8-9_vs_1.dat")
.decode("utf-8")
)
data = pd.read_csv(StringIO(sub(r"@.+\n+", "", unzipped_data)), header=None)
data.rename(columns={7: "target"}, inplace=True)
data["target"] = data["target"].isin(["positive"]).astype(int)
return data
def fetch_page_blocks_1_3(self):
"""Download and transform the Page Blocks 1-3 Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=124
"""
zipped_data = requests.get(FETCH_URLS["page_blocks_1_3"]).content
unzipped_data = (
ZipFile(BytesIO(zipped_data))
.read("page-blocks-1-3_vs_4.dat")
.decode("utf-8")
)
data = pd.read_csv(StringIO(sub(r"@.+\n+", "", unzipped_data)), header=None)
data.rename(columns={10: "target"}, inplace=True)
data["target"] = data["target"].isin(["positive"]).astype(int)
return data
def fetch_vowel(self):
"""Download and transform the Vowel Recognition Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=127
"""
zipped_data = requests.get(FETCH_URLS["vowel"]).content
unzipped_data = ZipFile(BytesIO(zipped_data)).read("vowel0.dat").decode("utf-8")
data = pd.read_csv(StringIO(sub(r"@.+\n+", "", unzipped_data)), header=None)
data.rename(columns={13: "target"}, inplace=True)
data["target"] = data["target"].isin([" positive"]).astype(int)
return data
def fetch_yeast_1(self):
"""Download and transform the Yeast 1 Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
http://sci2s.ugr.es/keel/dataset.php?cod=153
"""
zipped_data = requests.get(FETCH_URLS["yeast_1"]).content
unzipped_data = ZipFile(BytesIO(zipped_data)).read("yeast1.dat").decode("utf-8")
data = pd.read_csv(StringIO(sub(r"@.+\n+", "", unzipped_data)), header=None)
data.rename(columns={8: "target"}, inplace=True)
data["target"] = data["target"].isin([" positive"]).astype(int)
return data
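# Hedged sketch (not one of the fetch_* methods) of the KEEL download pattern used above:
# the zipped .dat file is read in memory, its "@..." header lines are stripped with
# re.sub, and the rest is parsed by pandas. A tiny in-memory zip stands in for the
# real KEEL archive here.
_demo_buffer = BytesIO()
with ZipFile(_demo_buffer, "w") as _demo_zip:
    _demo_zip.writestr("demo.dat", "@relation demo\n@data\n1.0, positive\n2.0, negative\n")
_demo_text = ZipFile(BytesIO(_demo_buffer.getvalue())).read("demo.dat").decode("utf-8")
_demo_data = pd.read_csv(
    StringIO(sub(r"@.+\n+", "", _demo_text)), header=None, sep=", ", engine="python"
)
# _demo_data has two columns: the feature value and the "positive"/"negative" label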
class BinaryDatasets(Datasets):
"""Class to download, transform and save binary class datasets."""
def fetch_banknote_authentication(self):
"""Download and transform the Banknote Authentication Data Set.
https://archive.ics.uci.edu/ml/datasets/banknote+authentication
"""
data = pd.read_csv(FETCH_URLS["banknote_authentication"], header=None)
data.rename(columns={4: "target"}, inplace=True)
return data
def fetch_arcene(self):
"""Download and transform the Arcene Data Set.
https://archive.ics.uci.edu/ml/datasets/Arcene
"""
url = FETCH_URLS["arcene"]
data, labels = [], []
for data_type in ("train", "valid"):
data.append(
pd.read_csv(
urljoin(url, f"ARCENE/arcene_{data_type}.data"),
header=None,
sep=" ",
).drop(columns=list(range(1998, 10001)))
)
labels.append(
pd.read_csv(
urljoin(
url,
("ARCENE/" if data_type == "train" else "")
+ f"arcene_{data_type}.labels",
),
header=None,
).rename(columns={0: "target"})
)
data = pd.concat(data, ignore_index=True)
labels = pd.concat(labels, ignore_index=True)
data = pd.concat([data, labels], axis=1)
data["target"] = data["target"].isin([1]).astype(int)
return data
def fetch_audit(self):
"""Download and transform the Audit Data Set.
https://archive.ics.uci.edu/ml/datasets/Audit+Data
"""
zipped_data = requests.get(FETCH_URLS["audit"]).content
unzipped_data = (
ZipFile(BytesIO(zipped_data))
.read("audit_data/audit_risk.csv")
.decode("utf-8")
)
data = pd.read_csv(StringIO(sub(r"@.+\n+", "", unzipped_data)), engine="python")
data = (
data.drop(columns=["LOCATION_ID"])
.rename(columns={"Risk": "target"})
.dropna()
)
return data
def fetch_spambase(self):
"""Download and transform the Spambase Data Set.
https://archive.ics.uci.edu/ml/datasets/Spambase
"""
data = pd.read_csv(FETCH_URLS["spambase"], header=None)
data.rename(columns={57: "target"}, inplace=True)
return data
def fetch_parkinsons(self):
"""Download and transform the Parkinsons Data Set.
https://archive.ics.uci.edu/ml/datasets/parkinsons
"""
data = pd.read_csv(FETCH_URLS["parkinsons"])
data = pd.concat(
[
data.drop(columns=["name", "status"]),
data[["status"]].rename(columns={"status": "target"}),
],
axis=1,
)
data["target"] = data["target"].isin([0]).astype(int)
return data
def fetch_ionosphere(self):
"""Download and transform the Ionosphere Data Set.
https://archive.ics.uci.edu/ml/datasets/ionosphere
"""
data = pd.read_csv(FETCH_URLS["ionosphere"], header=None)
data = data.drop(columns=[0, 1]).rename(columns={34: "target"})
data["target"] = data["target"].isin(["b"]).astype(int)
return data
def fetch_breast_cancer(self):
"""Download and transform the Breast Cancer Wisconsin Data Set.
https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)
"""
data = pd.read_csv(FETCH_URLS["breast_cancer"], header=None)
data = pd.concat(
[data.drop(columns=[0, 1]), data[[1]].rename(columns={1: "target"})], axis=1
)
data["target"] = data["target"].isin(["M"]).astype(int)
return data
class ContinuousCategoricalDatasets(Datasets):
"""Class to download, transform and save datasets with both continuous
and categorical features."""
@staticmethod
def _modify_columns(data, categorical_features):
"""Rename and reorder columns of dataframe."""
X, y = data.drop(columns="target"), data.target
X.columns = range(len(X.columns))
return pd.concat([X, y], axis=1), categorical_features
def download(self):
"""Download the datasets."""
if self.names == "all":
func_names = [func_name for func_name in dir(self) if "fetch_" in func_name]
else:
func_names = [
f"fetch_{name}".lower().replace(" ", "_") for name in self.names
]
self.content_ = []
for func_name in track(func_names, description="Datasets"):
name = func_name.replace("fetch_", "").upper().replace("_", " ")
fetch_data = getattr(self, func_name)
data, categorical_features = self._modify_columns(*fetch_data())
self.content_.append((name, data, categorical_features))
return self
def save(self, path, db_name):
"""Save datasets."""
with connect(join(path, f"{db_name}.db")) as connection:
for name, data, _ in self.content_:  # content_ holds (name, data, categorical_features) tuples
data.to_sql(name, connection, index=False, if_exists="replace")
def fetch_adult(self):
"""Download and transform the Adult Data Set.
https://archive.ics.uci.edu/ml/datasets/Adult
"""
data = pd.read_csv(FETCH_URLS["adult"], header=None, na_values=" ?").dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 3, 5, 6, 7, 8, 9, 13]
return data, categorical_features
def fetch_abalone(self):
"""Download and transform the Abalone Data Set.
https://archive.ics.uci.edu/ml/datasets/Abalone
"""
data = pd.read_csv(FETCH_URLS["abalone"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0]
return data, categorical_features
def fetch_acute(self):
"""Download and transform the Acute Inflammations Data Set.
https://archive.ics.uci.edu/ml/datasets/Acute+Inflammations
"""
data = pd.read_csv(
FETCH_URLS["acute"], header=None, sep="\t", decimal=",", encoding="UTF-16"
)
data["target"] = data[6].str[0] + data[7].str[0]
data.drop(columns=[6, 7], inplace=True)
categorical_features = list(range(1, 6))
return data, categorical_features
def fetch_annealing(self):
"""Download and transform the Annealing Data Set.
https://archive.ics.uci.edu/ml/datasets/Annealing
"""
data = pd.read_csv(FETCH_URLS["annealing"], header=None, na_values="?")
# some features are dropped; they have too many missing values
missing_feats = (data.isnull().sum(0) / data.shape[0]) < 0.1
data = data.iloc[:, missing_feats.values]
data[2].fillna(data[2].mode().squeeze(), inplace=True)
data = data.T.reset_index(drop=True).T
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0, 1, 5, 9]
return data, categorical_features
def fetch_census(self):
"""Download and transform the Census-Income (KDD) Data Set.
https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29
"""
data = pd.read_csv(FETCH_URLS["census"], header=None)
categorical_features = (
list(range(1, 5))
+ list(range(6, 16))
+ list(range(19, 29))
+ list(range(30, 38))
+ [39]
)
# some features are dropped; they have too many missing values
cols_ids = [1, 6, 9, 13, 14, 20, 21, 29, 31, 37]
categorical_features = np.argwhere(
np.delete(
data.rename(columns={k: f"nom_{k}" for k in categorical_features})
.columns.astype("str")
.str.startswith("nom_"),
cols_ids,
)
).squeeze()
data = data.drop(columns=cols_ids).T.reset_index(drop=True).T
# rows containing the " Not in universe" placeholder value are dropped
data = data.iloc[
data.applymap(lambda x: x != " Not in universe").all(1).values, :
]
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
return data, categorical_features
def fetch_contraceptive(self):
"""Download and transform the Contraceptive Method Choice Data Set.
https://archive.ics.uci.edu/ml/datasets/Contraceptive+Method+Choice
"""
data = pd.read_csv(FETCH_URLS["contraceptive"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [4, 5, 6, 8]
return data, categorical_features
def fetch_covertype(self):
"""Download and transform the Covertype Data Set.
https://archive.ics.uci.edu/ml/datasets/Covertype
"""
data = pd.read_csv(FETCH_URLS["covertype"], header=None)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
wilderness_area = pd.Series(
np.argmax(data.iloc[:, 10:14].values, axis=1), name=10
)
soil_type = pd.Series(np.argmax(data.iloc[:, 14:54].values, axis=1), name=11)
data = (
data.drop(columns=list(range(10, 54)))
.join(wilderness_area)
.join(soil_type)[list(range(0, 12)) + ["target"]]
)
categorical_features = [10, 11]
return data, categorical_features
def fetch_credit_approval(self):
"""Download and transform the Credit Approval Data Set.
https://archive.ics.uci.edu/ml/datasets/Credit+Approval
"""
data = pd.read_csv(
FETCH_URLS["credit_approval"], header=None, na_values="?"
).dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [0, 3, 4, 5, 6, 8, 9, 11, 12]
return data, categorical_features
def fetch_dermatology(self):
"""Download and transform the Dermatology Data Set.
https://archive.ics.uci.edu/ml/datasets/Dermatology
"""
data = pd.read_csv(
FETCH_URLS["dermatology"], header=None, na_values="?"
).dropna()
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = list(range(data.shape[1] - 1))
categorical_features.remove(33)
return data, categorical_features
def fetch_echocardiogram(self):
"""Download and transform the Echocardiogram Data Set.
https://archive.ics.uci.edu/ml/datasets/Echocardiogram
"""
data = pd.read_csv(
FETCH_URLS["echocardiogram"],
header=None,
error_bad_lines=False,
warn_bad_lines=False,
na_values="?",
)
data.drop(columns=[10, 11], inplace=True)
data.dropna(inplace=True)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 3]
return data, categorical_features
def fetch_flags(self):
"""Download and transform the Flags Data Set.
https://archive.ics.uci.edu/ml/datasets/Flags
"""
data = pd.read_csv(FETCH_URLS["flags"], header=None)
target = data[6].rename("target")
data = data.drop(columns=[0, 6]).T.reset_index(drop=True).T.join(target)
categorical_features = [
0,
1,
4,
8,
9,
10,
11,
12,
13,
14,
15,
21,
22,
23,
24,
25,
26,
27,
]
return data, categorical_features
def fetch_heart_disease(self):
"""Download and transform the Heart Disease Data Set.
https://archive.ics.uci.edu/ml/datasets/Heart+Disease
"""
data = (
pd.concat(
[
pd.read_csv(url, header=None, na_values="?")
for url in FETCH_URLS["heart_disease"]
],
ignore_index=True,
)
.drop(columns=[10, 11, 12])
.dropna()
)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 2, 5, 6, 8]
return data, categorical_features
def fetch_hepatitis(self):
"""Download and transform the Hepatitis Data Set.
https://archive.ics.uci.edu/ml/datasets/Hepatitis
"""
data = (
pd.read_csv(FETCH_URLS["hepatitis"], header=None, na_values="?")
.drop(columns=[15, 18])
.dropna()
)
target = data[0].rename("target")
data = data.drop(columns=[0]).T.reset_index(drop=True).T.join(target)
categorical_features = list(range(1, 13)) + [16]
return data, categorical_features
def fetch_german_credit(self):
"""Download and transform the German Credit Data Set.
https://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29
"""
data = pd.read_csv(FETCH_URLS["german_credit"], header=None, sep=" ")
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = (
np.argwhere(data.iloc[0, :-1].apply(lambda x: str(x)[0] == "A").values)
.squeeze()
.tolist()
)
return data, categorical_features
def fetch_heart(self):
"""Download and transform the Heart Data Set.
http://archive.ics.uci.edu/ml/datasets/statlog+(heart)
"""
data = pd.read_csv(FETCH_URLS["heart"], header=None, delim_whitespace=True)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
categorical_features = [1, 2, 5, 6, 8, 10, 12]
return data, categorical_features
def fetch_thyroid(self):
"""Download and transform the Thyroid Disease Data Set.
Label 0 corresponds to no disease found.
Label 1 corresponds to one or multiple diseases found.
https://archive.ics.uci.edu/ml/datasets/Thyroid+Disease
"""
data = (
pd.read_csv(FETCH_URLS["thyroid"], header=None, na_values="?")
.drop(columns=27)
.dropna()
.T.reset_index(drop=True)
.T
)
data.rename(columns={data.columns[-1]: "target"}, inplace=True)
data["target"] = (
data["target"].apply(lambda x: x.split("[")[0]) != "-"
).astype(int)
categorical_features = [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
18,
20,
22,
24,
26,
27,
]
return data, categorical_features
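# Sketch (not in the original module) of the (data, categorical_features) convention used
# by the class above: each fetch_* method returns a tuple that download() star-unpacks
# into _modify_columns, which renumbers the feature columns and keeps "target" last.
_demo_frame = pd.DataFrame({"colour": ["red", "blue"], "size": [1.0, 2.0], "target": [0, 1]})
_demo_X_y, _demo_cats = ContinuousCategoricalDatasets._modify_columns(_demo_frame, [0])
# list(_demo_X_y.columns) == [0, 1, "target"]; _demo_cats == [0] (the "colour" column)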
class MulticlassDatasets(Datasets):
"""Class to download, transform and save multiclass datasets."""
def fetch_first_order_theorem(self):
"""Download and transform the First Order Theorem Data Set.
https://www.openml.org/d/1475
"""
data = pd.read_csv(FETCH_URLS["first_order_theorem"])
data.rename(columns={"Class": "target"}, inplace=True)
return data
def fetch_gas_drift(self):
"""Download and transform the Gas Drift Data Set.
https://www.openml.org/d/1476
"""
data = pd.read_csv(FETCH_URLS["gas_drift"])
data.rename(columns={"Class": "target"}, inplace=True)
return data
def fetch_autouniv_au7(self):
"""Download and transform the AutoUniv au7 Data Set
https://www.openml.org/d/1552
"""
data = pd.read_csv(FETCH_URLS["autouniv_au7"])
data.rename(columns={"Class": "target"}, inplace=True)
data.target = data.target.apply(lambda x: x.replace("class", "")).astype(int)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
return data
def fetch_autouniv_au4(self):
"""Download and transform the AutoUniv au4 Data Set
https://www.openml.org/d/1548
"""
data = pd.read_csv(FETCH_URLS["autouniv_au4"])
data.rename(columns={"Class": "target"}, inplace=True)
data.target = data.target.apply(lambda x: x.replace("class", "")).astype(int)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
return data
def fetch_mice_protein(self):
"""Download and transform the Mice Protein Data Set
https://www.openml.org/d/40966
"""
data = pd.read_csv(FETCH_URLS["mice_protein"])
data.rename(columns={"class": "target"}, inplace=True)
data.drop(columns=["MouseID"], inplace=True)
data.replace("?", np.nan, inplace=True)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
mask2 = data.isna().sum() < 10
data = data.loc[:, mask & mask2].dropna().copy()
data.iloc[:, :-1] = data.iloc[:, :-1].astype(float)
mapper = {v: k for k, v in enumerate(data.target.unique())}
data.target = data.target.map(mapper)
return data
def fetch_steel_plates(self):
"""Download and transform the Steel Plates Fault Data Set.
https://www.openml.org/d/40982
"""
data = pd.read_csv(FETCH_URLS["steel_plates"])
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
mapper = {v: k for k, v in enumerate(data.target.unique())}
data.target = data.target.map(mapper)
return data
def fetch_cardiotocography(self):
"""Download and transform the Cardiotocography Data Set.
https://www.openml.org/d/1560
"""
data = pd.read_csv(FETCH_URLS["cardiotocography"])
data.rename(columns={"Class": "target"}, inplace=True)
mask = (data.iloc[:, :-1].nunique() > 10).tolist()
mask.append(True)
data = data.loc[:, mask].copy()
return data
def fetch_waveform(self):
"""Download and transform the Waveform Database Generator (version 2) Data Set.
https://www.openml.org/d/60
"""
data = pd.read_csv(FETCH_URLS["waveform"])
data.rename(columns={"class": "target"}, inplace=True)
return data
def fetch_volkert(self):
"""Download and transform the Volkert Data Set.
https://www.openml.org/d/41166
"""
data = pd.read_csv(FETCH_URLS["volkert"])
data.rename(columns={"class": "target"}, inplace=True)
mask = (data.iloc[:, 1:].nunique() > 100).tolist()
mask.insert(0, True)
data = data.loc[:, mask].copy()
return data
def fetch_vehicle(self):
"""Download and transform the Vehicle Silhouettes Data Set.
https://archive.ics.uci.edu/ml/datasets/Statlog+(Vehicle+Silhouettes)
"""
data =
| pd.DataFrame() | pandas.DataFrame |
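# Hedged usage sketch for the dataset classes above (not part of the original script).
# Downloading requires network access, so the pipeline calls are left commented; the
# dataset names and output path are hypothetical.
# imbalanced = ImbalancedBinaryDatasets(names=["iris", "wine"])
# imbalanced.download()               # runs fetch_iris / fetch_wine, then appends undersampled copies
# imbalanced.save(".", "imbalanced")  # writes ./imbalanced.db through sqlite3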
import matplotlib.pyplot as plt
import numpy as np
import os
from sklearn.decomposition import PCA
from data_process import image_data_collapse, torch_image_to_numpy, binarize_image_data
from RBM_train import RBM, TRAINING, TESTING, build_rbm_hopfield, load_rbm_hopfield
from settings import BETA, USE_BETA_SCHEDULER, DIR_OUTPUT, VISIBLE_FIELD, HRBM_CLASSIFIER_STEPS, HRBM_MANUAL_MAXSTEPS, \
MNIST_BINARIZATION_CUTOFF, DEFAULT_HOPFIELD, DIR_MODELS, DIR_CLASSIFY, CLASSIFIER
def setup_MNIST_classification(rbm):
label_dict = {}
for idx in range(rbm.dim_hidden):
idx_to_patternlabel_exact = rbm.pattern_labels[idx]
idx_to_patternlabel_class = idx_to_patternlabel_exact[0] # i.e. if it's '7_0', take '7'
key_tuple = tuple([0 if a != idx else 1 for a in range(rbm.dim_hidden)])
label_dict[key_tuple] = idx_to_patternlabel_class
neg_key_tuple = tuple([0 if a != idx else -1 for a in range(rbm.dim_hidden)])
label_dict[neg_key_tuple] = 'anti-%s' % idx_to_patternlabel_class
return label_dict
def extend_init_visible_state(visible_state, dim_hidden, init_mode='A'):
# mode A - initial class label is all -1
# mode B - initial class label is coin flips
assert init_mode in ['A', 'B']
if init_mode == 'A':
visible_state_appendage = np.ones(dim_hidden) * -1
else:
visible_state_appendage = np.random.binomial(size=dim_hidden, n=1, p=0.5) * 2 - 1 # convert to -1, 1 form
visible_state = np.concatenate((visible_state, visible_state_appendage))
return visible_state
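# Illustrative check (not part of the original file): in mode 'A' the appended class-label
# block is all -1, so a length-4 visible state with dim_hidden=3 becomes length 7.
_demo_visible = np.zeros(4)
_demo_extended = extend_init_visible_state(_demo_visible, dim_hidden=3, init_mode='A')
# _demo_extended.shape == (7,) and _demo_extended[-3:] are all -1.0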
def classify_MNIST(rbm, visual_init, dataset_idx, MNIST_output_to_label, sum_mode=False, onehot=False):
visual_step = visual_init
if onehot:
visual_step = extend_init_visible_state(visual_init, rbm.dim_hidden, init_mode='A')
def conv_class_vector_to_label_onehot(visual_step):
# TODO alternate condensing
pool = True
class_label = rbm.onehot_class_label(visual_step, condense=True, pool=pool)
if class_label is None:
return False, None
else:
return True, class_label
for idx in range(HRBM_MANUAL_MAXSTEPS):
visual_step, hidden_step, output_step = rbm.RBM_step(visual_step)
classified, classification = conv_class_vector_to_label_onehot(visual_step)
if classified:
break
if idx == HRBM_MANUAL_MAXSTEPS - 1:
print("******************** Edge case unclassified: (%d) step == MAX_STEPS_CLASSIFY - 1" % dataset_idx)
#else:
# print('Step %d (data #%d) = %s' % (idx, dataset_idx, classification))
else:
def conv_class_vector_to_label(output_as_ints):
if tuple(output_as_ints) in MNIST_output_to_label.keys():
return True, MNIST_output_to_label[tuple(output_as_ints)]
else:
return False, output_as_ints
if sum_mode:
output_converter = rbm.truncate_output_subpatterns
else:
output_converter = rbm.truncate_output # rbm.truncate_output_max
for idx in range(HRBM_MANUAL_MAXSTEPS):
visual_step, hidden_step, output_step = rbm.RBM_step(visual_step)
output_truncated = output_converter(output_step)
classified, classification = conv_class_vector_to_label(output_truncated)
if classified:
break
if idx == HRBM_MANUAL_MAXSTEPS - 1:
print("******************** Edge case unclassified: (%d) step == MAX_STEPS_CLASSIFY - 1" % dataset_idx)
#print("\t output_step:", output_step)
#output_special = np.zeros(10, dtype=float)
#K = 10
#for idx in range(10):
# output_special[idx] = np.sum(output_step[K*idx:K*(idx + 1)])
#print("\t output_special:", output_special)
#print("\t output_truncated:", output_truncated)
#print("\t classification:", classification)
return classification
def rbm_features_MNIST(rbm, visual_init, steps=HRBM_CLASSIFIER_STEPS, use_hidden=True, plot_visible=False, titlemod='',
scheduler=USE_BETA_SCHEDULER, onehot=False):
visual_step = visual_init
P = rbm.dim_hidden
if use_hidden:
if onehot:
visual_step = extend_init_visible_state(visual_init, rbm.dim_hidden, init_mode='A')
features = np.zeros(P * 2)
else:
features = np.zeros(P)
else:
features = np.zeros(rbm.dim_visible)
# build temperature schedule TODO move out for speed
beta_schedule = [BETA for _ in range(steps)]
if scheduler:
assert steps == 5
switchpoint = 1
for idx in range(steps):
if idx < switchpoint:
beta_schedule[idx] = 200.0 # 2 seems too strong, 8 too weak
else:
beta_schedule[idx] = 8.0 - idx
if plot_visible:
rbm.plot_visible(visual_init, title='%s_0' % titlemod)
for idx in range(steps):
visual_step, hidden_step, _ = rbm.RBM_step(visual_step, beta=beta_schedule[idx])
if plot_visible:
title = '%s_%d' % (titlemod, idx+1)
rbm.plot_visible(visual_step, title=title)
if use_hidden:
features[0:P] = hidden_step
if onehot:
onehot_segment = visual_step[-P:]
features[P:] = onehot_segment
else:
features[:] = visual_step
return features
def confusion_matrix_from_pred(predictions, true_labels):
#confusion_matrix = np.zeros((rbm.dim_hidden, rbm.dim_hidden), dtype=int)
confusion_matrix_10 = np.zeros((10, 10), dtype=int)
matches = [False for _ in predictions]
for idx in range(len(predictions)):
if true_labels[idx] == predictions[idx]:
matches[idx] = True
confusion_matrix_10[true_labels[idx], predictions[idx]] += 1
return confusion_matrix_10, matches
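# Quick illustrative check (not in the original file) of confusion_matrix_from_pred:
# three correct predictions land on the diagonal, the single error off it.
_demo_preds = np.array([3, 3, 7, 1])
_demo_truth = np.array([3, 3, 7, 2])
_demo_cm, _demo_matches = confusion_matrix_from_pred(_demo_preds, _demo_truth)
# _demo_matches == [True, True, True, False]; _demo_cm[3, 3] == 2, _demo_cm[7, 7] == 1, _demo_cm[2, 1] == 1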
def classifier_on_rbm_features(rbm, dataset_train, dataset_test, use_hidden=True, binarize=False, classifier=CLASSIFIER,
fast=None, onehot=False):
"""
fast: None or a 4-tuple of X_train, y_train, X_test, y_test
"""
if use_hidden:
if onehot:
feature_dim = rbm.dim_hidden * 2
else:
feature_dim = rbm.dim_hidden
else:
feature_dim = rbm.dim_visible
def get_X_y_features(dataset, steps=HRBM_CLASSIFIER_STEPS, scheduler=USE_BETA_SCHEDULER):
X = np.zeros((len(dataset), feature_dim))
y = np.zeros(len(dataset), dtype=int)
for idx, pair in enumerate(dataset):
elem_arr, elem_label = pair
if use_hidden:
preprocessed_input = binarize_image_data(image_data_collapse(elem_arr), threshold=MNIST_BINARIZATION_CUTOFF)
features = rbm_features_MNIST(rbm, preprocessed_input, titlemod='%d_true%d' % (idx,elem_label),
steps=steps, scheduler=scheduler, onehot=onehot)
else:
preprocessed_input = image_data_collapse(elem_arr)
if binarize:
preprocessed_input = binarize_image_data(preprocessed_input, threshold=MNIST_BINARIZATION_CUTOFF)
features = preprocessed_input
X[idx, :] = features
y[idx] = elem_label
return X, y
def get_X_fast(X):
X_features = np.zeros((X.shape[0], feature_dim))
for idx in range(X.shape[0]):
visible_input = X[idx, :]
features = rbm_features_MNIST(rbm, visible_input, use_hidden=use_hidden, titlemod='', plot_visible=False,
onehot=onehot)
X_features[idx, :] = features
return X_features
print("classifier_on_rbm_features; Step 1: get features for training")
if fast is None:
X_train_reduced, y_train = get_X_y_features(dataset_train)
else:
X_train_reduced = get_X_fast(fast[0])
y_train = fast[1]
print("classifier_on_rbm_features; Step 2: train classifier layer")
classifier.fit(X_train_reduced, y_train) # fit data
print("classifier_on_rbm_features; Step 3: get features for testing")
if fast is None:
X_test_reduced, y_test = get_X_y_features(dataset_test)
#X_test_reduced, y_test = get_X_y_features(dataset_test, steps=1, scheduler=False) # TODO try diff steps/beta rules for train vs test?
else:
X_test_reduced = get_X_fast(fast[2])
y_test = fast[3]
print("classifier_on_rbm_features; Step 4: classification metrics and confusion matrix")
# sparsity1 = np.mean(clf1.coef_ == 0) * 100 # percentage of zero weights
predictions = classifier.predict(X_test_reduced).astype(int)
confusion_matrix, matches = confusion_matrix_from_pred(predictions, y_test)
acc = float(matches.count(True) / len(matches))
print("Successful test cases: %d/%d (%.3f)" % (matches.count(True), len(matches), acc))
return confusion_matrix, acc
def get_X_y_dataset(dataset, dim_visible, binarize=True):
X = np.zeros((len(dataset), dim_visible))
y = np.zeros(len(dataset), dtype=int)
for idx, pair in enumerate(dataset):
elem_arr, elem_label = pair
preprocessed_input = image_data_collapse(elem_arr)
if binarize:
preprocessed_input = binarize_image_data(preprocessed_input, threshold=MNIST_BINARIZATION_CUTOFF)
features = preprocessed_input
X[idx, :] = features
y[idx] = elem_label
return X, y
def classifier_on_proj(dataset_train, dataset_test, dim_visible, binarize=False, dim=10, classifier=CLASSIFIER, proj=None, fast=None):
if proj is not None:
assert proj.shape == (dim_visible, dim)
print("classifier_on_proj; Step 1: get features for training")
if fast is None:
X_train, y_train = get_X_y_dataset(dataset_train, dim_visible, binarize=binarize)
else:
X_train = fast[0]
y_train = fast[1]
if proj is None:
pca = PCA(n_components=dim)
pca.fit(X_train)
X_train_reduced = pca.transform(X_train)
else:
X_train_reduced = np.dot(X_train, proj)
print("classifier_on_proj; Step 2: train classifier layer")
classifier.fit(X_train_reduced, y_train) # fit data
print("classifier_on_proj; Step 3: get features for testing")
# use PCA to reduce dim of testing set
if fast is None:
X_test, y_test = get_X_y_dataset(dataset_test, dim_visible, binarize=binarize)
else:
X_test = fast[2]
y_test = fast[3]
if proj is None:
X_test_reduced = pca.transform(X_test)
else:
X_test_reduced = np.dot(X_test, proj)
print("classifier_on_proj; Step 4: classification metrics and confusion matrix")
predictions = classifier.predict(X_test_reduced).astype(int)
confusion_matrix, matches = confusion_matrix_from_pred(predictions, y_test)
acc = float(matches.count(True) / len(matches))
print("Successful test cases: %d/%d (%.3f)" % (matches.count(True), len(matches), acc))
return confusion_matrix, acc
def plot_confusion_matrix(confusion_matrix, classlabels=list(range(10)), title='', save=None):
# Ref: https://stackoverflow.com/questions/35572000/how-can-i-plot-a-confusion-matrix
import seaborn as sn
import pandas as pd
ylabels = classlabels
if confusion_matrix.shape[1] == len(ylabels) + 1:
xlabels = ylabels + ['Other']
else:
xlabels = ylabels
df_cm =
| pd.DataFrame(confusion_matrix, index=ylabels, columns=xlabels) | pandas.DataFrame |
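# Hedged sketch (not from the original source) of how a confusion-matrix frame like df_cm
# is typically rendered; the figure size and annotation format are assumptions, not the
# original plot settings.
import numpy as np
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
_toy_cm = np.diag(np.arange(1, 11))  # toy 10x10 confusion matrix
_toy_df_cm = pd.DataFrame(_toy_cm, index=list(range(10)), columns=list(range(10)))
plt.figure(figsize=(7, 6))
sn.heatmap(_toy_df_cm, annot=True, fmt="d", cbar=False)  # annotated cell counts
plt.xlabel("predicted label")
plt.ylabel("true label")
# plt.show() or plt.savefig(...) depending on the `save` argument of plot_confusion_matrix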
import datetime
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Timedelta, merge_asof, read_csv, to_datetime
import pandas._testing as tm
from pandas.core.reshape.merge import MergeError
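# Hedged illustration (not one of the test cases below) of merge_asof's default
# direction="backward": each left key picks the last right row whose key is <= it.
_left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
_right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
_backward = pd.merge_asof(_left, _right, on="a")
# _backward["right_val"].tolist() == [1, 3, 7]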
class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath("reshape", "merge", "data", name)
x = read_csv(path)
if dedupe:
x = x.drop_duplicates(["time", "ticker"], keep="last").reset_index(
drop=True
)
x.time = to_datetime(x.time)
return x
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.trades = self.read_data(datapath, "trades.csv")
self.quotes = self.read_data(datapath, "quotes.csv", dedupe=True)
self.asof = self.read_data(datapath, "asof.csv")
self.tolerance = self.read_data(datapath, "tolerance.csv")
self.allow_exact_matches = self.read_data(datapath, "allow_exact_matches.csv")
self.allow_exact_matches_and_tolerance = self.read_data(
datapath, "allow_exact_matches_and_tolerance.csv"
)
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = pd.merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.048",
"20160525 13:30:00.049",
"20160525 13:30:00.072",
"20160525 13:30:00.075",
]
),
"ticker": [
"GOOG",
"MSFT",
"MSFT",
"MSFT",
"GOOG",
"AAPL",
"GOOG",
"MSFT",
],
"bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
},
columns=["time", "ticker", "bid", "ask"],
)
pd.merge_asof(trades, quotes, on="time", by="ticker")
pd.merge_asof(
trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
"ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
},
columns=["time", "ticker", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(
trades,
quotes,
on="time",
by="ticker",
tolerance=pd.Timedelta("10ms"),
allow_exact_matches=False,
)
tm.assert_frame_equal(result, expected)
def test_examples3(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = pd.merge_asof(left, right, on="a", direction="forward")
tm.assert_frame_equal(result, expected)
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype("category")
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype("category")
expected.ticker = expected.ticker.astype("category")
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index("time")
quotes = self.quotes
result = merge_asof(
trades, quotes, left_index=True, right_on="time", by="ticker"
)
# left-only index uses right's index, oddly
expected.index = result.index
# time column appears after left's columns
expected = expected[result.columns]
tm.assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_on="time", right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_multi_index_on(self):
def index_by_time_then_arbitrary_new_level(df):
df = df.set_index("time")
df = pd.concat([df, df], keys=["f1", "f2"], names=["f", "time"])
return df.reorder_levels([1, 0]).sort_index()
trades = index_by_time_then_arbitrary_new_level(self.trades)
quotes = index_by_time_then_arbitrary_new_level(self.quotes)
expected = index_by_time_then_arbitrary_new_level(self.asof)
result = merge_asof(trades, quotes, on="time", by=["ticker"])
tm.assert_frame_equal(result, expected)
def test_on_and_index(self):
# "on" parameter and index together is prohibited
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, left_on="price", left_index=True, right_index=True
)
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, right_on="bid", left_index=True, right_index=True
)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(
trades, quotes, on="time", left_by="ticker", right_by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != "MSFT"]
result = merge_asof(trades, q, on="time", by="ticker")
expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
tm.assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_heterogeneous_types(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": [1, 0, 0, 0, 1, 2],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a"],
[pd.to_datetime("20160602"), 2, "a"],
[pd.to_datetime("20160603"), 1, "b"],
[pd.to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
right = pd.DataFrame(
[
[pd.to_datetime("20160502"), 1, "a", 1.0],
[pd.to_datetime("20160502"), 2, "a", 2.0],
[pd.to_datetime("20160503"), 1, "b", 3.0],
[pd.to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
expected = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a", 1.0],
[pd.to_datetime("20160602"), 2, "a", 2.0],
[pd.to_datetime("20160603"), 1, "b", 3.0],
[pd.to_datetime("20160603"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
result = pd.merge_asof(
left, right, left_index=True, right_index=True, by=["k1", "k2"]
)
tm.assert_frame_equal(expected, result)
with pytest.raises(MergeError):
pd.merge_asof(
left,
right,
left_index=True,
right_index=True,
left_by=["k1", "k2"],
right_by=["k1"],
)
def test_basic2(self, datapath):
expected = self.read_data(datapath, "asof2.csv")
trades = self.read_data(datapath, "trades2.csv")
quotes = self.read_data(datapath, "quotes2.csv", dedupe=True)
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = (
lambda x: x[x.ticker == "MSFT"]
.drop("ticker", axis=1)
.reset_index(drop=True)
)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes, on="time")
tm.assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, on=["time", "ticker"], by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, by="ticker")
def test_with_duplicates(self, datapath):
q = (
pd.concat([self.quotes, self.quotes])
.sort_values(["time", "ticker"])
.reset_index(drop=True)
)
result = merge_asof(self.trades, q, on="time", by="ticker")
expected = self.read_data(datapath, "asof.csv")
tm.assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]})
df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]})
result = merge_asof(df1, df2, on="key")
expected = pd.DataFrame(
{"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]}
)
tm.assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", allow_exact_matches="foo"
)
def test_valid_tolerance(self):
trades = self.trades
quotes = self.quotes
# dti
merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s"))
# integer
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1,
)
# incompat
with pytest.raises(MergeError):
merge_asof(trades, quotes, on="time", by="ticker", tolerance=1)
# invalid
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1.0,
)
# invalid negative
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s")
)
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=-1,
)
def test_non_sorted(self):
trades = self.trades.sort_values("time", ascending=False)
quotes = self.quotes.sort_values("time", ascending=False)
# we require that we are already sorted on time & quotes
assert not trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
trades = self.trades.sort_values("time")
assert trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
quotes = self.quotes.sort_values("time")
assert trades.time.is_monotonic
assert quotes.time.is_monotonic
# ok, though has dupes
merge_asof(trades, self.quotes, on="time", by="ticker")
@pytest.mark.parametrize(
"tolerance",
[Timedelta("1day"), datetime.timedelta(days=1)],
ids=["pd.Timedelta", "datetime.timedelta"],
)
def test_tolerance(self, tolerance):
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker", tolerance=tolerance)
expected = self.tolerance
tm.assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="forward", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_tz(self):
# GH 14844
left = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value1": np.arange(5),
}
)
right = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-01"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value2": list("ABCDE"),
}
)
result = pd.merge_asof(left, right, on="date", tolerance=pd.Timedelta("1 day"))
expected = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value1": np.arange(5),
"value2": list("BCDEE"),
}
)
tm.assert_frame_equal(result, expected)
def test_tolerance_float(self):
# GH22981
left = pd.DataFrame({"a": [1.1, 3.5, 10.9], "left_val": ["a", "b", "c"]})
right = pd.DataFrame(
{"a": [1.0, 2.5, 3.3, 7.5, 11.5], "right_val": [1.0, 2.5, 3.3, 7.5, 11.5]}
)
expected = pd.DataFrame(
{
"a": [1.1, 3.5, 10.9],
"left_val": ["a", "b", "c"],
"right_val": [1, 3.3, np.nan],
}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=0.5)
tm.assert_frame_equal(result, expected)
def test_index_tolerance(self):
# GH 15135
expected = self.tolerance.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = pd.merge_asof(
trades,
quotes,
left_index=True,
right_index=True,
by="ticker",
tolerance=pd.Timedelta("1day"),
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches(self):
result = merge_asof(
self.trades, self.quotes, on="time", by="ticker", allow_exact_matches=False
)
expected = self.allow_exact_matches
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 7, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="forward", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 3, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="nearest", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance(self):
result = merge_asof(
self.trades,
self.quotes,
on="time",
by="ticker",
tolerance=Timedelta("100ms"),
allow_exact_matches=False,
)
expected = self.allow_exact_matches_and_tolerance
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame(
{"time": pd.to_datetime(["2016-07-15 13:30:00.030"]), "username": ["bob"]}
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = pd.merge_asof(df1, df2, on="time")
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [2],
}
)
tm.assert_frame_equal(result, expected)
result = pd.merge_asof(df1, df2, on="time", allow_exact_matches=False)
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [1],
}
)
tm.assert_frame_equal(result, expected)
result = pd.merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=pd.Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance3(self):
# GH 13709
df1 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
}
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = pd.merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=pd.Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
"version": [np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 6, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 6, 11]}
)
result = pd.merge_asof(
left,
right,
on="a",
direction="forward",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 4, 11]}
)
result = pd.merge_asof(
left,
right,
on="a",
direction="nearest",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_forward_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Y", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, np.nan, 11, 15, 16],
}
)
result = pd.merge_asof(left, right, on="a", by="b", direction="forward")
tm.assert_frame_equal(result, expected)
def test_nearest_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Z", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, 1, 11, 11, 16],
}
)
result = pd.merge_asof(left, right, on="a", by="b", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_by_int(self):
# we specialize by type, so test that this is correct
df1 = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
},
columns=["time", "key", "value1"],
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.015",
"20160525 13:30:00.020",
"20160525 13:30:00.025",
"20160525 13:30:00.035",
"20160525 13:30:00.040",
"20160525 13:30:00.055",
"20160525 13:30:00.060",
"20160525 13:30:00.065",
]
),
"key": [2, 1, 1, 3, 2, 1, 2, 3],
"value2": [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8],
},
columns=["time", "key", "value2"],
)
result = pd.merge_asof(df1, df2, on="time", by="key")
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
"value2": [2.2, 2.1, 2.3, 2.4, 2.7],
},
columns=["time", "key", "value1", "value2"],
)
tm.assert_frame_equal(result, expected)
def test_on_float(self):
# mimics how to determine the minimum-price variation
df1 = pd.DataFrame(
{
"price": [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "price"],
)
df2 = pd.DataFrame(
{"price": [0.0, 1.0, 100.0], "mpv": [0.0001, 0.01, 0.05]},
columns=["price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="price")
expected = pd.DataFrame(
{
"symbol": list("BGACEDF"),
"price": [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90],
"mpv": [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05],
},
columns=["symbol", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
def test_on_specialized_type(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame(
{"value": [5, 2, 25, 100, 78, 120, 79], "symbol": list("ABCDEFG")},
columns=["symbol", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "result": list("xyzw")},
columns=["value", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"value": [2, 5, 25, 78, 79, 100, 120],
"result": list("xxxxxyz"),
},
columns=["symbol", "value", "result"],
)
expected.value = dtype(expected.value)
tm.assert_frame_equal(result, expected)
def test_on_specialized_type_by_int(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame(
{
"value": [5, 2, 25, 100, 78, 120, 79],
"key": [1, 2, 3, 2, 3, 1, 2],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "key", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "key": [1, 2, 2, 3], "result": list("xyzw")},
columns=["value", "key", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value", by="key")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"key": [2, 1, 3, 3, 2, 2, 1],
"value": [2, 5, 25, 78, 79, 100, 120],
"result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"],
},
columns=["symbol", "key", "value", "result"],
)
expected.value = dtype(expected.value)
tm.assert_frame_equal(result, expected)
def test_on_float_by_int(self):
# type specialize both "by" and "on" parameters
df1 = pd.DataFrame(
{
"symbol": list("AAABBBCCC"),
"exch": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"price": [
3.26,
3.2599,
3.2598,
12.58,
12.59,
12.5,
378.15,
378.2,
378.25,
],
},
columns=["symbol", "exch", "price"],
)
df2 = pd.DataFrame(
{
"exch": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"price": [0.0, 1.0, 100.0, 0.0, 5.0, 100.0, 0.0, 5.0, 1000.0],
"mpv": [0.0001, 0.01, 0.05, 0.0001, 0.01, 0.1, 0.0001, 0.25, 1.0],
},
columns=["exch", "price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
df2 = df2.sort_values("price").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="price", by="exch")
expected = pd.DataFrame(
{
"symbol": list("AAABBBCCC"),
"exch": [3, 2, 1, 3, 1, 2, 1, 2, 3],
"price": [
3.2598,
3.2599,
3.26,
12.5,
12.58,
12.59,
378.15,
378.2,
378.25,
],
"mpv": [0.0001, 0.0001, 0.01, 0.25, 0.01, 0.01, 0.05, 0.1, 0.25],
},
columns=["symbol", "exch", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
def test_merge_datatype_error_raises(self):
msg = r"incompatible merge keys \[0\] .*, must be the same type"
left = pd.DataFrame({"left_val": [1, 5, 10], "a": ["a", "b", "c"]})
right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7], "a": [1, 2, 3, 6, 7]})
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, on="a")
def test_merge_datatype_categorical_error_raises(self):
msg = (
r"incompatible merge keys \[0\] .* both sides category, "
"but not equal ones"
)
left = pd.DataFrame(
{"left_val": [1, 5, 10], "a": pd.Categorical(["a", "b", "c"])}
)
right = pd.DataFrame(
{
"right_val": [1, 2, 3, 6, 7],
"a":
|
pd.Categorical(["a", "X", "c", "X", "b"])
|
pandas.Categorical
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# #2689, #2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
|
pd.Timestamp('2011-01-02')
|
pandas.Timestamp
|
import os
import pandas as pd
import dash
import dash_bootstrap_components as dbc
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
from dash_tabulator import DashTabulator
from . import tools as T
from ms_mint.standards import PEAKLIST_COLUMNS
from ms_mint.peaklists import read_peaklists
columns = [{"name": i, "id": i,
"selectable": True} for i in PEAKLIST_COLUMNS]
tabulator_options = {
"groupBy": "Label",
"selectable": True,
"headerFilterLiveFilterDelay":3000,
"layout": "fitDataFill",
"height": "900px",
}
downloadButtonType = {"css": "btn btn-primary", "text":"Export", "type":"csv", "filename":"Metadata"}
clearFilterButtonType = {"css": "btn btn-outline-dark", "text":"Clear Filters"}
pkl_table = html.Div(id='pkl-table-container',
style={'minHeight': 100, 'margin': '50px 50px 0px 0px'},
children=[
DashTabulator(id='pkl-table',
columns=T.gen_tabulator_columns(['peak_label', 'mz_mean','mz_width', 'rt', 'rt_min', 'rt_max', 'intensity_threshold', 'peaklist_name']),
options=tabulator_options,
downloadButtonType=downloadButtonType,
clearFilterButtonType=clearFilterButtonType
)
])
_label = 'Peaklist'
_layout = html.Div([
html.H3('Peaklist'),
dcc.Upload(
id='pkl-upload',
children=html.Div([
'Drag and Drop or ',
html.A('Select Files')
]),
style={
'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'
},
# Allow multiple files to be uploaded
multiple=True
),
dcc.Dropdown('pkl-ms-mode', options=[
{'value': 'positive', 'label': 'Add proton mass to formula (positive mode)'},
{'value': 'negative', 'label': 'Subtract proton mass from formula (negative mode)'}], value=None),
html.Button('Save', id='pkl-save'),
html.Button('Clear', id='pkl-clear', style={'float': 'right'}),
pkl_table
])
_outputs = html.Div(id='pkl-outputs', children=[
html.Div(id={'index': 'pkl-upload-output', 'type': 'output'}),
html.Div(id={'index': 'pkl-save-output', 'type': 'output'}),
html.Div(id={'index': 'pkl-clear-output', 'type': 'output'}),
])
def layout():
return _layout
def callbacks(app, fsc=None, cache=None):
@app.callback(
Output('pkl-table', 'data'),
Input('pkl-upload', 'contents'),
Input('pkl-ms-mode', 'value'),
Input('pkl-clear', 'n_clicks'),
State('pkl-upload', 'filename'),
State('pkl-upload', 'last_modified'),
State('wdir', 'children')
)
def pkl_upload(list_of_contents, ms_mode, clear, list_of_names, list_of_dates, wdir):
prop_id = dash.callback_context.triggered[0]['prop_id']
if prop_id.startswith('pkl-clear'):
return pd.DataFrame(columns=PEAKLIST_COLUMNS).to_dict('records')
target_dir = os.path.join(wdir, 'peaklist')
fn = os.path.join( target_dir, 'peaklist.csv')
if list_of_contents is not None:
dfs = [T.parse_pkl_files(c, n, d, target_dir, ms_mode=ms_mode) for c, n, d in
zip(list_of_contents, list_of_names, list_of_dates) ]
data = dfs[0].to_dict('records')
return data
elif os.path.isfile(fn):
return read_peaklists(fn).to_dict('records')
@app.callback(
Output({'index': 'pkl-save-output', 'type': 'output'}, 'children'),
Input('pkl-save', 'n_clicks'),
Input('pkl-table', 'data'),
State('wdir', 'children')
)
def plk_save(n_clicks, data, wdir):
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
# Type: module
# String form: <module 'WindPy' from '/opt/conda/lib/python3.6/WindPy.py'>
# File: /opt/conda/lib/python3.6/WindPy.py
# Source:
from ctypes import *
import threading
import traceback
from datetime import datetime, date, time, timedelta
import time as t
import re
from WindData import *
from WindBktData import *
from XMLParser import XMLReader
import pandas as pd
import logging
import getpass
r = XMLReader("/wind/serverapi/wsq_decode.xml")
# import speedtcpclient as client
expolib = None
speedlib = None
TDB_lib = None
c_lib = None
# For test use! Should be replaced with a real userID
# userID = "1214779"
api_retry = 1
interval = 2
userName = getpass.getuser()
authDataPath = "/home/" + userName + "/.wind/authData"
authString = readFile(authDataPath)
# userID = str(getJsonTag(authString, 'accountID'))
# if userID == '':
# userID = "1214779"
wind_log_path = "/usr/local/log/"
def DemoWSQCallback(out):
print("DemoWSQCallback")
print(out)
wsq_items = []
def g_wsq_callback(reqID, indata):
out = WindData()
out.set(indata, 3)
out.RequestID = reqID
id2rtField = {}
for item in wsq_items:
id2rtField[item['id']] = item['funname'].upper()
tmp = [id2rtField[str(val)] for val in out.Fields]
out.Fields = tmp
out.Times = datetime.now().strftime('%Y%m%d %H:%M:%S')
try:
g_wsq_callback.callback_funcs[reqID](out)
except:
print(out)
SPDCBTYPE = CFUNCTYPE(None, c_int, POINTER(c_apiout))
spdcb = SPDCBTYPE(g_wsq_callback)
g_wsq_callback.callback_funcs = {}
REQUEST_ID_CANCELALL = 0
REQUEST_ID_SYNC = 1
REQUEST_ID_MAX_RESQUEST = 9999
REQUEST_ID_MIN_RESQUEST = 3
g_requestID = REQUEST_ID_MIN_RESQUEST # The minimum id of NONE BLOCKING MODE
def retry(func):
def wrapper(*args, **kargs):
out = func(*args, **kargs)
if not out:
return out
error_code = type_check(out)
if error_code == -10:
for i in range(api_retry):
out = func(*args, **kargs)
error_code = type_check(out)
if error_code != -10:
break
return out
    # Determine the type of out: when the usedf parameter is passed, out is a tuple
def type_check(out):
if isinstance(out, tuple):
error_code = out[0]
else:
error_code = out.ErrorCode
return error_code
return wrapper
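# Hedged usage sketch (not part of the original WindPy source): `retry` is meant
# to wrap API-style callables whose return value carries an ErrorCode attribute
# (or an (ErrorCode, DataFrame) tuple when usedf is passed); an ErrorCode of -10
# triggers up to `api_retry` additional attempts. The helper name below is
# purely illustrative.
def _retry_usage_example():
    @retry
    def flaky_call():
        out = WindData()
        out.ErrorCode = -10  # simulate a transient session error
        return out
    # flaky_call() is re-invoked up to `api_retry` extra times before giving up
    return flaky_call()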
class WindQnt:
b_start = False
def __static_var(var_name, inital_value):
def _set_var(obj):
setattr(obj, var_name, inital_value)
return obj
return _set_var
def __stringify(arg):
if arg is None:
tmp = [""]
elif arg == "":
tmp = [""]
elif isinstance(arg, str):
a_l = arg.strip().split(',')
arg = ','.join([a.strip() for a in a_l])
tmp = [arg]
elif isinstance(arg, list):
tmp = [str(x) for x in arg]
elif isinstance(arg, tuple):
tmp = [str(x) for x in arg]
elif isinstance(arg, float) or isinstance(arg, int):
tmp = [str(arg)]
elif str(type(arg)) == "<type 'unicode'>":
tmp = [arg]
else:
tmp = None
if tmp is None:
return None
else:
return ";".join(tmp)
def __parseoptions(self, arga=None, argb=None):
options = WindQnt._WindQnt__stringify(self)
if options is None:
return None
if isinstance(arga, tuple):
for i in range(len(arga)):
v = WindQnt._WindQnt__stringify(arga[i])
if v is None:
continue
else:
if options == "":
options = v
else:
options = options + ";" + v
if isinstance(argb, dict):
keys = argb.keys()
for key in keys:
v = WindQnt._WindQnt__stringify(argb[key])
if v is None:
continue
else:
if options == "":
options = str(key) + "=" + v
else:
options = options + ";" + str(key) + "=" + v
return options
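    # Illustrative behaviour of the merging above (hypothetical values): with
    # self="rptDate=20200101", arga=("PriceAdj=F",) and argb={"Fill": "Previous"},
    # the returned option string is "rptDate=20200101;PriceAdj=F;Fill=Previous".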
@staticmethod
def format_option(options):
if options is None:
return None
option_f = options.replace(';', '&&')
return option_f
    # The with_time parameter allows an optional hours:minutes:seconds component in the parsed date
def __parsedate(self, with_time=False):
d = self
if d is None:
d = datetime.today().strftime("%Y-%m-%d")
return d
elif isinstance(d, date):
d = d.strftime("%Y-%m-%d")
return d
elif isinstance(d, datetime):
d = d.strftime("%Y-%m-%d")
return d
elif isinstance(d, str):
try:
d = pure_num = ''.join(list(filter(str.isdigit, d)))
if len(d) != 8 and len(d) != 14:
return None
if len(pure_num) == 14:
d = pure_num[:8] + ' ' + pure_num[8:]
if int(d[9:11]) > 24 or int(d[9:11]) < 0 or \
int(d[11:13]) > 60 or int(d[11:13]) < 0 or \
int(d[13:15]) > 60 or int(d[13:15]) < 0:
return None
if int(d[:4]) < 1000 or int(d[:4]) > 9999 or \
int(d[4:6]) < 1 or int(d[4:6]) > 12 or \
int(d[6:8]) < 1 or int(d[6:8]) > 31:
return None
date_time = d.split(' ')
YMD = date_time[0][:4] + '-' + date_time[0][4:6] + '-' + date_time[0][6:8]
HMS = ''
if with_time and len(date_time) == 2:
HMS = ' ' + date_time[1][:2] + ':' + date_time[1][2:4] + ':' + date_time[1][4:6]
d = YMD + HMS
return d
except:
return None
return d
# def __parsedate(d):
# if d is None:
# d = datetime.today().strftime("%Y-%m-%d")
# return d
# elif isinstance(d, date):
# d = d.strftime("%Y-%m-%d")
# return d
# elif isinstance(d, str):
# try:
# #Try to get datetime object from the user input string.
# #We will go to the except block, given an invalid format.
# if re.match(r'^(?:(?!0000)[0-9]{4}-(?:(?:0[1-9]|1[0-2])-(?:0[1-9]|1[0-9]|2[0-8])|(?:0[13-9]|1[0-2])-(?:29|30)|(?:0[13578]|1[02])-31)|(?:[0-9]{2}(?:0[48]|[2468][048]|[13579][26])|(?:0[48]|[2468][048]|[13579][26])00)-02-29)$',d, re.I|re.M):
# d = datetime.strptime(d, "%Y-%m-%d")
# return d.strftime("%Y-%m-%d")
# elif re.match(r'^(?:(?!0000)[0-9]{4}(?:(?:0[1-9]|1[0-2])(?:0[1-9]|1[0-9]|2[0-8])|(?:0[13-9]|1[0-2])(?:29|30)|(?:0[13578]|1[02])31)|(?:[0-9]{2}(?:0[48]|[2468][048]|[13579][26])|(?:0[48]|[2468][048]|[13579][26])00)0229)$', d, re.I|re.M):
# d = datetime.strptime(d, "%Y%m%d")
# return d.strftime("%Y-%m-%d")
# else:
# return None
# except:
# return None
# else:
# return None
#
# return d
def use_debug_file(self, debug_expo='/wind/serverapi/libExpoWrapperDebug.so',
debug_speed='/wind/serverapi/libSpeedWrapperDebug.so'):
WindQnt.debug_expo = debug_expo
WindQnt.debug_speed = debug_speed
@staticmethod
def format_wind_data(error_codes, msg):
out = WindData()
out.ErrorCode = error_codes
out.Codes = ['ErrorReport']
out.Fields = ['OUT MESSAGE']
out.Times = datetime.now().strftime('%Y%m%d %H:%M:%S')
out.Data = [[msg]]
return out
@staticmethod
def to_dataframe(out):
if out.ErrorCode != 0:
return
|
pd.DataFrame([out.ErrorCode], columns=['ErrorCode'])
|
pandas.DataFrame
|
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from matplotlib.gridspec import GridSpec
plt.style.use('ggplot')
gg_colors = {
'red':'#E24A33',
'blue':'#348ABD',
'purple':'#988ED5',
'gray':'#777777',
'orange':'#FBC15E',
'green':'#8EBA42',
'pink':'#FFB5B8'
}
print ([ii['color'] for ii in list(plt.rcParams['axes.prop_cycle'])])
def hex2rgb(h):
h = h.lstrip('#')
return [int(h[i:i+2], 16) for i in (0, 2, 4)]
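# e.g. hex2rgb('#E24A33') -> [226, 74, 51], the ggplot red defined in gg_colors above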
df_S2 = pd.read_csv(os.path.join(os.getcwd(),'data','band_perturbation','band_perturbation_S2.csv')).sort_values('records').set_index(['records','bands'])
df_SPOT = pd.read_csv(os.path.join(os.getcwd(),'data','band_perturbation','band_perturbation_SPOT.csv'))
root = os.getcwd()
# prep SPOT df
idx = pd.IndexSlice
#all_bands = list(df.index.get_level_values(1).unique())
#all_bands = [bb for bb in all_bands if bb!='none']
S2_bands = [{'name': 'coastal-aerosol', 'resolution':'60m', 'color':'purple' },
{'name': 'blue', 'resolution':'10m', 'color':'blue' },
{'name': 'green', 'resolution':'10m', 'color':'green' },
{'name': 'red', 'resolution':'10m', 'color':'red' },
{'name': 'red-edge', 'resolution':'20m', 'color':'pink' },
{'name': 'red-edge-2', 'resolution':'20m', 'color':'pink' },
{'name': 'red-edge-3', 'resolution':'20m', 'color':'pink' },
{'name': 'nir', 'resolution':'10m', 'color':'orange' },
{'name': 'red-edge-4', 'resolution':'20m', 'color':'pink' },
{'name': 'water-vapor', 'resolution':'60m', 'color':'blue' },
{'name': 'cirrus', 'resolution':'60m', 'color':'green' },
{'name': 'swir2', 'resolution':'20m', 'color':'orange' },
{'name': 'swir1', 'resolution':'20m', 'color':'orange' },
{'name': 'alpha', 'resolution':'10m', 'color':'gray' },
]
SPOT_bands = [ {'name':'red', 'resolution':'1.5m', 'color':'red'},
{'name':'green', 'resolution':'1.5m', 'color':'green'},
{'name':'blue', 'resolution':'1.5m', 'color':'blue'},
{'name':'nir', 'resolution':'1.5m', 'color':'orange'},]
print (S2_bands)
keep_records = (df_S2.loc[idx[:,'none'],'band_dropout']>0).values
df_S2 = df_S2[np.repeat(keep_records,15)]
df_SPOT = df_SPOT[df_SPOT['None']>0]
for band in S2_bands:
# impairment
df_S2.loc[idx[:,band['name']],:] = (df_S2.loc[idx[:,'none'],:].values - df_S2.loc[idx[:,band['name']],:].values) # / df_S2.loc[idx[:,'none'],:].values
for ii_b, band in enumerate(SPOT_bands):
df_SPOT[band['name']+'_bdo'] = (df_SPOT['None'] - df_SPOT['bdo_'+str(ii_b)]) #/ df_SPOT['None']
df_SPOT[band['name']+'_additive_0.1'] = (df_SPOT['None'] - df_SPOT['additive_0.1_'+str(ii_b)]) #/ df_SPOT['None']
df_SPOT[band['name']+'_additive_0.2'] = (df_SPOT['None'] - df_SPOT['additive_0.2_'+str(ii_b)]) #/ df_SPOT['None']
df_SPOT[band['name']+'_additive_0.3'] = (df_SPOT['None'] - df_SPOT['additive_0.3_'+str(ii_b)]) #/ df_SPOT['None']
df_SPOT[band['name']+'_multiplicative_0.1'] = (df_SPOT['None'] - df_SPOT['multiplicative_0.1_'+str(ii_b)]) #/ df_SPOT['None']
df_SPOT[band['name']+'_multiplicative_0.2'] = (df_SPOT['None'] - df_SPOT['multiplicative_0.2_'+str(ii_b)]) #/ df_SPOT['None']
df_SPOT[band['name']+'_multiplicative_0.3'] = (df_SPOT['None'] - df_SPOT['multiplicative_0.3_'+str(ii_b)]) #/ df_SPOT['None']
print (df_S2)
#fig, axs = plt.subplots(3,1, figsize=(32,10), sharey=True, sharex=True)
fig = plt.figure(figsize=(24,10))
gs = GridSpec(3,9, figure=fig)
axs = {}
axs['S2'] = {'bdo':fig.add_subplot(gs[0,0:7]),
'additive':fig.add_subplot(gs[1,0:7]),
'multiplicative':fig.add_subplot(gs[2,0:7])}
axs['SPOT'] = {'bdo':fig.add_subplot(gs[0,7:]),
'additive':fig.add_subplot(gs[1,7:]),
'multiplicative':fig.add_subplot(gs[2,7:])}
data_out = {}
# do band dropout
data = []
for ii_b, band in enumerate(S2_bands):
data.append(df_S2.loc[idx[:,band['name']],'band_dropout'].values.clip(-1,1))
el = df_S2.loc[idx[:,band['name']],'band_dropout'].values.clip(-1,1)
print ('band')
print (band)
data_out[band['name']] = {
'mean':el.mean(),
'Q20':np.percentile(el,20),
'Q80':np.percentile(el,80),
'min':el.min(),
'max':el.max(),
}
df_out = pd.DataFrame(data_out)
df_out.to_csv(os.path.join(root,'makefigs','data','fig-A8_S2_bands.csv'))
bplot0S2 = axs['S2']['bdo'].boxplot(data, whis='range',patch_artist=True, medianprops = dict(linestyle='-', linewidth=1, color='firebrick'))
axs['S2']['bdo'].set_ylabel('IoU Impairment')
axs['S2']['bdo'].set_ylim([-1, 1.])
axs['S2']['bdo'].set_title('Band Dropout - Sentinel-2')
axs['S2']['bdo'].set_xticklabels([])
axs['S2']['bdo'].yaxis.set_major_formatter(mtick.PercentFormatter(1))
data = []
data_out = {}
for ii_b, band in enumerate(SPOT_bands):
data.append(df_SPOT[band['name']+'_bdo'].values.clip(-1,1))
el = df_SPOT[band['name']+'_bdo'].values.clip(-1,1)
data_out[band['name']] = {
'mean':el.mean(),
'Q20':np.percentile(el,20),
'Q80':np.percentile(el,80),
'min':el.min(),
'max':el.max()
}
df_out = pd.DataFrame(data_out)
df_out.to_csv(os.path.join(root,'makefigs','data','fig-A8_SPOT_bands.csv'))
bplot0SPOT = axs['SPOT']['bdo'].boxplot(data, whis='range',patch_artist=True, medianprops = dict(linestyle='-', linewidth=1, color='firebrick'))
#axs['SPOT']['bdo'].set_ylabel('IoU Impairment')
axs['SPOT']['bdo'].set_title('SPOT6/7')
axs['SPOT']['bdo'].set_ylim([-1, 1.])
axs['SPOT']['bdo'].set_xticklabels([])
axs['SPOT']['bdo'].yaxis.set_major_formatter(mtick.PercentFormatter(1))
axs['SPOT']['bdo'].set_yticklabels([])
# do additive 1,2,3
data = []
positions=[]
data_out = {}
for ii_b, band in enumerate(S2_bands):
data.append(df_S2.loc[idx[:,band['name']],'additive_0.1'].values.clip(-1,1))
data.append(df_S2.loc[idx[:,band['name']],'additive_0.2'].values.clip(-1,1))
data.append(df_S2.loc[idx[:,band['name']],'additive_0.3'].values.clip(-1,1))
positions += [(ii_b+1 + (ii_p-1)/4) for ii_p in range(3)]
el1 = df_S2.loc[idx[:,band['name']],'additive_0.1'].values.clip(-1,1)
el2 = df_S2.loc[idx[:,band['name']],'additive_0.2'].values.clip(-1,1)
el3 = df_S2.loc[idx[:,band['name']],'additive_0.3'].values.clip(-1,1)
for el, label in zip([el1,el2,el3],['_additive_0.1','_additive_0.2','_additive_0.3']):
data_out[band['name']+label] = {
'mean':el.mean(),
'Q20':np.percentile(el,20),
'Q80':np.percentile(el,80),
'min':el.min(),
'max':el.max()
}
df_out = pd.DataFrame(data_out)
df_out.to_csv(os.path.join(root,'makefigs','data','fig-A8_S2_additive.csv'))
bplot1S2 = axs['S2']['additive'].boxplot(data, positions=positions, whis='range', widths=0.15, patch_artist=True, medianprops = dict(linestyle='-', linewidth=1, color='firebrick'))
axs['S2']['additive'].set_ylabel('IoU Impairment')
axs['S2']['additive'].set_ylim([-1, 1.])
axs['S2']['additive'].set_xticks(range(1,15))
axs['S2']['additive'].set_title('Additive Noise [10%, 20%, 30%] - Sentinel-2')
axs['S2']['additive'].set_xticklabels([])
axs['S2']['additive'].yaxis.set_major_formatter(mtick.PercentFormatter(1))
data = []
positions=[]
data_out = {}
for ii_b, band in enumerate(SPOT_bands):
data.append(df_SPOT[band['name']+'_additive_0.1'].values.clip(-1,1))
data.append(df_SPOT[band['name']+'_additive_0.2'].values.clip(-1,1))
data.append(df_SPOT[band['name']+'_additive_0.3'].values.clip(-1,1))
positions += [(ii_b+1 + (ii_p-1)/4) for ii_p in range(3)]
el1 = df_SPOT[band['name']+'_additive_0.1'].values.clip(-1,1)
el2 = df_SPOT[band['name']+'_additive_0.2'].values.clip(-1,1)
el3 = df_SPOT[band['name']+'_additive_0.3'].values.clip(-1,1)
for el, label in zip([el1,el2,el3],['_additive_0.1','_additive_0.2','_additive_0.3']):
data_out[band['name']+label] = {
'mean':el.mean(),
'Q20':np.percentile(el,20),
'Q80':np.percentile(el,80),
'min':el.min(),
'max':el.max()
}
df_out =
|
pd.DataFrame(data_out)
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
from glob import glob
from pycsca.constants import *
from pycsca.utils import create_dir_recursively
import os
from os import listdir
from os.path import isfile, join
from sklearn.metrics import confusion_matrix, f1_score
import pickle as pk
# In[2]:
TIME_COLUMN = "Time-Taken Hours"
EXP_TIME_COLUMN = "Experiment-Time Hours"
(d1, d2) = ('independent', 'dependent')
(d3, d4) = ('independent_diff_sizes', 'dependent_diff_sizes')
pval_col = [FISHER_PVAL + '-sum', FISHER_PVAL + '-median', FISHER_PVAL + '-mean', TTEST_PVAL + '-random',
TTEST_PVAL + '-majority']
pval_col_2 =["Sum Fisher", "Median Fisher", "Mean Fisher", "Random Guessing",
"Majority Classifier", ]
d_pvals = dict(zip(pval_col, pval_col_2))
MIN_LABEL_WEIGHT = 0.01
datasets = ['results-gp']
names = ["Synthetic Dataset"]
T_INSTANCES = "# Instances"
CL1_WEIGHT = "Class-Label 1 Weight"
SPLITS = "Splits"
DATASET_LABEL = "Dataset"
MAXI = 4000
MINI = 200
ncolumns = [ "Hypothesis Test", SPLITS, EXP_TIME_COLUMN, "False-Postive Rate", "False-Postive Rate Se", "False-Negative Rate",
"False-Negative Rate Se", ACCURACY, ACCURACY +" Se", F1SCORE, F1SCORE + " Se",
INFORMEDNESS, INFORMEDNESS + " Se", "(tp, tn,fp, fn)"]
# In[3]:
def get_scores(y_trues, y_preds):
fnrs = []
fprs = []
accuracies = []
cms = []
f1s = []
infs = []
for y_true, y_pred in zip(y_trues, y_preds):
#try:
tn, fn, fp, tp = confusion_matrix(y_true, y_pred).T.ravel()
f1 = f1_score(y_true, y_pred)
cp = np.array(y_true).sum()
cn = np.logical_not(y_true).sum()
inf = np.nansum([tp / cp, tn / cn, -1])
#except:
# tp = np.logical_and(y_true, y_pred).sum()
# tn = np.logical_and(np.logical_not(y_true), np.logical_not(y_pred)).sum()
# fp = np.logical_and(np.logical_not(y_true), y_pred).sum()
# fn = np.logical_and(y_true, np.logical_not(y_pred)).sum()
fnr = (fn/(fn+tp)).round(2)
fpr = (fp/(fp+tn)).round(2)
accuracy = ((tp+tn)/(tp+tn+fp+fn))
if np.isnan(fpr):
fpr = 'NA'
tn,fp = 'NA', 'NA'
if np.isnan(fnr):
fnr = 'NA'
tp,fn = 'NA', 'NA'
fnrs.append(fnr)
fprs.append(fpr)
accuracies.append(accuracy)
f1s.append(f1)
infs.append(inf)
cms.append([tn, fn, fp, tp])
return fnrs, fprs, np.array(accuracies), np.array(f1s), np.array(infs), np.array(cms)
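# Note on get_scores: whenever a rate's denominator is zero, the corresponding
# entry is stored as the string 'NA' rather than NaN, so downstream code that
# averages fprs/fnrs should filter those entries first.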
def get_labels_per_column(data_frame, column):
labels = {k: [] for k in pval_col}
labels['GT'] = []
for s, df in data_frame.groupby(column):
labels['GT'].append(df['GT'].values)
for pcol in pval_col:
labels[pcol].append(df[pcol].values)
return labels
def get_labels_per_2_columns(data_frame, column, column1):
labels = {k: [] for k in pval_col}
labels['GT'] = []
for s, data_frame_s in data_frame.groupby(column):
        for s2, df in data_frame_s.groupby(column1):
labels['GT'].append(df['GT'].values)
for pcol in pval_col:
labels[pcol].append(df[pcol].values)
return labels
def insert_times(df, m_file):
with open(m_file, 'rb') as f:
m_dict = pk.load(f)
f.close()
df[TIME_COLUMN] = 0.0
for index, row in df.iterrows():
k = SCORE_KEY_FORMAT.format('', row[DATASET], row[FOLD_ID])
time = np.sum([m_dict[key][TIME_TAKEN] for key in m_dict.keys() if k in key])
df.loc[index, TIME_COLUMN] = time.round(4)/3600
#print(k, time)
return df
def create_dataframe(final_predictions, column1, column2):
final = []
length = len(final_predictions.groupby(column1))
for col1_value, df in final_predictions.groupby(column1):
labels = get_labels_per_column(df, column2)
y_true = labels['GT']
time = np.sum(df[TIME_COLUMN].values)
for pcol in pval_col:
y_pred = labels[pcol]
fnrs, fprs, accuracies, f1s, infs, cms = get_scores(y_true, y_pred)
(tn, fn, fp, tp) = cms.sum(axis=0)
one_row = [pcol, col1_value, time, np.mean(fprs), np.std(fprs), np.mean(fnrs), np.std(fnrs),
np.mean(accuracies), np.std(accuracies), np.mean(f1s), np.std(f1s), np.mean(infs),
np.std(infs), (tp,tn,fp,fn)]
final.append(one_row)
ncolumns[1] = column1
result_df =
|
pd.DataFrame(final, columns=ncolumns)
|
pandas.DataFrame
|
import unyt as u
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.ticker as ticker
import csv as csv
from scipy.stats import linregress
from matplotlib.colors import ListedColormap
plt.rc("font", family="serif")
def plot_mu_vs_pressure():
data_path = '../simulations/bulk-water/'
data_nd = pd.read_csv(data_path+"cassandra/analysis/results.csv")
data_ws = pd.read_csv(data_path+"gomc/analysis/Avg_data_from Box_0_water_df.txt", sep="\s+")
mus_ws = data_ws["ChemPot_K"].values * u.K * u.kb
press_ws = data_ws["P_bar"].values * u.bar
# @300 K, https://www.nist.gov/mml/csd/informatics/sat-tmmc-liquid-vapor-coexistence-properties-spce-water-lrc
psat_nist = 1.017e-02 * u.bar
fig, ax = plt.subplots()
# Plot ND results
ax.scatter(
data_nd["mu-cassandra_kJmol"],
data_nd["pressure_bar"],
marker="s",
s=50,
alpha=0.9,
label="Cassandra",
)
# Plot shifted WS results
mass_water = 18.015 * u.amu
temperature = 298.0 * u.K
debroglie = u.h / np.sqrt(2 * np.pi * mass_water * u.kb * temperature)
ws_offset = 3 * u.kb * temperature * np.log(debroglie.to_value(u.angstrom))
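    # The GOMC chemical potentials are shifted by 3*kB*T*ln(Lambda/angstrom)
    # before plotting; the assumption here is that this accounts for the
    # de Broglie-wavelength factor that the two codes treat differently in
    # their chemical-potential conventions, putting both on the same mu' scale.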
ax.scatter(
mus_ws.to_value("kJ/mol") + ws_offset.to_value("kJ/mol"),
press_ws.to_value("bar"),
marker="o",
s=50,
alpha=0.9,
label="GOMC",
)
ax.set_yscale("log")
ax.set_xlabel("$\mu'$, kJ/mol", fontsize=14, labelpad=15)
ax.set_ylabel("Pressure, bar", fontsize=14, labelpad=15)
ax.tick_params(axis="both", which="major", labelsize=12)
ax.legend()
fig.tight_layout()
fig.savefig("chempot-compare.pdf")
def plot_ads_des_data():
sns.color_palette("deep",2)
E_vs_P_Psat_saving_name = "E_vs_P_div_Psat.pdf"
axis_Label_font_size = 22
legend_font_size = 16
axis_number_font_size = 18
PointSizes = 8
ConnectionLineSizes = 2
Reg_Density_Linestyle = '--' #'-' = solid, '--' = dashed, ':'= dotted
Critical_Density_Linestyle = None
Psat_data_file = '../simulations/Psat_SPCE_298K/gomc/analysis/SPCE_Pvap_at_298K_df.csv'
Psat_data = pd.read_csv(Psat_data_file, sep=',', header=0, na_values='NaN',
usecols=[0,1], index_col=False)
Psat_data =
|
pd.DataFrame(Psat_data)
|
pandas.DataFrame
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import pandas as pd
import utils.metrics
def find_best_thres(df_val):
total = len(df_val)
records = []
for label in range(28):
label_key = 'L{:02d}'.format(label)
prob_key = 'P{:02d}'.format(label)
df = df_val[[label_key, prob_key]]
df_pos = df[df[label_key] == 1]
proportion = len(df_pos) / total
best_diff = 1000
best_thres = 0
for thres in np.arange(0.05, 1.00, 0.01):
positive = int(np.sum((df_val[prob_key].values > thres).astype(int)))
cur_proportion = positive / total
cur_diff = abs(proportion - cur_proportion)
if cur_diff < best_diff:
best_diff = cur_diff
best_thres = thres
records.append((label, best_thres))
df_ret = pd.DataFrame.from_records(records, columns=['label', 'thres'])
return df_ret.set_index('label')
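# Assumed input layout for find_best_thres (not stated explicitly above): df_val
# carries, for every label 0..27, a 0/1 ground-truth column 'L00'..'L27' and a
# predicted-probability column 'P00'..'P27'; the selected threshold per label is
# the one whose positive-prediction rate best matches that label's prevalence.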
def ensemble(dfs, weights):
label_keys = ['L{:02}'.format(l) for l in range(28)]
prob_keys = ['P{:02}'.format(l) for l in range(28)]
if 'L00' in dfs[0].index:
df_base = dfs[0][label_keys]
df_probs = sum([df[prob_keys] * w for df, w in zip(dfs, weights)]) / sum(weights)
df =
|
pd.concat([df_base, df_probs], axis=1)
|
pandas.concat
|
import pymc3 as pm
import pandas as pd
from covid.models.generative import GenerativeModel
from covid.data import summarize_inference_data
url = '../../data/covid19za_provincial_cumulative_timeline_confirmed.csv'
states_cases =
|
pd.read_csv(url, parse_dates=['date'], dayfirst=True, index_col=0)
|
pandas.read_csv
|
import pandas as pd
import requests
from tqdm.notebook import tqdm, trange
import math
def get_spotify(token, kind, elements=None, limit=50, offset=50, user=False):
"""
token: Oath Token
kind: Specify query kind albums, artists, tracks
user: Specify whether the query involves a specific user
elements (optional): Define spotify ids to query
"""
limit = f'?limit={limit}'
offset = f'&offset={offset}'
query_kind = kind.lower()
if user:
endpoint_root = 'https://api.spotify.com/v1/me'
else:
endpoint_root = 'https://api.spotify.com/v1'
if query_kind == 'albums':
        query = f'/albums?ids={elements}'
elif query_kind == 'artists_albums':
query = f'/artists/{elements}/albums'
elif query_kind == "artists":
query = f'/artists?ids={elements}'
elif query_kind == 'user_tracks':
query = f'/tracks'
elif query_kind == 'album_tracks':
query = f'/albums/{elements}/tracks'
elif query_kind == 'audio-features':
query = f'/audio-features?ids={elements}'
elif query_kind == 'playlists':
query = f'/playlists'
elif query_kind == 'playlist_tracks':
query = f'/playlists/{elements}/tracks'
else:
print("No Endpoint Located")
if user:
endpoint = endpoint_root + query + limit + offset
else:
endpoint = endpoint_root + query
# https://api.spotify.com/v1/artists/{id}
# https://api.spotify.com/v1/me/top/{type}
# https://api.spotify.com/v1/audio-features
# https://api.spotify.com/v1/tracks
# https://api.spotify.com/v1/me/playlists
# https://api.spotify.com/v1/playlists/{playlist_id}/tracks
response = requests.get(url=endpoint, headers={'Authorization': 'Bearer ' + token}).json()
if user:
response_list = list()
print(response.keys())
print("Total Songs:", response['total'])
print("Max Limit:", response['limit'])
times = ((response['total'] - response['limit'])/response['limit'])
print(times)
response_list.append(response)
for i in trange(math.floor(times)):
response = requests.get(url=response['next'], headers={
'Authorization': 'Bearer ' + token}).json()
if response['next']:
response_list.append(response)
else:
response_list.append(response)
return response_list
else:
return response
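# Hedged usage sketch (the token and id below are placeholders, not real values):
# get_spotify(token='<oauth-token>', kind='artists_albums', elements='<artist-id>')
# hits https://api.spotify.com/v1/artists/<artist-id>/albums, while user=True
# switches the root to /v1/me and pages through the paginated results.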
def concat_df(response, albums=None):
df = pd.DataFrame()
for index, res in enumerate(tqdm(response)):
df = pd.concat([df, pd.DataFrame(res)])
if isinstance(albums, list): # Album is a numpy array
df['album_uri'] = albums[0][index]
df['album_id'] = albums[1][index]
return df
def date_parse_df(df, columns):
for col in columns:
df[f'{col}.year'] = pd.DatetimeIndex(df[col]).year
df[f'{col}.month'] = pd.DatetimeIndex(df[col]).month
df[f'{col}.day'] =
|
pd.DatetimeIndex(df[col]).day
|
pandas.DatetimeIndex
|
import functools
import multiprocessing
import os
import struct
import sys
import time
from concurrent.futures import as_completed
from contextlib import contextmanager
import anndata
import ngs_tools as ngs
import numpy as np
import psutil
import pandas as pd
from scipy import sparse
from . import config
from .logging import logger
# As of 0.0.1, these are provided by ngs_tools, but keep these here for now because
# they are imported from this file in many places.
run_executable = ngs.utils.run_executable
open_as_text = ngs.utils.open_as_text
decompress_gzip = ngs.utils.decompress_gzip
flatten_dict_values = ngs.utils.flatten_dict_values
mkstemp = ngs.utils.mkstemp
all_exists = ngs.utils.all_exists
flatten_dictionary = ngs.utils.flatten_dictionary
flatten_list = ngs.utils.flatten_list
merge_dictionaries = ngs.utils.merge_dictionaries
write_pickle = ngs.utils.write_pickle
read_pickle = ngs.utils.read_pickle
class UnsupportedOSException(Exception):
pass
class suppress_stdout_stderr:
"""A context manager for doing a "deep suppression" of stdout and stderr in
Python, i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
This will not suppress raised exceptions, since exceptions are printed
to stderr just before a script exits, and after the context manager has
exited (at least, I think that is why it lets exceptions through).
https://github.com/facebook/prophet/issues/223
"""
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = [os.dup(1), os.dup(2)]
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
# Close the null files
for fd in self.null_fds + self.save_fds:
os.close(fd)
def get_STAR_binary_path():
"""Get the path to the platform-dependent STAR binary included with
the installation.
:return: path to the binary
:rtype: str
"""
bin_filename = 'STAR.exe' if config.PLATFORM == 'windows' else 'STAR'
path = os.path.join(config.BINS_DIR, config.PLATFORM, 'STAR', bin_filename)
if not os.path.exists(path):
raise UnsupportedOSException(f'This operating system ({config.PLATFORM}) is not supported.')
return path
def get_STAR_version():
"""Get the provided STAR version.
:return: version string
:rtype: str
"""
p, stdout, stderr = run_executable([get_STAR_binary_path(), '--version'], quiet=True, returncode=1)
version = stdout.strip()
return version
def combine_arguments(args, additional):
"""Combine two dictionaries representing command-line arguments.
Any duplicate keys will be merged according to the following procedure:
    1. If the values in both dictionaries are lists, the two lists are combined.
2. Otherwise, the value in the first dictionary is OVERWRITTEN.
:param args: original command-line arguments
:type args: dictionary
:param additional: additional command-line arguments
:type additional: dictionary
:return: combined command-line arguments
:rtype: dictionary
"""
new_args = args.copy()
for key, value in additional.items():
if key in new_args:
if isinstance(value, list) and isinstance(new_args[key], list):
new_args[key] += value
else:
new_args[key] = value
else:
new_args[key] = value
return new_args
def arguments_to_list(args):
"""Convert a dictionary of command-line arguments to a list.
:param args: command-line arguments
:type args: dictionary
:return: list of command-line arguments
:rtype: list
"""
arguments = []
for key, value in args.items():
arguments.append(key)
if isinstance(value, list):
arguments.extend(value)
else:
arguments.append(value)
return arguments
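# Minimal illustration (not part of the package): combining two STAR-style
# argument dictionaries and flattening them into a command-line list. The
# argument names and file names below are examples only.
def example_star_arguments():
    args = {'--genomeDir': 'index', '--readFilesIn': ['r1.fastq', 'r2.fastq']}
    extra = {'--readFilesIn': ['r3.fastq'], '--runThreadN': 8}
    combined = combine_arguments(args, extra)
    # combined == {'--genomeDir': 'index',
    #              '--readFilesIn': ['r1.fastq', 'r2.fastq', 'r3.fastq'],
    #              '--runThreadN': 8}
    return arguments_to_list(combined)
    # -> ['--genomeDir', 'index', '--readFilesIn', 'r1.fastq', 'r2.fastq', 'r3.fastq', '--runThreadN', 8]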
def get_file_descriptor_limit():
"""Get the current value for the maximum number of open file descriptors
in a platform-dependent way.
:return: the current value of the maximum number of open file descriptors.
:rtype: int
"""
if config.PLATFORM == 'windows':
import win32file
return win32file._getmaxstdio()
else:
import resource
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
def get_max_file_descriptor_limit():
"""Get the maximum allowed value for the maximum number of open file
descriptors.
Note that for Windows, there is not an easy way to get this,
as it requires reading from the registry. So, we just return the maximum for
a vanilla Windows installation, which is 8192.
https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/setmaxstdio?view=vs-2019
Similarly, on MacOS, we return a hardcoded 10240.
:return: maximum allowed value for the maximum number of open file descriptors
:rtype: int
"""
if config.PLATFORM == 'windows':
return 8192
elif config.PLATFORM == 'darwin':
return 10240
else:
import resource
return resource.getrlimit(resource.RLIMIT_NOFILE)[1]
@contextmanager
def increase_file_descriptor_limit(limit):
"""Context manager that can be used to temporarily increase the maximum
number of open file descriptors for the current process. The original
value is restored when execution exits this function.
This is required when running STAR with many threads.
:param limit: maximum number of open file descriptors will be increased to
this value for the duration of the context
:type limit: int
"""
old = None
if config.PLATFORM == 'windows':
import win32file
try:
old = win32file._getmaxstdio()
win32file._setmaxstdio(limit)
yield
finally:
if old is not None:
win32file._setmaxstdio(old)
else:
import resource
try:
old = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (limit, old[1]))
yield
finally:
if old is not None:
resource.setrlimit(resource.RLIMIT_NOFILE, old)
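# Usage sketch (illustrative): temporarily raise the open-file limit while
# running a many-threaded alignment. `run_alignment` is a hypothetical callable;
# the requested limit is capped at the platform maximum queried above.
def run_with_more_file_descriptors(run_alignment, requested=16384):
    limit = min(requested, get_max_file_descriptor_limit())
    with increase_file_descriptor_limit(limit):
        return run_alignment()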
def get_available_memory():
"""Get total amount of available memory (total memory - used memory) in bytes.
:return: available memory in bytes
:rtype: int
"""
return psutil.virtual_memory().available
def make_pool_with_counter(n_threads):
"""Create a new Process pool with a shared progress counter.
:param n_threads: number of processes
:type n_threads: int
:return: (Process pool, progress counter, lock)
:rtype: (multiprocessing.Pool, multiprocessing.Value, multiprocessing.Lock)
"""
manager = multiprocessing.Manager()
counter = manager.Value('I', 0)
lock = manager.Lock()
pool = multiprocessing.Pool(n_threads)
return pool, counter, lock
def display_progress_with_counter(counter, total, *async_results, desc=None):
"""Display progress bar for displaying multiprocessing progress.
:param counter: progress counter
:type counter: multiprocessing.Value
:param total: maximum number of units of processing
:type total: int
:param *async_results: multiprocessing results to monitor. These are used to
determine when all processes are done.
:type *async_results: multiprocessing.pool.AsyncResult
:param desc: progress bar description, defaults to `None`
:type desc: str, optional
"""
with ngs.progress.progress(total=total, unit_scale=True, desc=desc) as pbar:
previous_progress = 0
while any(not async_result.ready() for async_result in async_results):
time.sleep(0.1)
progress = counter.value
pbar.update(progress - previous_progress)
pbar.refresh()
previous_progress = progress
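# Sketch of how the pool/counter/lock trio is typically wired together
# (illustrative; `_demo_square` and the inputs are placeholders). Each worker
# bumps the shared counter under the lock while the main process renders the
# progress bar until the async result is ready.
def _demo_square(item, counter, lock):
    result = item * item  # stand-in for real per-item processing
    with lock:
        counter.value += 1
    return result

def demo_parallel_progress(items, n_threads=4):
    pool, counter, lock = make_pool_with_counter(n_threads)
    async_result = pool.starmap_async(_demo_square, [(i, counter, lock) for i in items])
    display_progress_with_counter(counter, len(items), async_result, desc='demo')
    pool.close()
    pool.join()
    return async_result.get()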
def as_completed_with_progress(futures):
"""Wrapper around `concurrent.futures.as_completed` that displays a progress bar.
:param futures: iterator of `concurrent.futures.Future` objects
:type futures: iterable
"""
with ngs.progress.progress(total=len(futures)) as pbar:
for future in as_completed(futures):
yield future
pbar.update(1)
def split_index(index, n=8):
"""Split a conversions index, which is a list of tuples (file position,
number of lines, alignment position), one for each read, into `n`
approximately equal parts. This function is used to split the conversions
CSV for multiprocessing.
:param index: index
:type index: list
:param n: number of splits, defaults to `8`
:type n: int, optional
:return: list of parts, where each part is a list of
(file position, number of lines, alignment position) tuples
:rtype: list
"""
n_lines = sum(idx[1] for idx in index)
target = (n_lines // n) + 1 # add one to prevent underflow
# Split the index to "approximately" equal parts
parts = []
current_part = []
current_size = 0
for tup in index:
current_part.append(tup)
current_size += tup[1]
if current_size >= target:
parts.append(current_part)
current_size = 0
current_part = []
if current_part:
parts.append(current_part)
return parts
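# Tiny illustration of split_index (made-up values): five reads totalling 20
# lines split into n=2 parts; the target is (20 // 2) + 1 = 11 lines, so the
# first part takes 12 lines (3 reads) and the second the remaining 8 (2 reads).
def example_split_index():
    index = [(0, 4, 100), (50, 3, 200), (90, 5, 300), (150, 2, 400), (175, 6, 500)]
    return split_index(index, n=2)
    # -> [[(0, 4, 100), (50, 3, 200), (90, 5, 300)], [(150, 2, 400), (175, 6, 500)]]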
def downsample_counts(df_counts, proportion=None, count=None, seed=None, group_by=None):
"""Downsample the given counts dataframe according to the ``proportion`` or
``count`` arguments. One of these two must be provided, but not both. The dataframe
is assumed to be UMI-deduplicated.
:param df_counts: counts dataframe
:type df_counts: pandas.DataFrame
:param proportion: proportion of reads (UMIs) to keep, defaults to None
:type proportion: float, optional
:param count: absolute number of reads (UMIs) to keep, defaults to None
:type count: int, optional
:param seed: random seed, defaults to None
:type seed: int, optional
:param group_by: Columns in the counts dataframe to use to group entries.
When this is provided, UMIs are no longer sampled at random, but instead
grouped by this argument, and only groups that have more than ``count`` UMIs
are downsampled.
:type group_by: list, optional
:return: downsampled counts dataframe
:rtype: pandas.DataFrame
"""
rng = np.random.default_rng(seed)
if not group_by:
if bool(proportion) == bool(count):
raise Exception('Only one of `proportion` or `count` must be provided.')
n_keep = int(df_counts.shape[0] * proportion) if proportion is not None else count
return df_counts.iloc[rng.choice(df_counts.shape[0], n_keep, shuffle=False, replace=False)]
else:
if not count:
raise Exception('`count` must be provided when using `group_by`')
dfs = []
for key, df_group in df_counts.groupby(group_by, sort=False, observed=True):
if df_group.shape[0] > count:
df_group = df_group.iloc[rng.choice(df_group.shape[0], count, shuffle=False, replace=False)]
dfs.append(df_group)
return
|
pd.concat(dfs)
|
pandas.concat
|
# support for type hints for self/producer methods like PCA
from __future__ import annotations
# import argparse
# from ast import parse
# from os import P_ALL, error, path
# import sys
# import math
# from numpy.core.numeric import full
import pandas as pd
from pandas import plotting as pdplot
import numpy as np
from pmdarima import arima
from copy import deepcopy
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# from pmdarima.arima import auto_arima
# from pmdarima import model_selection
# from pmdarima.arima.utils import ndiffs
from matplotlib import pyplot as plt
# display options
pd.set_option("display.max_columns", 999)
|
pd.set_option("display.max_rows", 999)
|
pandas.set_option
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 11 08:08:52 2016
@author: tkc
"""
import re
from collections import defaultdict
import pandas as pd
import numpy as np
import scipy
import scipy.stats
from scipy import optimize
from math import factorial # used by Savgol matrix
from scipy.optimize import curve_fit
#%%
def organizecolumns(df1,mycols):
''' Pass df and template (list of desired columns in desired order) and return reorganized newdf
'''
cols1=df1.columns.tolist()
newdf=df1 # avoids modification of passed df
uniquelist=[i for i in cols1 if i not in mycols]
for i,colname in enumerate(uniquelist): # remove cols from df1 that are absent from df2
# newdf.drop(colname, axis=1, inplace=True) # this modifies both passed and returned dfs
newdf=newdf.drop(colname, axis=1)
newdf=newdf[mycols] # reorder columns based on template df
return newdf
def parseelemlist(elemlist):
'''Find and separate multielement peaks to be averaged (e.g. Fe2 & Fe) from longer string of element peaks
e.g. splits "Mg Fe Fe2 Si" into "Mg Si" and "{Fe,[Fe,Fe2]} dictionary'''
# Strip numbers from strings within list
    newlist=[re.match(r'\D+',i).group(0) for i in elemlist]
# find duplicated peaks (multiple peaks per element)
Multielem = defaultdict(list)
for i, item in enumerate(newlist):
Multielem[item].append(i)
Multielem = {k:v for k,v in Multielem.items() if len(v)>1} # dictionary with duplicated item and list with indices
duplist=list(Multielem.values()) # get list
duplist=[item for sublist in duplist for item in sublist] # single list with positions of duplicated elements
# now alter multipeak elements list to give dict with element and then list of peak for that element
for key,value in Multielem.items():
templist=value # dictionary value is list of elem peak index positions
peaklist=[]
for i, index in enumerate(templist): # create new list with original elem peak from index positions
peaklist.append(elemlist[index])
# now replace list of index positions with elempeak names
Multielem.update({key:peaklist}) # key will be multipeak element string i.e. "Fe"
# finally construct new single elements list with multipeak ones removed (handle each separately)
newelemlist=[]
for i in range(0,len(elemlist)):
if i not in duplist:
newelemlist.append(elemlist[i])
return newelemlist, Multielem
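# Quick check of the docstring example above (illustrative only):
def example_parseelemlist():
    singles, multi = parseelemlist(['Mg', 'Fe', 'Fe2', 'Si'])
    return singles, multi  # (['Mg', 'Si'], {'Fe': ['Fe', 'Fe2']})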
def parseelem2(elemlist, Multielem):
''' After multielement peaks removed, also move secondary peaks used as primary to dict (handle separately)
e.g. splits "S Mg Fe2 Si" into "S Mg Si" and "{Fe,[Fe2]} dictionary; same structure and df output
for averaging of Fe, Fe2, or straight Fe2 or straight Fe'''
# starting elemlist will only have single entries (i.e Ti2 but not Ti & Ti2)
newelemlist=[]
for i, elem in enumerate(elemlist):
if re.search(r'\d',elem): # has number
match=re.search(r'\d',elem)
newkey=elem[0:match.start()]
# store alt quant (i.e. on Ti2) with same structure as multiple quant (Ti & Ti2)
# Another entry in multielement list... makes things easier for later quant comparisons
templist=[] # peakIDs added as list (of length 1)
templist.append(elem) # list containing single string (keeps identical data structure)
Multielem.update({newkey:templist}) # add to existing dictionary for separate handling
else:
newelemlist.append(elemlist[i]) # just copy over
return newelemlist, Multielem # return altered element list and multielem dictionary
def getelemthresholds(elemlist, AESquantparams):
'''get element-dependent significance thresholds for each peak from AESquantparams
return dictionary with element and associated significance level'''
thresholds={} # returns list of element dependent thresholds for this element set
for i, elem in enumerate(elemlist):
# find row in AESquantparams for this element
thiselemdata=AESquantparams[(AESquantparams['element']==elem)]
thiselemdata=thiselemdata.squeeze() # series with this elements params
thresholds.update({elem:thiselemdata.siglevel})
return thresholds
def cloneparamrows(df):
    ''' Make param log entry for each areanum - used by calccomposition to correctly process spe files with multiple spatial areas
    passed df is usually list of spe files
    this solves problem that AugerParamLog only has one entry (despite possibly having multiple distinct areas with different spectra)'''
df['Areanumber']=1 # set existing entries as area 1
mycols=df.dtypes.index
newrows=pd.DataFrame(columns=mycols) # blank df for new entries
for index, row in df.iterrows():
numareas=int(df.loc[index]['Areas'])
for i in range(2,numareas+1):
newrow=df.loc[index] # clone this row as series
newrow=newrow.set_value('Areanumber',i)
newrows=newrows.append(newrow)
df=pd.concat([df,newrows], ignore_index=True) # merge new rows with existing ones
df=df.sort_values(['Filenumber','Areanumber'])
return df
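# Minimal check of cloneparamrows (illustrative; assumes the older pandas API
# with Series.set_value/DataFrame.append used throughout this module): a single
# log row with Areas=3 expands to three rows with Areanumber 1-3.
def example_cloneparamrows():
    log = pd.DataFrame([{'Filenumber': 101, 'Areas': 3}])
    return cloneparamrows(log)  # 3 rows with Areanumber 1, 2, 3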
def calccomp(df, Integquantlog, elemlist, AESquantparams):
'''Calculate elemental composition of given files based on input element list
    threshold - ratio of element peak to noise peak (0 means no threshold applied)
load element-dependent significance level from AESquantparams'''
thresholds=getelemthresholds(elemlist, AESquantparams) # Get list of sigma levels for significance/inclusion
# thresholds for both single and multipeak
elemlist, multipeaklist = parseelemlist(elemlist) # list of single peak elements and dict with multipeaks
# check if any of the single peaks are secondary (i.e. quant on Fe2 not main Fe)
elemlist, multipeaklist= parseelem2(elemlist, multipeaklist)
# two element lists needed (elements with one peak and elements with compositions averaged from two peaks i.e. Fe2, Fe3)
# to process compositions from multiple areas, clone rows from spe log (one for each areanum)
df=cloneparamrows(df) # splits single entry for 5 spatial area spe into 5 rows with Areanumber 1-5
df=df.reset_index(drop=True)
df['AESbasis']=0.0 # resets to zero if already present from calcamplitude
mycols=['Filenumber', 'Project', 'Filename', 'FilePath', 'Sample', 'Comments','AESbasis','Areanumber']
for i, elem in enumerate(elemlist): # add columns for basis
df[elem]=0.0 # add col for each element to spelist
df['sig'+elem]=0.0 # copy peak significance (ratio of integrated counts over 1 sigma of background)
df['err'+elem]=0.0 # another for total error in adjusted counts basis
mycols.append(elem)
mycols.append('sig'+elem)
mycols.append('err'+elem)
for i,elem in enumerate(list(multipeaklist.keys())): # get elements (keys) from dict
df[elem]=0.0
df['sig'+elem]=0.0
df['err'+elem]=0.0
mycols.append(elem)
mycols.append('sig'+elem)
mycols.append('err'+elem)
for i, elem in enumerate(elemlist): # now add at.% columns (e.g. %S, %Mg)
colname='%'+elem # at % columns named %S, %Mg, etc.
errname='err%'+elem
mycols.append(colname) # add to column list template
mycols.append(errname)
df[colname]=0.0
df[errname]=0.0
for i,elem in enumerate(list(multipeaklist.keys())): # add multipeak elements
colname='%'+elem # at % columns named %S, %Mg, etc.
errname='err%'+elem
mycols.append(colname) # add to column list template
mycols.append(errname)
df[colname]=0.0
df[errname]=0.0
for i in range(0,len(df)): # loop through all desired spectrum (multiarea ones already have duplicated rows)
filenum=df.iloc[i]['Filenumber']
areanum=df.iloc[i]['Areanumber']
match=Integquantlog[Integquantlog['Filenumber']==filenum] # find integ data for this filenumber
match=match[match['Areanumber']==areanum]
basis=0.0 #
for j, elem in enumerate(elemlist): # handle the single peak elements
temp=match[match['Element']==elem] # finds entry for this element
if len(temp)==1:
# thresholds is dict with required significance level for each element
thisthresh=thresholds.get(elem) # sig level for this element
df=df.set_value(i, 'sig'+elem, temp.iloc[0]['Significance']) # always copy peak significance level
if temp.iloc[0]['Significance']>thisthresh: # if above set threshold then calculate elem's value and add to basis
df=df.set_value(i, elem, temp.iloc[0]['Adjcnts']) # copy adjusted counts of this element
df=df.set_value(i, 'err'+elem, temp.iloc[0]['Erradjcnts'])
basis+=temp.iloc[0]['Adjcnts'] # add this element's value to AES basis
# now handle the multipeak elements (get average value from both peaks)
for key, value in multipeaklist.items(): # key is element (aka colname in df), value is list of peaks in Smdifpeakslog
templist=value # dictionary value is list of elem peak index positions
numlines=len(templist) # this is number of lines that are average (i.e. 2 for Fe&Fe2)
avgval=0.0 # working value for averaged adjamplitude
erravgval=0.0 # combined error from erradjcnts of each line
for k, peak in enumerate(templist): # create new list with original elem peak from index positions
temp=match[match['Element']==peak] # finds integquantlog entry for this peak (match already trimmed to filenum and area)
if len(temp)==1:
thisthresh=thresholds.get(peak) # sig level for this element/peak
                    df=df.set_value(i, 'sig'+key, temp.iloc[0]['Significance']) # copy peak significance level
if temp.iloc[0]['Significance']>thisthresh:
avgval+=temp.iloc[0]['Adjcnts']
                        thiserrperc=(temp.iloc[0]['Erradjcnts']/temp.iloc[0]['Adjcnts'])**2
erravgval+=thiserrperc # sum of square of relative error
else:
numlines=numlines-1 # if peak is zeroed out and not added, this reduces # peaks in average
if numlines>0: # avoid divbyzero if peak is too small
avgval=avgval/numlines # this is now average basis for given element
erravgval=np.sqrt(erravgval) # sqrt of sum of squares is relative error
df=df.set_value(i, key, avgval) # copy adjusted amplitude of this element
                df=df.set_value(i, 'err'+key, avgval*erravgval) # combined actual error of this elem (as determined from multiple lines)
# add value from this element to AESbasis
basis+=avgval
# end of multipeak elements loop
df=df.set_value(i, 'AESbasis', basis) # write total basis value to df
# Now compute at.% for each listed element (incl errors)
for j, elem in enumerate(elemlist):
colname='%'+elem
ratio=df.iloc[i][elem]/df.iloc[i]['AESbasis'] # initialized to zero in cases where peak is below significance threshold
df.set_value(i, colname, ratio)
temp=match[match['Element']==elem] # again find peak entry and get finds entry for this peak
# TODO maybe check threshold again (although element's value will be zero)
if len(temp)==1:
thiserr=temp.iloc[0]['Erradjcnts']
atpercerr=thiserr/df.iloc[i]['AESbasis']
errname='err%'+elem # error column
df.set_value(i, errname, atpercerr) # Writes absolute error in at%
# Also calculate for elements w/ multiple peaks (if present)
for key, value in multipeaklist.items():
templist=value # dictionary value is list of elem peak index positions
numlines=len(templist) # this is number of lines that are average (i.e. 2 for Fe&Fe2)
colname='%'+key
ratio=df.iloc[i][key]/df.iloc[i]['AESbasis']
df.set_value(i, colname, ratio)
# TODO need to propagate errors through Fe & Fe2
errlist=[] # list of errors in % (usually max of two)
for k, peak in enumerate(templist): # create new list with original elem peak from index positions
temp=match[match['Element']==peak] # finds entry for this peak
if len(temp)==1:
if temp.iloc[0]['Adjcnts']>0: # skip negative values
err=temp.iloc[0]['Erradjcnts']/temp.iloc[0]['Adjcnts']
errlist.append(err) # add this to list
# combine errors in quadrature
totalerr=0.0
for j, err in enumerate(errlist):
totalerr+=err**2
totalerr=np.sqrt(totalerr) # percent error in at %
# now get actual error
thisval=df.iloc[i][key] # this is averaged value computed above (possibly zero if below thresholds )
thiserr=thisval*totalerr # error (in Fe) as actual value based on average of multiple peaks
atpercerr=thiserr/df.iloc[i]['AESbasis']
errname='err%'+ key # error column
df.set_value(i, errname, atpercerr) # Writes absolute error in at%
# end of loop calculation for each spectrum
# organize data based on mycols template
df=organizecolumns(df,mycols)
return df
def calcadjcounts(df, AESquantparams, sig=2, kerrors=True):
    '''For each elemental peak in Integquantlog, calculate or recalculate adjusted counts using k-factor2 and mass
result stored in adjcnts column and used for subsequent compositional determinations
can change AESquantresults and recalc at any time; sig (aka 2 sigma errors) is default setting
kerrors -- include error associated with kfactor (along with Poisson errors)'''
if 'Adjcnts' not in df:
df['Adjcnts']=0.0 # new column for adjusted amplitude (if not already present)
if 'Erradjcnts' not in df:
df['Erradjcnts']=0.0 # new column for associated error
if 'err%cnts' not in df:
df['err%cnts']=0.0 # percentage error only from counting statistics (not including kfactor err)
if 'err%total' not in df:
df['err%total']=0.0 # percentage error only from counting statistics (not including kfactor err)
# loop for each element, mask df, get appropriate k-factor & mass
df=df.reset_index(drop=True) # go ahead and reset index
elemlist=np.ndarray.tolist(df.Element.unique()) # list of unique elements from df
for i,elem in enumerate(elemlist):
match=AESquantparams[(AESquantparams['element']==elem)]
match=match.reset_index(drop=True)
kfactor2=match.iloc[0]['kfactor2'] # kfactor and mass for this element/peak
errkf2=match.iloc[0]['errkf2'] # percent error in above for integ method
mass=match.iloc[0]['mass']
elemmask=(df['Element']==elem) # mask for this element in loop
for j in range(0,len(df)): # loop and set adjamplitude to amp*kfact/mass
if elemmask[j]==True: # row has this element
newval=df.iloc[j]['Integcounts']*kfactor2/mass
percerr=sig/np.sqrt(df.iloc[j]['Integcounts']) # 2/sqrt(N) is percent error
totalerr=np.sqrt(errkf2**2+percerr**2) # combine in quadrature
err=newval*totalerr # error value is adjusted counts * 2 sig error percentage
df=df.set_value(j,'Adjcnts',newval)
df=df.set_value(j,'err%cnts',percerr)
df=df.set_value(j,'err%total',totalerr)
df=df.set_value(j,'Erradjcnts',err)
return df
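# Worked numeric illustration of the adjustment above (all values are made up):
# Integcounts=10000, kfactor2=0.8, mass=56, errkf2=0.05, sig=2
def _example_adjusted_counts(integcounts=10000, kfactor2=0.8, mass=56.0, errkf2=0.05, sig=2):
    adj = integcounts * kfactor2 / mass             # Adjcnts   ~ 142.9
    percerr = sig / np.sqrt(integcounts)            # err%cnts  = 0.02
    totalerr = np.sqrt(errkf2**2 + percerr**2)      # err%total ~ 0.054
    return adj, adj * totalerr                      # (Adjcnts, Erradjcnts ~ 7.7)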
''' TESTING
df=lowerfitpeak
'''
def fitlinearregion(df, areanum, elem, AugerFileName):
'''Pass appropriate chunk from Auger spectral dataframe, perform linear fit
return chunk with backfit column added '''
colname='Smcounts'+str(areanum) # use smoothed counts instead of counts for background fits
backfitname='Backfit'+str(areanum)
xcol=df['Energy']
ycol=df[colname] # Counts1, Counts2 or whatever
try:
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(xcol, ycol)
except: # deal with common problems with linregress
print('Fitting error for', elem, ' in file ', AugerFileName)
fitparams=('n/a','n/a','n/a','n/a','n/a') # return all n/a
return df, fitparams
fitparams=(slope, intercept, r_value, p_value, std_err) # tuple to return fitting results
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval=slope * xval + intercept
df=df.set_value(index, backfitname, yval)
return df, fitparams
def makelinebackground(df, areanum, fitparams):
'''Create linear background under peak region
passed small slice of Augerfile df just peak region and small adjacent background '''
if fitparams[0]=='n/a': # prior linregresss problem
return df # return unmodified file
slope=fitparams[0]
intercept=fitparams[1]
backfitname='Backfit'+str(areanum)
for index,row in df.iterrows(): # blend between lower line and upper line
xval=df.loc[index]['Energy']
yval=slope*xval+intercept
df=df.set_value(index,backfitname,yval)
return df # return same df with interpolated background region added
def makeinterplinebackground(df, areanum, lowerfitparams, upperfitparams):
'''Create interpolated background from lower and upper peak fits
passed small slice of Augerfile df just peak region and small adjacent background '''
# check for n/a values
if lowerfitparams[0]=='n/a' or upperfitparams[0]=='n/a': # prior linregresss problem
return df # return unmodified file
lowslope=lowerfitparams[0]
lowintercept=lowerfitparams[1]
upslope=upperfitparams[0]
upintercept=upperfitparams[1]
backfitname='Backfit'+str(areanum)
if len(df)>0: # entire region passed should have no vals in backfit (only interpolated region)
evstep=1/(len(df)+1)
else:
print('Unspecified error in creating background')
return
startrow=df.iloc[0].name # index of first value
for index,row in df.iterrows(): # blend between lower line and upper line
xval=df.loc[index]['Energy']
yval=(1-evstep*(index-startrow))*(lowslope*xval+lowintercept)+evstep*(index-startrow)*(upslope*xval+upintercept)
df=df.set_value(index,backfitname,yval)
return df # return same df with interpolated background region added
def fitCapeak(df, areanum, elem, AugerFileName):
    '''Pass appropriate chunk from Auger spectral dataframe, perform parabolic (2nd order) fit
return chunk with backfit column added '''
colname='Smcounts'+str(areanum)
backfitname='Backfit'+str(areanum)
xcol=df['Energy']
ycol=df[colname] # Counts1, Counts2 or whatever
# find relative minimum
try:
        parabfunc=lambda x, a, b, c: a*x**2 + b*x + c # lambda definition of quadratic (parabola)
fitparams, cov =curve_fit(parabfunc, xcol, ycol) # scipy optimize
ss_res=np.dot((ycol-parabfunc(xcol,*fitparams)), (ycol-parabfunc(xcol,*fitparams))) # dot product of diff between data and function
ymean=np.mean(ycol) # mean of dataset
ss_tot=np.dot((ycol-ymean),(ycol-ymean))
R2=1-(ss_res/ss_tot) # coeff of determination
# diagonal of covariance matrix contains variances for fit params
except: # deal with common problems with linregress
print('Fitting error for', elem, ' in file ', AugerFileName)
fitparams=('n/a','n/a','n/a') # return all n/a
R2='n/a'
return df, fitparams, R2
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval= fitparams[0] * xval**2+ fitparams[1] * xval + fitparams[2]
df=df.set_value(index, backfitname, yval)
return df, fitparams, R2
def makeCabackground(df, areanum, fitparams):
    ''' Fill background col of auger spe file with values derived from 2nd order poly fit
    (pass region under peak not fitted by fitCapeak, which only grabs adjacent background)'''
backfitname='Backfit'+str(areanum)
if len(fitparams)!=3: # prior fitting error already reported via print
return df
A=fitparams[0]
B=fitparams[1]
C=fitparams[2]
for index,row in df.iterrows(): # blend between lower line and upper line
xval=df.loc[index]['Energy']
yval=A*xval**2+ B* xval +C
df=df.set_value(index,backfitname,yval)
return df
def fitcubic(df, areanum, elem, AugerFileName):
'''Pass appropriate chunk from Auger spectral dataframe, perform cubic fit
return chunk with backfit column added '''
colname='Smcounts'+str(areanum) # use smoothed data for background fits
backfitname='Backfit'+str(areanum)
xcol=df['Energy']
ycol=df[colname] # Counts1, Counts2 or whatever
# find relative minimum
try:
cubicfunc=lambda x, a, b, c, d: a*x**3 + b*x**2 + c*x + d # lambda definition of cubic poly
fitparams, cov =curve_fit(cubicfunc, xcol, ycol) # scipy optimize
ss_res=np.dot((ycol-cubicfunc(xcol,*fitparams)), (ycol-cubicfunc(xcol,*fitparams))) # dot product of diff between data and function
ymean=np.mean(ycol) # mean of dataset
ss_tot=np.dot((ycol-ymean),(ycol-ymean))
R2=1-(ss_res/ss_tot) # coeff of determination
# TODO insert special handling for failed fits (some R2 threshold)
# Maybe restrictions on curvature
    except: # deal with failed fit
        print('Fitting error for', elem, ' in file ', AugerFileName)
        fitparams=('n/a','n/a','n/a','n/a') # return all n/a
        R2='n/a'
        return df, fitparams, R2
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval= fitparams[0] * xval**3+ fitparams[1] * xval**2 + fitparams[2] * xval + fitparams[3]
df=df.set_value(index, backfitname, yval)
return df, fitparams, R2
def makecubicbackground(df, areanum, fitparams):
    ''' Fill background col of auger spe file with values derived from the cubic (3rd order) poly fit
    (pass region under peak not fitted by fitcubic, which only grabs adjacent background)'''
backfitname='Backfit'+str(areanum)
if len(fitparams)!=4: # prior fitting error already reported via print
return df
A=fitparams[0]
B=fitparams[1]
C=fitparams[2]
D=fitparams[3]
for index,row in df.iterrows(): # blend between lower line and upper line
xval=df.loc[index]['Energy']
yval= A * xval**3+ B * xval**2 + C * xval + D
df=df.set_value(index,backfitname,yval)
return df
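# Sketch of how the cubic background routines above are chained (illustrative;
# the index bounds come from Elemdata, as in fitbackgrounds below): fit the
# regions flanking the peak, then fill the peak region itself.
def example_cubic_background(Augerfile, areanum, elem, AugerFileName, lower1, lower2, upper1, upper2):
    fitregion = pd.concat([Augerfile[lower1:lower2+1], Augerfile[upper1:upper2+1]])
    fitregion, fitparams, R2 = fitcubic(fitregion, areanum, elem, AugerFileName)
    if R2 != 'n/a':  # skip copy-back for failed fits
        Augerfile.loc[fitregion.index, fitregion.columns] = fitregion
        thispeak = makecubicbackground(Augerfile[lower2:upper1+1], areanum, fitparams)
        Augerfile.loc[thispeak.index, thispeak.columns] = thispeak
    return Augerfile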
'''
For background fit testing
Augerfile=pd.read_csv('C2010W_18Nov15_12231225.csv')
areanum=1
elem=Elemdata[0][0]
fittype=Elemdata[0][1]
integpeak=Elemdata[0][2]
lower1=Elemdata[0][3]
lower2=Elemdata[0][4]
upper1=Elemdata[0][5]
upper2=Elemdata[0][6]
df=fitregion
Augerfile.to_csv('C2010W_18Nov15_12231225.csv', index=False)
'''
''' TESTING OF BELOW FITS
plt.plot(xcol,ycol,'b-') # actual data in blue
plt.plot(xcol,gaussian(fitparams, xcol),'r-') # Gaussian fit in red
'''
def fitgauss(df, areanum, width, elem, AugerFileName, addgauss=True):
    ''' Gaussian fit of direct peaks (pass Augerfile slice just around the peak region);
    optionally adds the Gaussian fit column (addgauss) and returns width and other fit params;
    integwidth passed from AESquantparams value'''
peakname='Peaks'+str(areanum)
# Remove nan values from peak region
df=df.dropna(subset=[peakname]) # remove nan entries from peak
# estimate initial Gaussian parameters from data
if df.empty: # deal with prior failed background fits (no data in this region after dropna
print('Gaussian fitting error for', elem, ' peak in file ', AugerFileName)
fitparams=('n/a','n/a','n/a','n/a') # return all n/a
rsquared='n/a'
ier='n/a'
return df, fitparams, rsquared, ier
xc=df[peakname].idxmax() # estimate center based on peak max index
xc=df.loc[xc]['Energy'] # associated energy value near center
peakarea=df[peakname].sum() # decent area estimate
y0=0 #
params0=[xc,width,peakarea,y0] # initial params list (first guess at gaussian params)
xcol=df['Energy']
ycol=df[peakname] # Counts1, Counts2 or whatever
xcol=xcol.as_matrix() # convert both to numpy matrices
ycol=ycol.as_matrix()
# define standard gaussian funct (xc, width, area and yoffset are init params)
gaussian=lambda params, x: params[3]+params[2]/(params[1]*np.sqrt(2*np.pi))*np.exp(-((x-params[0])**2/(2*params[1]**2)))
# thisgauss= gaussian(params0,xcol)
errfunc=lambda p, xcol, ycol: ycol- gaussian(p,xcol) # lambda error funct definition
# sigma2FWHM = lambda sigma: sigma * sqrt(2 * log(2)) * 2 / sqrt(2) # convert Gaussian widths to FWHM?
try:
fitparams, cov, infodict, mesg, ier =optimize.leastsq(errfunc,params0,args=(xcol,ycol),full_output=True)
ss_err=(infodict['fvec']**2).sum()
ss_tot=((ycol-ycol.mean())**2).sum()
rsquared=1-(ss_err/ss_tot)
except: # fitting problem
print('Gaussian fitting error for', elem, ' peak in file ', AugerFileName)
fitparams=('n/a','n/a','n/a','n/a') # return all n/a
rsquared='n/a'
ier='n/a'
return df, fitparams, rsquared, ier
if addgauss==True:
gaussname="Gauss"+str(areanum)
df[gaussname]='' # add col for gaussian fit
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval=fitparams[3]+fitparams[2]/(fitparams[1]*np.sqrt(2*np.pi))*np.exp(-((xval-fitparams[0])**2/(2*fitparams[1]**2)))
df.set_value(index,gaussname,yval)
return df, fitparams, rsquared, ier
''' TESTING
For background fit testing
df=fitregion
Augerfile=pd.read_csv('C2010W_18Nov15_12231225.csv')
areanum=1
elem=Elemdata[1][0]
fittype=Elemdata[1][1]
integpeak=Elemdata[1][2]
lower1=Elemdata[1][3]
lower2=Elemdata[1][4]
upper1=Elemdata[1][5]
upper2=Elemdata[1][6]
integwidth=Elemdata[0][8]
if ier in [1,2,3,4]: print ('true')
'''
def integpeaks(Augerfile, Backfitparams, areanum, Elemdata, Shifts, logmatch):
    ''' Gaussian fit and counts integration for each direct peak; Shifts is a list of energy shifts of negpeak (same order as Elemdata);
    uses previously fitted backgrounds (Backfitparams) and the background-subtracted Peaks column, optionally saves the Gaussian fit to the source csv,
    and returns integration results (integrated counts, background counts, significance, Gaussian fit params) for the data log;
    desired elements out of data range are skipped (in prior findindices function)
    '''
#create Smdifpeaks dataframe for temp storage of each peak's params
Backfitparams=Backfitparams.dropna(subset=['Rval1']) # skip Gaussian fit if background fit fails
AugerFileName=logmatch.Filename #
# Create temp df to hold and pass linear fit data
mycols=['Filenumber', 'Filename', 'Filepath', 'Sample', 'Comments', 'Areanumber', 'Element', 'Integcounts',
'Backcounts', 'Significance', 'Xc', 'Width', 'Peakarea', 'Y0','Rsquared','Numchannels']
Integresults=pd.DataFrame(columns=mycols) # empty df for all integ results for elems in this spe file
peakname='Peaks'+str(areanum) # this is counts - background (but only calculated in vicinity of known elemental peaks)
backfitname='Backfit'+str(areanum)
# all fit regions modify fit region boundaries for this spectrum based on smooth-differentiated peak (2nd deriv, Savgol (poly=2, pts=11))
# global shifts from smdifpeaks and local shift based on smoothed 2nd derivative
# already incorporated into Elemdata values (lower1,2 and upper1,2 fully adjusted)
# loop through and fit all peaks for each element in this spatial area
for i, (elem, fittype, integpeak, lower1, lower2, upper1, upper2, kfact, integwidth, siglevel) in enumerate(Elemdata):
# linear fit below this elem's peak (shifts and adjustments already made)
Integresult=pd.DataFrame(index=np.arange(0,1),columns=mycols) # blank df row for this element
if i in Backfitparams.index: # skips integ calc if backfit is n/a (but does put n/a entries in datalog)
fitregion=Augerfile[lower1:upper2+1]
if fitregion.empty==True: # skip if no data present (already should be skipped in Elemdata)
continue
# addgauss if save of gaussian peak fit in Augerfile is desired
# Probably could skip Gaussian fitting entirely if peak is weak (check smdiff)
fitregion, fitparams, rsquared, ier = fitgauss(fitregion, areanum, integwidth, elem, AugerFileName, addgauss=True)
addgauss=True # maybe pass this arg from elsewhere
if addgauss==True and ier in [1,2,3,4]: # copy gaussian fit over to csv file if successful
gaussname="Gauss"+str(areanum)
if gaussname not in Augerfile.dtypes.index: # add col if not already present
Augerfile[gaussname]='' # add col for gaussian fit
# Copy gaussian fit to Augerfile... fitregion only modified in new Gauss peak fit column
Augerfile.loc[fitregion.index,fitregion.columns]=fitregion
# if gaussian fit is successful set center integration channel to index nearest xc
            # ier flag of 1,2,3,4 means the fit converged; the rsquared threshold below is a better success check
if rsquared!='n/a': # skip integcounts calc but do put 'n/a' entries in df
if rsquared>0.4:
xc=fitparams[0] # center of gaussian fit
center=int(round(xc,0))
tempdf=fitregion[fitregion['Energy']==center]
try:
centerindex=tempdf[peakname].idxmax() # corresponding index # of peak maximum
except:
print('Gaussian fit center out of data range for ', elem, ' in ', AugerFileName)
centerindex=integpeak # backup method of finding center of integration region
else: # indication of poor Gaussian fit R2<0.4 (use prior knowledge of peak position)
print('Failed gaussian fit for ', elem, ' in ', AugerFileName)
# set center integration channel to value passed by integpeak
# this is ideal energy value but adjusted by shift found using smooth-diff quant method
centerindex=integpeak # already stores index number of central peak (ideal - sm-diff shift value)
# Still do the counts integration for poor gaussian fits
# perform integration over peak center channel + integwidth on either side
Augerpeak=Augerfile[centerindex-integwidth:centerindex+integwidth+1]
integcounts=Augerpeak[peakname].sum() # get counts sum
backgroundcnts=Augerpeak[backfitname].sum() # sum counts over identical width in background fit
# Used for peak significance i.e. typically 2 sigma of background integration over identical width
# full integ width is 1.2*FWHM but integwidth here is closest integer half-width
# Write fit params from tuple over to Integresult df
Integresult.iloc[0]['Integcounts']=integcounts
Integresult.iloc[0]['Backcounts']=backgroundcnts
Integresult.iloc[0]['Significance']=round(integcounts/(np.sqrt(backgroundcnts)),3)
# TODO add 2/sqrt(n) calc of associated percent error (also can calculate later)
Integresult.iloc[0]['Numchannels']=integwidth
Integresult.iloc[0]['Rsquared']=rsquared
Integresult.iloc[0]['Element']=elem
# These will be n/a if fit fails
Integresult.iloc[0]['Xc']=fitparams[0]
Integresult.iloc[0]['Width']=fitparams[1]
Integresult.iloc[0]['Peakarea']=fitparams[2]
Integresult.iloc[0]['Y0']=fitparams[3]
Integresults=Integresults.append(Integresult, ignore_index=True) # add row to list with valid
# end of loop through each element
# assign params that are common to all areas/all peaks into rows of df (copied from original log)
    for index,row in Integresults.iterrows():
        Integresults.loc[index,'Filenumber']=logmatch.Filenumber
        Integresults.loc[index,'Filename']=logmatch.Filename
        Integresults.loc[index,'Filepath']=logmatch.FilePath
        Integresults.loc[index,'Sample']=logmatch.Sample
        Integresults.loc[index,'Comments']=logmatch.Comments
        Integresults.loc[index,'Areanumber']=areanum
Integresults=Integresults[mycols] # put back in original order
return Augerfile, Integresults # df with direct peak fitting info for all areas/ all elements
''' TESTING BACKGROUNDS
elem, fittype, integpeak, lower1, lower2, upper1, upper2, kfact, integwidth, siglevel=Elemdata[5]
'''
def fitbackgrounds(Augerfile, areanum, Elemdata, Shifts, logmatch):
    ''' Background fit for each direct peak; Shifts is a list of energy shifts of negpeak (same order as Elemdata); opens source spectrum as Augerfile,
    fits peak backgrounds above and below using Elemdata, saves background to source csv (overwrites existing fits), also saves linear fit params to log dataframe with position/amplitude/etc;
    desired elements out of data range are skipped (in prior findindices function)
    '''
#create dataframe for temp storage of each peak's params
dim=len(Elemdata)# can't write to non-existant df row so set # of rows as numareas*len(Elements)
dfrow=0 # keep track of row # for Backfitparams dataframe
# Create temp df to hold and pass linear fit data
AugerFileName=logmatch.Filename #
mycols=['Filenumber', 'Filename', 'Filepath', 'Sample', 'Comments', 'Date', 'Areanumber', 'Element', 'Lower1', 'Lower2', 'Upper1',
'Upper2', 'Lowrange','Highrange','Peakshift', 'Fittype', 'P1','P2','P3','P4','Rval1', 'Pval1', 'Stderr1','Rval2', 'Pval2', 'Stderr2']
Backfitparams=pd.DataFrame(index=np.arange(0,dim),columns=mycols)
peakname='Peaks'+str(areanum)
backfitname='Backfit'+str(areanum)
countname='Counts'+str(areanum)
# all fit regions modify fit region boundaries for this spectrum based on smooth-differentiated peak (2nd deriv, Savgol (poly=2, pts=11))
# global shifts from smdifpeaks and local shift based on smoothed 2nd derivative
# already incorporated into Elemdata values (lower1,2 and upper1,2 fully adjusted)
# loop through and fit all peaks for each element in this spatial area
for i, (elem, fittype, integpeak, lower1, lower2, upper1, upper2, kfact, integwidth, siglevel) in enumerate(Elemdata):
# linear fit below this elem's peak (shifts and adjustments already made)
if fittype=='interp_lin':
lowerfitpeak=Augerfile[lower1:lower2+1]
lowevrange=str(round(lowerfitpeak['Energy'].min(),0)) +'-'+ str(round(lowerfitpeak['Energy'].max(),0)) # string with eV range of lower peak
lowerfitpeak, lowerfitparams=fitlinearregion(lowerfitpeak, areanum, elem, AugerFileName) # return df with background fit and fit params
# linear fit above this elem's peak
upperfitpeak=Augerfile[upper1:upper2+1]
upperevrange=str(round(upperfitpeak['Energy'].min(),0)) +'-'+ str(round(upperfitpeak['Energy'].max(),0)) # string with eV range of lower peak
# alter fit region based on second derivative
upperfitpeak, upperfitparams=fitlinearregion(upperfitpeak, areanum, elem, AugerFileName) # return df with background fit and fit params
# Copies modified backfit column back to original (same index range)
Augerfile.loc[lowerfitpeak.index,lowerfitpeak.columns]=lowerfitpeak
Augerfile.loc[upperfitpeak.index,upperfitpeak.columns]=upperfitpeak
# now make interpolated background over peak region
thispeak=Augerfile[lower2:upper1] # just pass empty interpolated region
thispeak=makeinterplinebackground(thispeak, areanum, lowerfitparams, upperfitparams)
# Copies modified interpolated region column back to original (same index range)
Augerfile.loc[thispeak.index,thispeak.columns]=thispeak
if fittype=='line': # single line (same slope before and after peak/ no interpolation)
lowerfitpeak=Augerfile[lower1:lower2+1]
lowevrange=str(round(lowerfitpeak['Energy'].min(),0)) +'-'+ str(round(lowerfitpeak['Energy'].max(),0)) # string with eV range of lower peak
upperfitpeak=Augerfile[upper1:upper2+1]
upperevrange=str(round(upperfitpeak['Energy'].min(),0)) +'-'+ str(round(upperfitpeak['Energy'].max(),0)) # string with eV range of lower peak
fitregion=pd.concat([lowerfitpeak, upperfitpeak])
fitpeak, fitparams=fitlinearregion(fitregion, areanum, elem, AugerFileName) # return df with background fit and fit params
if fitparams[2]!='n/a': # holds R2 value and skip for failed fits
Augerfile.loc[fitpeak.index,fitpeak.columns]=fitpeak # Copy/save to original file
# Need to generate values for peak region from single linear fit
thispeak=Augerfile[lower2:upper1]
thispeak=makelinebackground(thispeak, areanum, fitparams)
Augerfile.loc[thispeak.index,thispeak.columns]=thispeak # copy peak region to source data file
if fittype=='cubic': # special treatment
lowerfitpeak=Augerfile[lower1:lower2+1]
lowevrange=str(round(lowerfitpeak['Energy'].min(),0)) +'-'+ str(round(lowerfitpeak['Energy'].max(),0)) # string with eV range of lower peak
upperfitpeak=Augerfile[upper1:upper2+1]
upperevrange=str(round(upperfitpeak['Energy'].min(),0)) +'-'+ str(round(upperfitpeak['Energy'].max(),0)) # string with eV range of lower peak
fitpeak=pd.concat([lowerfitpeak, upperfitpeak])
fitpeak, fitparams, R2 =fitcubic(fitpeak, areanum, elem, AugerFileName) # polynomial fit
if R2!='n/a': #skip failed fit background copy
Augerfile.loc[fitpeak.index,fitpeak.columns]=fitpeak# copy over to full spe file
thispeak=Augerfile[lower2:upper1+1] # actual peak region
thispeak = makecubicbackground(thispeak, areanum, fitparams) # now fill peak region with 2nd order poly background
Augerfile.loc[thispeak.index,thispeak.columns]=thispeak # copy peak region to source data file
if fittype=='Ca': # special treatment
# find relative minimum if present between C falling edge and Ca peak
smcountname='Smcounts'+str(areanum)
minindex=Augerfile[lower1:lower1+10][smcountname].idxmin() # index value of min left of Ca peak (counts or smoothed counts)
# minval=Augerfile[lower1:lower1+10][countname].min()
# maxindex=Augerfile[integpeak-5:integpeak+5][countname].idxmax() # Ca peak index if present
# maxval=Augerfile[integpeak-5:integpeak+5][countname].max()
# polynomial fit over two pts at relative min left of peak and small region right of peak
thispeak=
|
pd.concat([Augerfile[minindex-1:minindex+1],Augerfile[integpeak+10:integpeak+15]])
|
pandas.concat
|
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import assignment2_helper as helper
# Look pretty...
matplotlib.style.use('ggplot')
# Do * NOT * alter this line, until instructed!
scaleFeatures = True
# TODO: Load up the dataset and remove any and all
# Rows that have a nan. You should be a pro at this
# by now ;-)
#
# .. your code here
original_df = pd.read_csv ('Datasets/kidney_disease.csv')
new_df = original_df.dropna()
# Create some color coded labels; the actual label feature
# will be removed prior to executing PCA, since it's unsupervised.
# You're only labeling by color so you can see the effects of PCA
labels = ['red' if i=='ckd' else 'green' for i in new_df.classification]
# TODO: Instead of using an indexer to select just the bgr, rc, and wc,
#alter your assignment code to drop all the nominal features listed above.
#Be sure you select the right axis for columns and not rows, otherwise Pandas will complain!
#
# .. your code here ..
new_df.dtypes
df=new_df.drop(['id', 'classification', 'rbc', 'pc', 'pcc', 'ba', 'htn', 'dm', 'cad', 'appet', 'pe', 'ane'], axis=1)
# TODO: Print out and check your dataframe's dtypes. You'll probably
# want to call 'exit()' after you print it out so you can stop the
# program's execution.
df.dtypes
# Does everything look like it should / properly numeric?
#If not, make code changes to coerce the remaining column(s).
#
df.wc=
|
pd.to_numeric(df.wc)
|
pandas.to_numeric
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 =
|
pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
|
pandas.DataFrame
|
"""
///////////////////////////////////////////////////////////////
APPLICATION BY: <NAME>
Artemis created with: Qt Designer and PySide6
JUNE 10, 2021
V: 1.0.0
///////////////////////////////////////////////////////////////
"""
import pandas as pd
from pandas.core.frame import DataFrame
class Aimer:
"""Class that contains benchmark data for Aimer."""
def get_start_benchmarks(self) -> dict:
"""Benchmarks used to determine starting rankdatetime A combination of a date and a time. Attributes: ()
Returns:
[dict]: A dictonary containing scenario keys and score values.
"""
starterBenchmarks = {
"Beginner": {
"1wall6targets TE": 120,
"Bounce 180 Tracking": 45,
"Close Long Strafes Invincible": 11500,
},
"IBeginner": {
"1wall6targets TE": 130,
"Bounce 180 Tracking": 50,
"Close Long Strafes Invincible": 12800,
},
"AdvBeginner": {
"1wall6targets TE": 140,
"Bounce 180 Tracking": 56,
"Close Long Strafes Invincible": 1400,
},
"SubIntermediate": {
"1wall6targets TE": 150,
"Bounce 180 Tracking": 63,
"Close Long Strafes Invincible": 14800,
},
"Intermediate": {
"1wall6targets TE": 165,
"Bounce 180 Tracking": 73,
"Close Long Strafes Invincible": 16100,
},
"Advanced": {
"1wall6targets TE": 180,
"Bounce 180 Tracking": 85,
"Close Long Strafes Invincible": 16800,
},
"SubAimbeast": {
"1wall6targets TE": 181,
"Bounce 180 Tracking": 86,
"Close Long Strafes Invincible": 16801,
},
}
return starterBenchmarks
def get_beginner_routines(self) -> list:
"""Returns three routines for beginners. Tracking, Click, and Complete in that order.
Returns:
list: Returns 3 lists for tacking, click, and complete routines.
"""
tracking = [
"Midrange Long Strafes Invincible",
"air far long strafes",
"Vertical Long Strafes",
"Bounce 180 Tracking Large",
"1wall6targets TE",
"Tile Frenzy – Strafing – 01",
"Air Invincible",
]
click = [
"1wall6targets TE",
"Tile Frenzy – Strafing – 02",
"5 Sphere Hipfire small",
"Tile Frenzy Flick",
"Midrange Long Strafes Invincible",
"Vertical Long Strafes",
]
complete = [
"Midrange Long Strafes Invincible",
"1wall6targets TE",
"air far long strafes",
"Tile Frenzy – Strafing – 01",
"Vertical Long Strafes",
"Target Switching 90",
]
return tracking, click, complete
def get_ibeginner_routines(self) -> list:
"""Returns three routines for intermediate beginners. Tracking, Click, and Complete in that order.
Returns:
list: Returns 3 lists for tracking, click, and complete routines.
"""
tracking = [
"Close Long Strafes Invincible",
"Smoothness Trainer",
"Vertical Long Strafes",
"Tile Frenzy 180 Strafing 200 Percent Tracking",
"1wall9000targets",
"Tile Frenzy 180",
]
click = [
"1w2ts reload",
"Tile Frenzy 180",
"1wall5targets_pasu slow",
"patTargetSwitch easy ",
"fuglaaXYLongstrafes",
"Vertical Long Strafes",
]
complete = [
"Close Long Strafes Invincible",
"1w2ts reload",
"gp far long strafes",
"1wall9000targets",
"patTargetSwitch easy",
"1wall5targets_pasu slow",
]
return tracking, click, complete
def get_advbeginner_routines(self) -> list:
"""Returns three routines for advanced beginners. Tracking, Click, and Complete in that order.
Returns:
list: Returns 3 lists for tracking, click, and complete routines.
"""
tracking = [
"RexStrafesCata Invincible",
"Close Long Strafes Invincible",
"LG Pin Practice 360",
"1wall6Targets TE",
"Thin Aiming Long Invincible",
"1w2ts reload",
]
click = [
"1wall6targets TE",
"1w2ts reload",
"Bounce 180 Reload",
"patTargetSwitch easy",
"1wall5targets_pasu slow",
"gp 2 invincible",
]
complete = [
"RexStrafesCata Invincible",
"1wall6targets TE",
"Air Target Switching 360",
"1wall5targets_pasu slow",
"Close Long Strafes Invincible",
"5 Sphere Hipfire small",
]
return tracking, click, complete
def get_subintermediate_routines(self) -> list:
"""Returns three routines for sub-intermediate. Tracking, Click, and Complete in that order.
Returns:
list: Returns 3 lists for tracking, click, and complete routines.
"""
tracking = [
"Midrange Fast Strafes V2",
"Close Fast Strafes Easy",
"Vertical Long Strafes",
"Tile Frenzy 180 Strafing 300 Percent Tracking",
"Bounce 180 Tracking",
"Ground Plaza Easy",
]
click = [
"Bounce 180 Reload",
"1w6ts reload",
"1wall5targets_pasu Reload",
"Air Invincible 6",
"Wide Wall 6Targets",
"Ground Plaza Easy",
]
complete = [
"Midrange Fast Strafes V2",
"Close Fast Strafes Easy",
"Bounce 180 Reload",
"1wall5targets_pasu Reload",
"Tile Frenzy 180 Strafing 300 Percent Tracking",
"Vertical Long Strafes",
]
return tracking, click, complete
def get_intermediate_routines(self) -> list:
"""Returns three routines for intermediate. Tracking, Click, Complete and target scores in that order.
Returns:
tuple: Returns 3 lists (tracking, click, complete) and 3 dicts of target scores.
"""
tracking = [
"Close Fast Strafes Invincible",
"Vertical Fast Strafes",
"Ground Plaza Easy",
"Air no UFO no SKYBOTS",
"Bounce 180 Tracking",
"1wall5targets_pasu Reload",
]
click = [
"Close Fast Strafes Easy Invincible",
"1wall5targets_pasu Reload",
"1wall 6targets small",
"Target Acquisition Flick Easy",
"Wide Wall 6Targets",
"Bounce 180",
]
complete = [
"Close Fast Strafes Invincible",
"Vertical Fast Strafes",
"Ground Plaza Easy",
"Bounce 180 Tracking",
"1wall5targets_pasu Reload",
"Bounce 180",
"Target Acquisition Flick Easy",
]
# Must beat at least 5 to advance.
trackingTargetScores = {
"Close Fast Strafes Invincible": 9800,
"Vertical Fast Strafes": 8500,
"Ground Plaza Easy": 894,
"Air no UFO no SKYBOTS": 835,
"Bounce 180 Tracking": 74,
"1wall5targets_pasu Reload": 103,
}
clickTargetScores = {
"Close Fast Strafes Easy Invincible": 13000,
"1wall5targets_pasu Reload": 105,
"1wall 6targets small": 1025,
"Target Acquisition Flick Easy": 92,
"Wide Wall 6Targets": 117,
"Bounce 180": 70,
}
completeTargetScores = {
"Close Fast Strafes Invincible": 9400,
"Vertical Fast Strafes": 8300,
"Ground Plaza Easy": 892,
"Bounce 180 Tracking": 73,
"1wall5targets_pasu Reload": 103,
"Bounce 180": 65,
"Target Acquisition Flick Easy": 90,
}
return (
tracking,
click,
complete,
trackingTargetScores,
clickTargetScores,
completeTargetScores,
)
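# Hedged illustration (not part of the original class) of the
# "must beat at least 5 to advance" note above: count how many of the
# target scores a player has met. `player_scores` is an assumed dict of
# the player's best score per scenario.
#
#   beaten = sum(1 for scen, target in trackingTargetScores.items()
#                if player_scores.get(scen, 0) >= target)
#   can_advance = beaten >= 5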
def get_advanced_routines(self):
"""Returns three routines for intermediate. Tracking, Click, Complete and target scores in that order."""
# Must beat at least 10 of the 16 in each to advance.
trackingTargetScores = {
"Close Fast Strafes Invincible": 11000,
"Close FS Easy Dodge": 17400,
"Vertical Fast Strafes": 10000,
"Air no UFO no SKYBOTS": 857,
"Air Dodge": 110,
"Pasu Track Invincible v2": 6300,
"Ground Plaza NO UFO": 99913,
"lcg3 Reborn Varied": 28000,
"Popcorn Goated Tracking Invincible": 1100,
"Thin Gauntlet V2": 872,
"Bounce 180 Tracking Small Invincible Fixed": 4000,
"kinTargetSwitch small no reload": 16300,
"Bounce 180 Tracking Small": 60,
"psalmflick Strafing Tracking": 64,
"patTargetSwitch Dodge 360": 11600,
"Bounce 180 Tracking": 83,
"kinTargetSwitch No Reload": 21000,
"1wall5targets_pasu Reload": 118,
"Popcorn Sparky": 200,
"Bounce 180": 80,
"Wide Wall 6Targets": 124,
}
clickTargetScores = {
"Ground Plaza Easy": 903,
"Pasu Track Invincible v2": 6200,
"Air no UFO no SKYBOTS": 852,
"Air Dodge": 108,
"Pokeball Frenzy Auto Small": 48,
"1wall6 flick": 118,
"devTargetSwitch Goated": 530,
"patTargetSwitch Dodge 360": 11600,
"Bounce 180 Tracking": 82,
"patTargetSwitch No Reload": 6800,
"Popcorn Sparky": 210,
"Reflex Flick – Fair": 5000,
"Close Fast Strafes Shotgun": 14200,
"patTargetClick 1shot": 6000,
"1wall5targets_pasu Reload": 122,
"Pasu Dodge Easy": 380000,
"Bounce 180": 85,
"1wall 6targets small": 1150,
"Target Acquisition Flick": 65,
"Reflex Flick – Mini": 4800,
"1wall9000targets": 250,
"Wide Wall 6Targets": 127,
"1w2ts reload": 92,
"Microshot Speed": 185,
"1wall6targets TE": 180,
}
completeTargetScores = {
"Close Fast Strafes Invincible": 10700,
"Close FS Easy Dodge": 17000,
"Vertical Fast Strafes": 9750,
"Pasu Track Invincible v2": 6200,
"Air no UFO no SKYBOTS": 854,
"Air Dodge": 109,
"psalmflick Strafing Tracking": 65,
"kinTargetSwitch small no reload": 16500,
"kinTargetSwitch No Reload": 21000,
"patTargetSwitch Dodge 360": 12000,
"patTargetSwitch No Reload": 7000,
"Bounce 180 Tracking": 85,
"1wall6 flick": 120,
"Popcorn Sparky": 200,
"Reflex Flick – Fair": 4700,
"Close Fast Strafes Shotgun": 13800,
"patTargetClick 1shot": 5850,
"1wall5targets_pasu Reload": 118,
"Pasu Dodge Easy": 370000,
"Bounce 180": 80,
"1w2ts reload": 90,
"1wall9000targets": 243,
"Wide Wall 6Targets": 124,
"1wall 6targets small": 1100,
"Microshot Speed": 182,
}
return (
list(trackingTargetScores.keys()),
list(clickTargetScores.keys()),
list(completeTargetScores.keys()),
trackingTargetScores,
clickTargetScores,
completeTargetScores,
)
class Sparky:
"""Sparky Object."""
def __init__(self):
self.data = {
"Scenario": [
"1wall5targets_pasu Reload"
"Popcorn Sparky"
"ww6t reload"
"1w6ts reload v2"
"Bounce 180 Sparky"
"Air no UFO no SKYBOTS"
"Ground Plaza Sparky v3"
"Popcorn Goated Tracking Invincible"
"Thin Gauntlet V2"
"Pasu Track Invincible v2"
"patTS NR"
"Bounce 180 Tracking"
"kinTS NR"
"devTS Goated NR"
"voxTargetSwitch"
],
"Silver": [
80,
70,
105,
85,
55,
780,
850,
500,
825,
3600,
5400,
45,
15000,
400,
67,
],
"Gold": [
90,
100,
115,
95,
65,
810,
860,
600,
845,
4500,
6000,
56,
16500,
450,
77,
],
"Platinum": [
105,
150,
125,
105,
75,
835,
872.5,
800,
862.5,
5500,
6500,
68,
18500,
500,
87,
],
"Diamond": [
120,
200,
135,
115,
85,
853,
885,
1100,
872.5,
6200,
7100,
80,
20500,
550,
97,
],
"Master": [
135,
250,
145,
125,
95,
868,
892,
1300,
882.5,
6900,
7700,
90,
22000,
600,
107,
],
"Grandmaster": [
145,
300,
160,
137,
105,
877,
900,
1600,
895,
7300,
8000,
100,
23500,
630,
112,
],
}
self.df =
|
pd.DataFrame(data=self.data)
|
pandas.DataFrame
|
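# The Sparky table above maps each scenario to the score required for each
# rank. Below is a small, self-contained sketch (not part of the original
# module) of how such a table might be queried; the rank order mirrors the
# dict above, but the lookup rule itself is an assumption for illustration.
import pandas as pd

RANKS = ["Silver", "Gold", "Platinum", "Diamond", "Master", "Grandmaster"]

def scenario_rank(df: pd.DataFrame, scenario: str, score: float) -> str:
    """Return the highest rank whose threshold `score` meets, else 'Unranked'."""
    row = df.loc[df["Scenario"] == scenario].iloc[0]
    achieved = "Unranked"
    for rank in RANKS:
        if score >= row[rank]:
            achieved = rank
    return achieved

# e.g. scenario_rank(Sparky().df, "Bounce 180 Tracking", 71) -> "Platinum"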
import glob
import itertools
import json
import logging
import os
from typing import Any, Dict, List, Optional, Tuple, Union
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tensorflow import keras
import nanotune as nt
from nanotune.classification.classifier import (DEFAULT_CLF_PARAMETERS,
DEFAULT_DATA_FILES,
METRIC_NAMES, Classifier)
logger = logging.getLogger(__name__)
metric_mapping = {
"accuracy_score": "accuracy",
"auc": "AUC",
"average_precision_recall": "precision recall",
"brier_score_loss": "Brier loss",
}
def qf_model(
input_shape: Tuple[int, int, int, int],
learning_rate: float = 0.001,
) -> keras.Sequential:
model = keras.Sequential()
model.add(
keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation="relu",
input_shape=input_shape,
data_format="channels_last",
padding="same",
)
)
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(1024, activation="relu"))
model.add(keras.layers.Dropout(0.4))
model.add(keras.layers.Dense(512, activation="relu"))
model.add(keras.layers.Dropout(0.4))
model.add(keras.layers.Dense(128, activation="relu"))
model.add(keras.layers.Dropout(0.4))
model.add(keras.layers.Dense(2, activation="softmax"))
model.compile(
loss=keras.losses.mean_squared_error, # categorical_crossentropy,
optimizer=keras.optimizers.Adam(lr=learning_rate),
metrics=["accuracy"],
)
return model
def my_model(
input_shape: Tuple[int, int, int, int],
learning_rate: float = 0.001,
) -> keras.Sequential:
""" """
model = keras.Sequential()
model.add(
keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation="relu",
input_shape=input_shape,
data_format="channels_last",
padding="same",
)
)
model.add(
keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation="relu",
input_shape=input_shape,
data_format="channels_last",
padding="same",
)
)
model.add(
keras.layers.Conv2D(
64,
kernel_size=(3, 3),
activation="relu",
input_shape=input_shape,
data_format="channels_last",
padding="same",
)
)
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(1024, activation="relu"))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(512, activation="relu"))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(128, activation="relu"))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(2, activation="softmax"))
model.compile(
loss=keras.losses.mean_squared_error, # categorical_crossentropy,
optimizer=keras.optimizers.Adam(lr=learning_rate),
metrics=["accuracy"],
)
return model
def load_syn_data(
data_files: Optional[Dict[str, List[str]]] = None,
data_types: Optional[List[str]] = None,
for_CNN: bool = True,
) -> Tuple[np.ndarray, np.ndarray]:
""""""
if data_files is None:
# data_files = {
# 'qflow': ['qflow_data_large.npy'],
# 'capa': ['noiseless_data.npy'],
# }
data_files = {
"qflow": [
"augmented_qf_data1.npy",
"augmented_qf_data2.npy",
"augmented_qf_data3.npy",
],
"capa": [
"augmented_cm_data1.npy",
"augmented_cm_data2.npy",
"augmented_cm_data3.npy",
],
}
else:
if not all(elem in data_files.keys() for elem in ["qflow", "capa"]):
print('data_files must contain following keys: "qflow", "capa".')
raise ValueError
if data_types is None:
data_types = ["signal"]
qf_data, qf_labels = _load_data(
data_files["qflow"],
data_types=data_types,
)
qf_data = qf_data * 2
cm_data, cm_labels = _load_data(
data_files["capa"],
data_types=data_types,
)
cm_data = cm_data * 0.6
syn_data = np.concatenate((qf_data, cm_data), axis=0)
syn_labels = np.concatenate((qf_labels, cm_labels), axis=0)
p = np.random.permutation(len(syn_labels))
syn_data = syn_data[p]
syn_labels = syn_labels[p]
if not for_CNN and len(data_types) == 2:
syn_labels = np.argmax(syn_labels, axis=1)
m = syn_labels.shape[0]
syn_curr = syn_data[:, :, :, 0].reshape(m, -1)
syn_freq = syn_data[:, :, :, 1].reshape(m, -1)
syn_data = np.concatenate((syn_curr, syn_freq), axis=1)
else:
logger.warning(
"No data reshaping for parametric binary classifiers" + " was performed."
)
return syn_data, syn_labels
def load_exp_data(
which: List[str],
data_files: Optional[Dict[str, List[str]]] = None,
data_types: Optional[List[str]] = None,
for_CNN: bool = True,
) -> List[Tuple[np.ndarray, np.ndarray]]:
""""""
if data_files is None:
# data_files = {
# 'clean': ['clean_exp_dots.npy'],
# 'good': ['exp_dots_corrected.npy'],
# 'bad': ['exp_dots_corrected.npy'],
# 'good_minus_clean': ['exp_dots_minus_clean.npy'],
# 'good_and_bad': ['exp_dots_corrected.npy'],
# 'good_and_bad_minus_clean': None,
# }
data_files = {
"clean": [
"augmented_clean_exp_dots1.npy",
"augmented_clean_exp_dots2.npy",
],
"good": [
"augmented_exp_dots_corrected1.npy",
"augmented_exp_dots_corrected2.npy",
"augmented_exp_dots_corrected3.npy",
],
"bad": [
"augmented_exp_dots_corrected1.npy",
"augmented_exp_dots_corrected2.npy",
"augmented_exp_dots_corrected3.npy",
],
"good_minus_clean": [
"augmented_exp_dots_minus_clean1.npy",
"augmented_exp_dots_minus_clean2.npy",
"augmented_exp_dots_minus_clean3.npy",
],
"good_and_bad": [
"augmented_exp_dots_corrected1.npy",
"augmented_exp_dots_corrected2.npy",
"augmented_exp_dots_corrected3.npy",
],
"good_and_bad_minus_clean": [],
}
if data_types is None:
data_types = ["signal"]
exp_data_all = []
for dtype in which:
if dtype == "good_and_bad":
all_data, all_labels = _load_good_and_poor(
data_files[dtype], data_types=data_types
)
exp_data_all.append((all_data, all_labels))
elif dtype == "bad":
all_data, all_labels = _load_data(
data_files[dtype], data_types=data_types, relevant_labels=[0, 2]
)
exp_data_all.append((all_data, all_labels))
elif dtype == "good_and_bad_minus_clean":
if data_files["good_and_bad_minus_clean"] is None:
f_name = data_files["good_minus_clean"]
else:
f_name = data_files["good_and_bad_minus_clean"]
all_data, all_labels = _load_good_and_poor(f_name, data_types=data_types)
exp_data_all.append((all_data, all_labels))
elif dtype in ["clean", "good", "good_minus_clean"]:
# not in ['good_and_bad', 'good_and_bad_minus_clean', 'bad']:
data, labels = _load_data(
data_files[dtype],
data_types=data_types,
)
exp_data_all.append((data, labels))
else:
logger.error("Trying to load unknown data.")
if not for_CNN and len(data_types) == 2:
for idd, sub_data in enumerate(exp_data_all):
data = sub_data[0]
labels = sub_data[1]
labels = np.argmax(labels, axis=1)
m = labels.shape[0]
curr = data[:, :, :, 0].reshape(m, -1)
freq = data[:, :, :, 1].reshape(m, -1)
data = np.concatenate((curr, freq), axis=1)
exp_data_all[idd] = (data, labels)
else:
logger.warning(
"No data reshaping for parametric binary classifiers" + " was performed."
)
return exp_data_all
def _load_good_and_poor(
filenames: List[str],
data_types: Optional[List[str]] = None,
) -> Tuple[np.ndarray, np.ndarray]:
""""""
if data_types is None:
data_types = ["signal"]
if isinstance(filenames, str):
filenames = [filenames]
singledots, single_labels = _load_data(
filenames,
data_types=data_types,
regime="singledot",
)
doubledots, double_labels = _load_data(
filenames,
data_types=data_types,
regime="doubledot",
)
single_labels = np.argmax(single_labels, axis=1)
double_labels = np.argmax(double_labels, axis=1)
n_each = int(np.min([len(single_labels), len(double_labels)]))
sd_ids = np.random.choice(n_each, n_each, replace=False).astype(int)
dd_ids = np.random.choice(n_each, n_each, replace=False).astype(int)
singledot = singledots[sd_ids]
sd_labels = np.zeros(n_each, dtype=int)
doubledot = doubledots[dd_ids]
dd_labels = np.ones(n_each, dtype=int)
all_data = np.concatenate((singledot, doubledot), axis=0)
all_labels = np.concatenate((sd_labels, dd_labels), axis=0)
p = np.random.permutation(len(all_labels))
all_data = all_data[p]
all_labels = all_labels[p]
all_labels = keras.utils.to_categorical(all_labels)
return all_data, all_labels
def _load_data(
files: List[str],
regime: str = "dotregime",
data_types: Optional[List[str]] = None,
shuffle: bool = True,
relevant_labels: Optional[List[int]] = None,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Load data from multiple data files, but do it separately to ensure
'select_equal_populations' won't accidentally
select data mainly from one file.
"""
if data_types is None:
data_types = ["signal", "frequencies"]
data = np.empty([0, 50, 50, len(data_types)])
labels = np.empty([0])
for dfile in files:
data_loader = Classifier(
[dfile],
regime,
data_types=data_types,
relevant_labels=relevant_labels,
)
(sub_data, sub_labels) = data_loader.select_equal_populations(
data_loader.original_data, data_loader.labels
)
m = sub_data.shape[0]
if len(data_types) > 2:
raise NotImplementedError
if len(data_types) == 2:
data_sig = sub_data[:, :2500].reshape(m, 50, 50, 1)
data_frq = sub_data[:, 2500:].reshape(m, 50, 50, 1)
sub_data = np.concatenate((data_sig, data_frq), axis=3)
# print(sub_data.shape)
# print(data.shape)
if len(data_types) == 1:
sub_data = sub_data.reshape(m, 50, 50, 1)
data = np.concatenate((data, sub_data), axis=0)
labels = np.concatenate((labels, sub_labels), axis=0)
if shuffle:
p = np.random.permutation(len(labels))
data = data[p]
labels = labels[p]
labels = keras.utils.to_categorical(labels)
return data, labels
def select_equal_populations(
data: np.ndarray,
labels: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Make sure we have 50% of one and 50% of the other population
"""
# self.data_to_use = copy.deepcopy(self.original_data)
populations_labels, population_counts = np.unique(labels, return_counts=True)
n_each = int(np.min(population_counts))
new_data = np.empty([n_each * len(populations_labels), data.shape[-1]])
new_labels = np.empty(n_each * len(populations_labels), int)
for ii, label in enumerate(populations_labels):
idx = np.where(labels == int(label))
idx = np.random.choice(idx[0], n_each, replace=False)
idx = idx.astype(int)
dat = data[idx]
new_data[ii * n_each : (ii + 1) * n_each] = dat
label_array = np.ones(n_each, dtype=int) * int(label)
new_labels[ii * n_each : (ii + 1) * n_each] = label_array
p = np.random.permutation(len(new_labels))
return new_data[p], new_labels[p]
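# A quick, illustrative sketch of what the balancing above does: with labels
# [0, 0, 0, 1] the minority count is 1, so one sample per label is kept and
# the result is shuffled.
#
#   data = np.arange(8).reshape(4, 2).astype(float)
#   labels = np.array([0, 0, 0, 1])
#   balanced_data, balanced_labels = select_equal_populations(data, labels)
#   # balanced_data.shape == (2, 2); balanced_labels holds one 0 and one 1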
def print_data_stats(data, labels) -> None:
print("number of samples: {}".format(data.shape[0]))
print(
"populations (number and count): {}, {}".format(
*np.unique(labels, return_counts=True)
)
)
print("\n")
print("max value: {}".format(np.max(data)))
print("min value: {}".format(np.min(data)))
print("std: {}".format(np.std(data)))
print("median: {}".format(np.median(data)))
a = np.hstack(data[:500].flatten())
# _ = plt.hist(a, bins=100, range=[np.min(data), np.min(data) + 2*np.std(data)]) # arguments are passed to np.histogram
_ = plt.hist(a, bins=100, range=[np.min(data), 1])
plt.title("Histogram qf_data")
plt.show()
def feature_combination_metrics(
classifier: str,
data_filenames: List[str],
category: str,
metric: str = "accuracy_score",
filename: Optional[str] = None,
classifier_parameters: Dict[str, Union[str, float, int]] = {},
feature_indexes: List[int] = [0],
n_iter: int = 75,
) -> Dict[str, Any]:
""""""
# TODO: Fix this method. It is broken. Has not been used for a while
# n_feat = len(feature_indexes)
# scores: List[str] = []
# for k in range(1, n_feat+1):
# f_indx = itertools.combinations(range(1, n_feat+1), k)
# for f_combo in feature_indexes:
# qclf = Classifier(data_filenames,
# category,
# classifier=classifier,
# hyper_parameters=classifier_parameters,
# data_types=['features'],
# feature_indexes=list(f_combo),
# )
# infos = qclf.compute_metrics(n_iter=n_iter)
# features_str = ''
# sub_feat = [features[f] for f in f_combo]
# scores.append([', '.join(sub_feat), infos[metric]['mean'],
# infos[metric]['std']])
# info_dict = {
# 'stage': category,
# 'classifier': qclf.clf_type,
# 'classifier_parameters': qclf.clf.get_params(),
# 'n_iter': n_iter,
# 'data_files': qclf.file_paths,
# 'scores': scores,
# 'metric': metric,
# }
# if filename is None:
# filename = qclf.clf_type + '_' + metric + '.json'
# path = os.path.join(nt.config['db_folder'], category + '_features_metrics')
# if not os.path.exists(path):
# os.makedirs(path)
# path = os.path.join(path, filename)
# with open(path, 'w') as f:
# json.dump(info_dict, f)
# return info_dict
logger.warning("feature_combination_metrics under construction")
return {}
# def feature_metric_to_latex(directory: str,
# filenames: List[str],
# tables_folder: str) -> None:
# """
# """
# metric = 'accuracy_score'
# classifiers = ['SVC_rbf', 'SVC_linear', 'MLPClassifier']
# stage = 'po'
# directory = '/Users/jana/Documents/code/nanotune/measurements/databases/' + stage + '_features_metrics'
# for classifier in classifiers:
# filename = classifier + '_' + metric + '.json'
# path = os.path.join(directory, filename)
# with open(path) as f:
# feat_data = json.load(f)
# header = ['features', 'mean ' + metric, 'std']
# scores = sorted(feat_data['scores'], key=itemgetter(1), reverse=True)[0:40]
# df = pd.DataFrame(scores)
# filepath = os.path.join(tables_folder, stage + '_' + classifier + '_' + metric +'.tex')
# with open(filepath, 'w') as tf:
# with pd.option_context("max_colwidth", 1000):
# tf.write(df.to_latex(index=False,
# formatters=[dont_format, format_float,
# format_float],
# header=header,
# column_format='lcc').replace('\\toprule', '\\hline').replace('\\midrule', '\\hline').replace('\\bottomrule','\\hline'))
def performance_metrics_to_latex(
tables_directory: str,
metric: str = "accuracy_score",
file_directory: Optional[str] = None,
) -> None:
""""""
categories: Dict[str, Tuple[str, List[List[str]]]] = {
"pinchoff": (
"pinchoff",
[["signal"], ["frequencies"], ["frequencies", "signal"], ["features"]],
),
"singledot": ("dots", [["signal"], ["frequencies"], ["signal", "frequencies"]]),
"doubledot": ("dots", [["signal"], ["frequencies"], ["frequencies", "signal"]]),
"dotregime": ("dots", [["signal"], ["frequencies"], ["frequencies", "signal"]]),
}
classifiers = [
"DecisionTreeClassifier",
"GaussianProcessClassifier",
"KNeighborsClassifier",
"LogisticRegression",
"MLPClassifier",
"QuadraticDiscriminantAnalysis",
"RandomForestClassifier",
"SVC",
]
if file_directory is None:
file_directory = os.path.join(nt.config["db_folder"], "classifier_metrics")
header2 = [
"classifier ",
metric_mapping[metric],
"evaluation time [s]",
metric_mapping[metric],
"evaluation time [s]",
]
for category, settings in categories.items():
data_file = settings[0]
data_types = settings[1]
for data_type in data_types:
scores = []
base_pattern = data_file + "_" + category + "*"
all_files = glob.glob(os.path.join(file_directory, base_pattern))
pattern = "_".join(data_type)
rel_files = [f for f in all_files if pattern in f]
for d_type in nt.config["core"]["data_types"]:
if d_type not in data_type:
print(d_type)
rel_files = [f for f in rel_files if d_type not in f]
for classifier in classifiers:
clf_files = [f for f in rel_files if classifier in f]
sub_score = [classifier]
for pca_setting in ["no_PCA", "PCA."]:
if pca_setting == "PCA.":
files = [f for f in clf_files if pca_setting in f]
else:
files = [f for f in clf_files if "PCA" not in f]
if len(files) > 1:
print("error")
print(files)
with open(files[0]) as json_file:
data = json.load(json_file)
sub_score.extend(
[
"{0:.3f}".format(float(data[metric]["mean"]))
+ " $\pm$ "
+ "{0:.3f}".format(float(data[metric]["std"])),
format_time(float(data["mean_test_time"]))
+ " $\pm$ "
+ format_time(float(data["std_test_time"])),
]
)
scores.append(sub_score)
df =
|
pd.DataFrame(scores)
|
pandas.DataFrame
|
import os
import glob
import pandas as pd
import numpy as np
from tqdm.notebook import tqdm
import helper
class Matcher:
def __init__(self, L, prior_dir=None, count_dir=None, matched=None):
self.prior_dir = prior_dir
self.count_dir = count_dir
self.L = L
self.info = self.L.LOG
self.debug = self.L.debug
self.fields = list(self.L.parent_of_fields.Parent.unique())
assert(len(self.fields) == 19)
if matched is None:
self.load_matched()
else:
self.matched = matched.copy()
self.log_stats('')
def log_stats(self, msg):
self.info.log(f'{self.matched.shape} {self.matched.EditorsNewId.nunique()} {self.matched[["EditorsNewId","issn"]].drop_duplicates().shape} {msg}')
def load_matched(self):
self.all_matched = []
for field in tqdm(self.fields):
try:
matched_count, matched_prior =
|
pd.DataFrame()
|
pandas.DataFrame
|
'''
Generates the following features -
1. TF
2. TF-IDF
...
'''
import pandas as pd
import json
import csv
import numpy as np
from imblearn.over_sampling import RandomOverSampler
class Labels(object):
def __init__(self):
# Labels dict has - (term, label) pairs
self.labels = {}
class Features(object):
def __init__(self):
''''''
# Features as - 1. tf 2. idf ...
def calculateTF(self, data):
'''
Calculates tf for all words in the file
:return: dataframe with a tf column merged onto the input data
Feel free to change this as you see fit!
'''
grouped_data = data[['docID', 'term', 'label']]
grouped_data = grouped_data.groupby(['docID', 'term'], as_index=False)['label'].count()
grouped_data.columns = ['docID', 'term', 'tf']
merged = pd.merge(left=data, right=grouped_data, on=['docID', 'term'], how='inner')
return merged
def calculateIDF(self, data):
'''
Calculates idf for all the words in the file
:param data: dataframe with docID and term columns
:return: dataframe with an idf column merged onto the input data
'''
select_data = data[['term', 'docID']]
df = select_data.groupby(['term'], as_index=False)['docID'].count()
df['docID'] = 1.0/df['docID']
df.columns = ['term', 'idf']
merged = pd.merge(left=data, right=df, on=['term'], how='inner')
return merged
def pos_before(self, row, window_size):
# Need to add support for window size > 1
return row['position'] - len(row['term'].split(' ')) - window_size
def pos_after(self, row, window_size):
# Need to add support for window size > 1
return row['position'] + 1 + window_size
def getPosBeforeAfter(self, win_size_before, win_size_after, data, data_orig):
data['pos_before'] = pd.Series(np.random.randn(len(data)), index=data.index)
data['pos_after'] = pd.Series(np.random.randn(len(data)), index=data.index)
data['pos_before'] = data.apply(lambda row: self.pos_before(row, win_size_before), axis=1)
data['pos_after'] = data.apply(lambda row: self.pos_after(row, win_size_after), axis=1)
data_single = data_orig[data_orig['term'].apply(lambda x: len(x.split(' ')) == 1)]
merged_before = pd.merge(left=data, right=data_single, left_on=['docID', 'pos_before'],
right_on=['docID', 'position'], how='inner')
merged_before = merged_before.drop(
columns=['pos_before', 'pos_after', 'position_y', 'label_y'])
merged_after = pd.merge(left=data, right=data_single, left_on=['docID', 'pos_after'],
right_on=['docID', 'position'], how='inner')
merged_after = merged_after.drop(
columns=['pos_before', 'pos_after', 'position_y', 'label_y'])
merged_all =
|
pd.merge(left=merged_before, right=merged_after, on=['docID', 'term_x', 'position_x', 'label_x'], how='inner')
|
pandas.merge
|
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like
from staircase.docstrings import examples
from staircase.util import _is_datetime_like
from staircase.util._decorators import Appender
# capable of single or vector
@Appender(examples.sample_example, join="\n", indents=1)
def sample(self, x, include_index=False):
"""
Evaluates the value of the step function at one, or more, points.
The function can be called using parentheses. See example below.
Parameters
----------
x : int, float or vector data
Values at which to evaluate the function
include_index : bool, default False
Indicates if the values returned should be a :class:`numpy.ndarray`, or in a :class:`pandas.Series`
indexed by the values in *x*
Returns
-------
float, :class:`numpy.ndarray`, :class:`pandas.Series`
See Also
--------
Stairs.limit
staircase.sample
"""
side = "right" if self._closed == "left" else "left"
values = limit(self, x, side)
if include_index:
if not is_list_like(x):
x = [x]
values = pd.Series(values, index=x)
return values
@Appender(examples.limit_example, join="\n", indents=1)
def limit(self, x, side, include_index=False):
"""
Evaluates the limit of the step function as it approaches one, or more, points.
The results of this function should be considered as :math:`\\lim_{x \\to z^{-}} f(x)`
or :math:`\\lim_{x \\to z^{+}} f(x)`, when side = 'left' or side = 'right' respectively. See
:ref:`A note on interval endpoints<getting_started.interval_endpoints>` for an explanation.
Parameters
----------
x : int, float or vector data
Values at which to evaluate the function
side : {'left', 'right'}, default 'right'
If points where step changes occur do not coincide with x then this parameter
has no effect. Where a step change occurs at a point given by x, this parameter
determines if the step function is evaluated at the interval to the left, or the right.
include_index : bool, default False
Indicates if the values returned should be a :class:`numpy.ndarray`, or in a :class:`pandas.Series`
indexed by the values in *x*
Returns
-------
float, :class:`numpy.ndarray`, :class:`pandas.Series`
See Also
--------
Stairs.sample
staircase.sample
"""
assert side in ("left", "right")
passed_x = x
if self._data is None:
if pd.api.types.is_list_like(x):
return self.initial_value * np.ones_like(x, dtype=float)
else:
return self.initial_value
amended_values = np.append(
self._get_values().values, [self.initial_value]
) # hack for -1 index value
if pd.api.types.is_list_like(x) and _is_datetime_like(next(iter(x))):
x = pd.Series(x).values # faster, but also bug free in numpy
elif _is_datetime_like(x):
x =
|
pd.Series([x])
|
pandas.Series
|
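# A short usage sketch for sample/limit above, assuming the public staircase
# API (`staircase.Stairs` with `.layer`) exposes them as methods, as the
# docstrings suggest; values shown follow the definitions above for a unit
# step on [1, 3).
import staircase as sc

sf = sc.Stairs().layer(1, 3)      # 0 before x=1, 1 on [1, 3), 0 afterwards
print(sf.sample(2))               # 1: value inside the interval
print(sf.limit(1, side="left"))   # 0: approaching the step change from the left
print(sf.limit(1, side="right"))  # 1: approaching it from the right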
#
# Copyright (C) 2021 The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from decimal import Decimal
from typing import Any, Callable, Dict
import numpy as np
import pandas as pd
def _get_dummy_column(schema_type):
"""
Return a dummy column with the data type specified in schema_type.
The dummy column is used to populate the dtype fields in empty tables.
:param schema_type: str or json representing a data type
:return: dummy pandas Series to be inserted into an empty table
"""
if schema_type == "boolean":
return pd.Series([False])
elif schema_type == "byte":
return pd.Series([0], dtype="int8")
elif schema_type == "short":
return pd.Series([0], dtype="int16")
elif schema_type == "integer":
return pd.Series([0], dtype="int32")
elif schema_type == "long":
return
|
pd.Series([0], dtype="int64")
|
pandas.Series
|
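# The helper above maps Delta schema type names to typed dummy pandas Series
# so that empty tables still carry correct dtypes. A simplified, hedged
# stand-in for that idea (the real mapping is only partially shown above):
# insert one dummy row per column, then drop it, keeping the dtypes.
import pandas as pd

dummy = {
    "boolean": pd.Series([False]),
    "integer": pd.Series([0], dtype="int32"),
    "long": pd.Series([0], dtype="int64"),
}
schema = {"flag": "boolean", "count": "long"}
empty = pd.DataFrame({name: dummy[dtype] for name, dtype in schema.items()}).iloc[0:0]
print(empty.dtypes)  # flag -> bool, count -> int64, zero rows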
import streamlit as st
import numpy as np
import pandas as pd
import sqlite3
conn=sqlite3.connect('data.db')
c=conn.cursor()
import os
import warnings
warnings.filterwarnings('ignore')
import tensorflow.keras as tf
import joblib
import base64
from io import BytesIO
ratings_1=pd.read_csv("ratings_1.csv")
ratings_2=pd.read_csv("ratings_2.csv")
ratings_3=pd.read_csv("ratings_3.csv")
ratings_4=pd.read_csv("ratings_4.csv")
ratings_5=pd.read_csv("ratings_5.csv")
ratings_df_list=[ratings_1,ratings_2,ratings_3,ratings_4,ratings_5]
ratings_df=pd.concat(ratings_df_list)
del ratings_1,ratings_2,ratings_3,ratings_4,ratings_5,ratings_df_list
new_model=tf.models.load_model("modelrecsys.h5")
co=joblib.load("contentsfile.joblib")
titlefile=joblib.load('title.joblib')
####To download dataframe recommondations
def to_excel(df):
output = BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1')
writer.save()
processed_data = output.getvalue()
return processed_data
def get_table_download_link(df):
#Generates a link allowing the data in a given panda dataframe to be downloaded
#in: dataframe
#out: href string
val = to_excel(df)
b64 = base64.b64encode(val) # val looks like b'...'
return f'<a href="data:application/octet-stream;base64,{b64.decode()}" download="extract.xlsx">Download csv file</a>' # decode b'abc' => abc
##df = ... # your dataframe
##st.markdown(get_table_download_link(df), unsafe_allow_html=True)
def create_usertable():
c.execute('CREATE TABLE IF NOT EXISTS userstable(username TEXT, password TEXT)')
def add_userdata(username,password):
c.execute('INSERT INTO userstable(username, password) VALUES(?,?)',(username,password))
conn.commit()
def login_user(username,password):
c.execute('SELECT * FROM userstable WHERE username=? AND password=?',(username,password))
data=c.fetchall()
return data
def view_all_users():
c.execute('SELECT * FROM userstable')
data=c.fetchall()
return data
st.title("...WELCOME...")
st.title("HYBRID BOOK RECOMMENDATION SYSTEM")
menu=["Home","Login", "Sign up","Book"]
choice=st.sidebar.selectbox("Menu",menu)
if choice=="Home":
st.subheader("HOME")
elif choice=="Login":
st.subheader("Login Section")
username=st.sidebar.text_input("username")
password=st.sidebar.text_input("password",type='password')
if st.sidebar.checkbox("Login"):
# if password=="<PASSWORD>":
create_usertable()
result=login_user(username,password)
if result:
st.success("LOGGED IN SUCCESSFULLY AS {} ".format(username))
task=st.selectbox("Task",["Help","Start-Analytics","Profile"])
if task=="Help":
st.subheader("use Start-Analytics for Reccomondations")
elif task=="Start-Analytics":
st.subheader("Top N number of Book Recommondations predicted realtime")
#user_id = st.number_input('user_id', min_value=1, max_value=53424, value=1)
user_id=st.text_input("Enter user_id {1-53424} default 1")
if user_id!="":
user_id=int(user_id)
if user_id<1 or user_id>53424:
user_id=1
else:
user_id=1
us_id_temp=[user_id for i in range(len(co['book_id']))]
reccom = new_model.predict([pd.Series(us_id_temp),co['book_id'],co.iloc[:,1:]])
recc_df=
|
pd.DataFrame(reccom,columns=["rating"])
|
pandas.DataFrame
|
import orca
import pandas as pd
import os
import asim_utils
import asim_simulate
import asim_misc
import skim as askim
import tracing
import openmatrix as omx
################################
# from asim.abm.tables.landuse #
################################
@orca.table()
def land_use(asim_store):
df = asim_store["land_use/taz_data"]
print("loaded land_use %s" % (df.shape,))
# replace table function with dataframe
orca.add_table('land_use', df)
return df
###################################
# from asim.abm.tables.size_terms #
###################################
@orca.table()
def size_terms(configs_dir):
f = os.path.join(configs_dir, 'destination_choice_size_terms.csv')
return
|
pd.read_csv(f, index_col='segment')
|
pandas.read_csv
|
import csv
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import statsmodels.api as sm
df = pd.read_csv("./[Track1_데이터3] samp_cst_feat.csv")
X = df[1:]
df = df.drop(1,0)
data=[]
f = open('./[Track1_데이터3] samp_cst_feat.csv','r')
rdr = csv.reader(f)
for i in rdr:
data.append(i)
a=data[0]
del a[0]
df2 =
|
pd.read_csv("./[Track1_데이터2] samp_train.csv")
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with
|
tm.assert_raises_regex(Exception, msg)
|
pandas.util.testing.assert_raises_regex
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# General imports
import numpy as np
import pandas as pd
import os, sys, gc, time, warnings, pickle, psutil, random
from math import ceil
from sklearn.preprocessing import LabelEncoder
#warnings.filterwarnings('ignore')
import json
with open('SETTINGS.json', 'r') as myfile:
datafile=myfile.read()
SETTINGS = json.loads(datafile)
data_path = SETTINGS['RAW_DATA_DIR']
save_data_path = SETTINGS['PROCESSED_DATA_DIR']
def get_memory_usage():
return np.round(psutil.Process(os.getpid()).memory_info()[0]/2.**30, 2)
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def reduce_mem_usage(df, verbose=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
## Merging by concat to not lose dtypes
def merge_by_concat(df1, df2, merge_on):
merged_gf = df1[merge_on]
merged_gf = merged_gf.merge(df2, on=merge_on, how='left')
new_columns = [col for col in list(merged_gf) if col not in merge_on]
df1 = pd.concat([df1, merged_gf[new_columns]], axis=1)
return df1
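# merge_by_concat above merges only the key columns of df1 against df2 and
# then concatenates the new columns back, so df1's existing (often downcast)
# columns never pass through pd.merge. Illustrative usage (names assumed):
#
#   left  = pd.DataFrame({'id': np.array([1, 2], dtype=np.int8), 'd': [1, 1]})
#   right = pd.DataFrame({'id': [1], 'price': [9.99]})
#   merged = merge_by_concat(left, right, ['id'])
#   # merged keeps left's int8 'id' column and gains a float 'price' column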
################################### FE ###############################################################
########################### Vars
#################################################################################
TARGET = 'sales' # Our main target
END_TRAIN = 1941 #1913 + 28 # Last day in train set
MAIN_INDEX = ['id','d'] # We can identify item by these columns
########################### Load Data
#################################################################################
print('Load Main Data')
# Here we are reading all our data
# without any limitations or dtype modification
train_df = pd.read_csv(data_path+'sales_train_evaluation.csv')
# train_df = pd.read_csv(data_path+'sales_train_validation.csv')
prices_df =
|
pd.read_csv(data_path+'sell_prices.csv')
|
pandas.read_csv
|
# power_ratings_v0.3.py
# Last updated: 29 August 2020
# https://github.com/JohnBurant/COVID19-Model-Power-Ratings
# Timeframe choices
first_week = 17
last_week = 35
nweeks = 3 # Input as 0-indexed, i.e., how many we actually want - 1
Nroll = 4 # Number of weeks to use for rolling average overall power rating (N->inf = lifetime, N->1 identical to weekly rating)
# Policy choices
# See discussion on github site
d_policy = {'include_baselines': True,
'include_ensemble' : True,
'include_models_without_full_measure_set': False,
'include_models_without_all_weeks': False,
'generate_partial_results_for_recent_weeks': False,
'num_forecasts_for_lifetime_list_eligibility': 4
}
working_dir = 'EDIT_ME' # Local path of this repo
yyg_repo_dir = 'EDIT_ME' # path to clone of https://github.com/youyanggu/covid19-forecast-hub-evaluation
eval_dir = yyg_repo_dir + 'evaluations/'
output_dir = working_dir + 'results/'
import pandas as pd
import numpy as np
import epiweeks
import datetime
import os
import altair as alt
idx = pd.IndexSlice
def power_score(s_in):
'''
Given a pd.Series of error measures, return a (sorted) pd.Series w/ power ratings.
Power rating is as follows:
The minimum error is assigned a power rating of 100;
The median error is assigned a power rating of 50;
Remaining errors are distributed proportionately according to how they fall on the scale defined by min->100, median->50;
Any errors that would result in a power rating < 0 are set = 0.
'''
s = s_in.sort_values()
err_range = 2*(s.median()-s.min())
s_scaled = (err_range - s + s.min()).clip(0)
s_scaled = 100*s_scaled/s_scaled[0]
return s_scaled
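# Worked example of the scaling above (illustrative): for errors [2, 4, 6, 10]
# the minimum is 2 and the median is 5, so err_range = 2*(5-2) = 6 and each
# error e maps to 100 * max(6 - e + 2, 0) / 6, i.e.
#   power_score(pd.Series([2, 4, 6, 10])) -> 100.0, ~66.7, ~33.3, 0.0 (clipped),
# and an error equal to the median would land exactly at 50, as documented.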
def say(st,l_outs,save=True,term='\n'):
'''
Given a string st, a list l_outs, a boolean save, and an optional string term:
print st, using term as the `end` argument of the print function;
if save is True, append st to l_outs.
'''
print(st,end=term)
if save: l_outs.append(st)
l_outs = []
say('Beginning generation of power ratings from raw evaluation files.',l_outs)
datestamp = datetime.datetime.today().strftime('%Y-%m-%d')
outstring = 'Datestamp: '+datestamp
say(outstring,l_outs)
say('Following these policies:',l_outs)
for k, v in d_policy.items():
outstring = ' '+str(k)+': '+str(v)
say(outstring,l_outs)
# Create some handy dicts to move between YYYY-MM-DD and epiweek
d_epiweeks = {i: epiweeks.Week(2020,i) for i in range(first_week,last_week+1)}
d_wk_to_enddate = {k: w.enddate().strftime('%Y-%m-%d') for k, w in d_epiweeks.items()}
d_enddate_to_wk = {v: k for k, v in d_wk_to_enddate.items()}
d_wk_to_startdate = {k: (w.startdate()+datetime.timedelta(days=1)).strftime('%Y-%m-%d') for k, w in d_epiweeks.items()}
d_startdate_to_wk = {v: k for k, v in d_wk_to_startdate.items()}
# For each starting week, create list of week pairs that are (starting_week,ending_week). Store as dict.
d_week_pairs = {i: [(j,i) for j in range(first_week,last_week+1) if i >= j] for i in range(first_week,last_week+1)}
# Also do this with the full string YYYY-MM-DD version
d_week_pairs_str = {}
for k, v in d_week_pairs.items():
d_week_pairs_str[d_wk_to_enddate[k]] = [d_wk_to_startdate[tup[0]]+'_'+d_wk_to_enddate[tup[1]] for tup in v]
# Read in all the files in the evaluations directory
# We could just glob but curating it slightly might make the manipulations easier
# Note we make the switch from YYYY-MM-DD to integer weeks right here.
# Format {filename_base: {'cols': list of columns to keep, 'short_name': what we call the measure}
d_files_measures_cols = {'states_abs_errs': {'cols' : ['mean'], 'short_name': 'state_abs'},
'states_sq_errs': {'cols' : ['mean'], 'short_name': 'state_sq'},
'states_mean_ranks': {'cols' : ['mean_rank'], 'short_name': 'state_rank'},
'us_errs': {'cols': ['perc_error'], 'short_name': 'us_tot'}
}
measures_used = d_files_measures_cols.keys()
measures_used_str = 'Measures used: '+', '.join(list(measures_used))
say(measures_used_str,l_outs)
say('Loading evaluation files...',l_outs,term='')
l_dfs = []
l_files_not_found = [] # Can inspect this later if we're curious
ict = 0
for meas, vd in d_files_measures_cols.items():
cols_to_keep = vd['cols']
short_name = vd['short_name']
for k, v in d_week_pairs_str.items():
for pr in v:
ict += 1
fstring = k+'/'+pr+'_'+meas+'.csv'
full_path_to_file = eval_dir+fstring
# Before we try to read the file check if it exists?
if os.path.isfile(full_path_to_file):
df = pd.read_csv(full_path_to_file)
df = df[['Unnamed: 0', *cols_to_keep]]
df = df.set_index('Unnamed: 0')
date_pr = pr.split('_')
int_weeks = [d_startdate_to_wk[date_pr[0]], d_enddate_to_wk[date_pr[1]]]
cols_mi = pd.MultiIndex.from_tuples([(short_name+'_'+c, int_weeks[0], int_weeks[1]) for c in cols_to_keep],
names=['measure','start_wk','end_wk'])
df.columns = cols_mi
l_dfs.append(df)
else:
l_files_not_found.append(full_path_to_file)
say('Done',l_outs,save=False)
outstring = 'Number of possible week-pair files: '+str(ict)
say(outstring,l_outs)
outstring = 'Number of files processed: '+str(len(l_dfs))
say(outstring,l_outs)
df_raw = pd.concat(l_dfs,axis=1)
df_raw = df_raw.dropna(axis=0,how='all') # Some models show up but not for the metric we use (notably: UChicago, which did only Illinois)
df_raw.index.name = 'Model'
outstring = 'Shape of df_raw: '+str(df_raw.shape)
say(outstring,l_outs)
# Do some further processing into a new df
# df will be the main dataframe we'll work with that will contain start_wk/num_wk/model/measure (4-tuple) error measures
# Switch from first_wk/end_wk to first_wk/num_wks where num_wks counts how many weeks out from first_wk the predictions are, starting w/ 0.
df = df_raw.copy()
df = df.T.reset_index()
df['num_wks'] = df['end_wk'] - df['start_wk']
df = df.drop(columns=['end_wk'])
df = df.set_index(['start_wk','num_wks','measure'])
df = df.sort_index()
# Also, slight processing:
# (1) convert mean-square-errors into root-mean-square-errors (and rename the index)
# (2) take absolute value of percentage error on whole-US projection
# (1a) apply the sqrt
screen_sq_errs = df.index.get_level_values('measure').str.startswith('state_sq')
df.loc[screen_sq_errs,:] = df.loc[screen_sq_errs,:].applymap(np.sqrt)
# (1b) rename the measure; easier to do this way than dealing with the index object itself!
df = df.reset_index('measure')
df.loc[screen_sq_errs,'measure'] = 'states_rmse'
df = df.set_index('measure',append=True)
# (2)
screen_perc_errs = df.index.get_level_values('measure') == 'us_tot_perc_error'
df.loc[screen_perc_errs,:] = df.loc[screen_perc_errs,:].applymap(lambda x: x if pd.isna(x) else abs(float(x.strip('%'))))
# Also, a slightly nicer name for the state rank
df = df.reset_index('measure')
screen_state_rank = df['measure'] == 'state_rank_mean_rank'
df.loc[screen_state_rank,'measure'] = 'state_mean_rank'
df = df.set_index('measure',append=True)
# Now that we've fixed percentages, make everything a float
df = df.astype(float)
# Trim the weeks further into the future from the start_wk than we want to use, and sort the index for efficiency later
screen = df.index.get_level_values('num_wks') <= nweeks
df = df.loc[screen,:].sort_index()
# Optionally, take out baseline(s) COVIDhub ensemble from the power rating process depending on policy choices
l_excluded_models = ['Baseline'] # Always exclude YYG's 'Baseline' b/c we already have another baseline (COVIDhub-baseline) in here
if not d_policy['include_baselines']: l_excluded_models += ['COVIDhub-baseline']
if not d_policy['include_ensemble']: l_excluded_models += ['COVIDhub-ensemble']
cols = [c for c in df.columns if c not in l_excluded_models]
df = df[cols]
df.columns.name = 'Model' # This isn't carried through manipulations above?
outstring = 'Generating working df of shape: '+str(df.shape)
say(outstring,l_outs)
# Calculate the power ratings for each start_wk/num_wks/measure/Model 4-tuple, where the power rating is done over all models present for the start_wk/num_wks/measure 3-tuple
# df_power holds the power ratings
df_power = df.apply(power_score,axis=1)
df_power.columns.name = 'Model' # power_score doesn't know about name of series it returns
# Make some aggregations depending on policy choices
# dft is modified and used throughout; we make some worthwhile intermediate aggregations and store as we go.
# First, create a single power rating for each Model/start_wk/num_wks tuple (averaging over all measures)
dft = df_power.stack().unstack('measure')
if d_policy['include_models_without_full_measure_set']:
dft = dft.fillna(0,axis=1)
dft = dft.mean(skipna=False,axis=1) # It's actually a series at this point
df_power_model_wk_pair = dft.unstack('Model')
# Now, for each Model/start_wk 2-tuple, aggregate over the weeks (up to nweeks)
# First, count the number of weeks including/after start_wk for which there is any data for each model
s_model_wks = dft.unstack(level=['num_wks']).apply(lambda x: x.notna().sum(),axis=1)
# Now for each start week, how many weeks data are expected?
i = s_model_wks.index.get_level_values('start_wk').unique()
max_weeks_expected = nweeks + 1 - np.maximum(i-last_week+nweeks,0)
s_start_wk_max_wks = pd.Series(index=i,data=max_weeks_expected)
# Now check if the weeks we counted as having data is sufficient
screen_sufficient_weeks = (s_model_wks.unstack('Model').fillna(0).astype(int)
.apply(lambda x: x.values == s_start_wk_max_wks.values).stack('Model'))
i_model_wks = s_model_wks[screen_sufficient_weeks].index
# Finally, apply the policies and aggregation
dft = dft.unstack('num_wks')
if not d_policy['include_models_without_all_weeks']:
dft = dft.loc[i_model_wks,:]
dft = dft.mean(skipna=d_policy['generate_partial_results_for_recent_weeks'],axis=1).dropna(axis=0,how='all')
df_power_model_start_wk = dft.unstack('Model')
df_power_model_start_wk.index.name = 'start_wk'
df_power_model_start_wk.columns.name = 'Model'
# Create the rolling mean, and change index to be a little more intuitive
df_power_model_rolling_N_wks = df_power_model_start_wk.rolling(Nroll).mean().dropna(axis=0,how='all').T
df_power_model_rolling_N_wks.columns = [str(c-4+1)+'-'+str(c) for c in df_power_model_rolling_N_wks.columns]
df_power_model_rolling_N_wks.columns.name = 'start_wk_range'
outstring = 'Using '+str(Nroll)+' weeks window for mean rolling power rating'
say(outstring,l_outs)
# Generate a Model's lifetime average power ranking, over a specified set of weeks (can be all weeks of course)
week_alpha = first_week
week_omega = max(df_power_model_start_wk.index)+1
weeks_included = list(range(week_alpha,week_omega))
outstring = 'Generating an average of power ratings for models over a span of '+str(len(weeks_included))+' weeks.'
say(outstring,l_outs)
outstring = 'First week: '+str(first_week)+' (i.e., week starting '+str(d_wk_to_startdate[first_week])+')'
say(outstring,l_outs)
outstring = ' Covering the period of first week plus an additional '+str(nweeks)+' weeks (i.e., a total of '+str(nweeks+1)+' weeks)'
say(outstring,l_outs)
# Add count of forecasts as column (so new arrivals are properly seen as such, compared with long-standing models)
#df_power_model = df_power_model_start_wk.loc[weeks_included,:].mean(axis=0).sort_values(ascending=False)
df_power_model = df_power_model_start_wk.loc[weeks_included,:].stack().groupby('Model').agg(['count', 'mean']).rename(columns={'count': 'num_forecasts_in_lifetime', 'mean': 'mean_power_rating'}).sort_values('mean_power_rating',ascending=False)
df_power_model = df_power_model[df_power_model['num_forecasts_in_lifetime'] >= d_policy['num_forecasts_for_lifetime_list_eligibility']]
df_power_model.name = 'Power_Rating'
# Also look at national-only models
has_national = df_power.loc[idx[:,:,'us_tot_perc_error'],:].notna().droplevel('measure')
has_all_state = df_power.loc[idx[:,:,['state_abs_mean','state_mean_rank','states_rmse']],:].notna().groupby(['start_wk','num_wks']).sum().astype(int)==3
is_national_only = has_national & ~ has_all_state
national_only_week_has_4plus_weeks = (is_national_only.groupby('start_wk').sum().astype(int)>0).T.sum(axis=1).astype(int)>4
l_national_only_models = national_only_week_has_4plus_weeks[national_only_week_has_4plus_weeks].index.to_list()
# Build the power measure df for just the national measures; filter according to policy and add indication about whether a model is national-only
screen_us_meas = df_power.index.get_level_values('measure')=='us_tot_perc_error'
dfx = df_power[screen_us_meas].droplevel('measure') # .groupby('start_wk') .mean().T
# Note: This code is duplicated above when used for the same purpose to build df_power_model_start_wk
s_model_wks = dfx.stack().unstack('num_wks').apply(lambda x: x.notna().sum(),axis=1)
# Now for each start week, how many weeks data are expected?
i = s_model_wks.index.get_level_values('start_wk').unique()
max_weeks_expected = nweeks + 1 - np.maximum(i-last_week+nweeks,0)
s_start_wk_max_wks = pd.Series(index=i,data=max_weeks_expected)
# Now check if the weeks we counted as having data is sufficient
screen_sufficient_weeks = (s_model_wks.unstack('Model').fillna(0).astype(int)
.apply(lambda x: x.values == s_start_wk_max_wks.values).stack('Model'))
i_model_wks = s_model_wks[screen_sufficient_weeks].index
dfx = dfx.stack().unstack('num_wks')
if not d_policy['include_models_without_all_weeks']:
dfx = dfx.loc[i_model_wks,:]
dfx = dfx.mean(skipna=d_policy['generate_partial_results_for_recent_weeks'],axis=1).dropna(axis=0,how='all')
df_power_national_measure = dfx.unstack('Model')
df_power_national_measure.index.name = 'start_wk'
df_power_national_measure.columns.name = 'Model'
df_power_national_measure = df_power_national_measure.T
df_power_national_measure['national_only_model'] = df_power_national_measure.index.isin(l_national_only_models)
df_power_national_measure = df_power_national_measure.set_index('national_only_model',append=True)
# We can also look at models over their whole lifetime (or, of course, a segment of it) and calculate each model's power rating for each specific measure.
df_power_model_measure = df_power.stack().groupby(['Model','measure']).mean().unstack('measure')
# We can also look at the range (e.g., max-min) of the power ratings of a model for the various measures
df_power_model_measure.dropna(axis=0,how='any').apply(lambda x: x.max() - x.min(),axis=1).sort_values()
# We could also look at power rating by number of weeks out from initial forecast (i.e., agg by num_wks)
df_power_model_num_wks = df_power.stack().groupby(['Model','num_wks']).mean().unstack()
# Save the work.
output_filename_base = "covid_model_power_ratings"
output_file_week_str = 'start_wk_'+str(first_week)+'-num_wks_'+str(nweeks+1)
output_filename = output_filename_base+'-'+output_file_week_str+'-'+datestamp+'.xlsx'
full_path_to_output_file = output_dir+output_filename
# (Possibly more) complicated (than necessary) way to set up the Info worksheet
d_output_sheets = {1: {'df': df_power_model,
'sheet_name': 'Model_lifetime',
'desc': 'Power rating for each model over its full lifetime (single number for each model)'},
2: {'df': df_power_model_start_wk.T, # the transpose makes for better viewing
'sheet_name': 'Model_weekly',
'desc': 'Power ratings for each model for each week projections were submitted (single number for each model for each week)'},
3: {'df': df_power_model_rolling_N_wks,
'sheet_name': 'Model_rolling_N_wks',
'desc': 'Power ratings for each model averaged over a rolling N week window, N given in Info tab'},
4: {'df': df_power_model_measure,
'sheet_name': 'Model_measures',
'desc': 'Power ratings for each separate measure for each model over its full lifetime (n_measures number for each model)'},
5: {'df': df_power_model_num_wks,
'sheet_name': 'Model_num_wks',
'desc': 'Power ratings for each model for increasing 0, 1, 2, etc. (0-indexed) week periods since a projection was submitted, averaged over that projection\'s full lifetime (n_weeks numbers per model)'},
6: {'df': df_power_model_wk_pair,
'sheet_name': 'Model_week_pairs',
'desc': 'Power ratings for each model for increasing 0, 1, 2, etc. (0-indexed) week periods since a projection was submitted, for each week projections were submitted'},
7: {'df': df_power_national_measure,
'sheet_name': 'Model_natl_err_only_wks',
'desc': 'Power rating for the national death total error measure only; indicates if a model is a multi-state or national-only model'},
8: {'df': df_power,
'sheet_name': 'Raw_power_ratings',
'desc': 'Power ratings for every model, for each model/start_wk/num_wk/measure 4-tuple'}
}
l_sheet_names = [vd['sheet_name'] for vd in d_output_sheets.values()]
l_descs = [vd['desc'] for vd in d_output_sheets.values()]
df_index_sheet = pd.DataFrame(data=[l_sheet_names,l_descs],index=['Sheet name','Description']).T
outstring = 'Saving power ratings in '+output_filename+'...'
say(outstring,l_outs,term='')
df_info_sheet =
|
pd.DataFrame(l_outs)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
from gamechangerml.src.utilities.text_utils import normalize_answer, normalize_query, get_tokens
from gamechangerml.src.utilities.test_utils import *
from gamechangerml.configs.config import ValidationConfig, TrainingConfig
from gamechangerml.api.utils.logger import logger
from gamechangerml.src.utilities.es_search_utils import get_paragraph_results, connect_es
from gamechangerml.src.utilities.test_utils import filter_date_range
ES_URL = 'https://vpc-gamechanger-iquxkyq2dobz4antllp35g2vby.us-east-1.es.amazonaws.com'
class ValidationData():
def __init__(self, validation_dir):
self.validation_dir = validation_dir
class SQuADData(ValidationData):
def __init__(
self,
validation_dir=ValidationConfig.DATA_ARGS['validation_dir'],
squad_path=ValidationConfig.DATA_ARGS['squad']['dev'],
sample_limit=None
):
super().__init__(validation_dir)
logger.info(f"Pulling validation data from {str(os.path.join(validation_dir, squad_path))}")
if not os.path.exists(os.path.join(validation_dir, squad_path)):
logger.warning("No directory exists for this validation data.")
self.dev = open_json(squad_path, validation_dir)
self.queries = self.get_squad_sample(sample_limit)
def get_squad_sample(self, sample_limit):
'''Format SQuAD data into list of dictionaries (length = sample size)'''
data_limit = len(self.dev['data'])
if sample_limit:
data_limit = np.min([data_limit, sample_limit])
par_limit = sample_limit // data_limit
else:
par_limit = np.max([len(d['paragraphs']) for d in self.dev['data']])
count = 0
queries = []
for p in range(par_limit):
for d in range(data_limit):
try:
base = self.dev['data'][d]['paragraphs'][p]
context = base['context']
questions = base['qas']
q_limit = np.min([2, par_limit, len(questions)])
for q in range(q_limit):
if count < sample_limit:
count += 1
mydict = {
"search_context": context,
"question": questions[q]['question'],
"id": questions[q]['id'],
"null_expected": questions[q]['is_impossible'],
"expected": questions[q]['answers']
}
queries.append(mydict)
else:
break
except:
pass
logger.info("Generated {} question/answer pairs from SQuAD dataset".format(len(queries)))
return queries
class QADomainData(ValidationData):
def __init__(
self,
validation_dir=ValidationConfig.DATA_ARGS['validation_dir'],
qa_gc_data_path=ValidationConfig.DATA_ARGS['question_gc']['queries']
):
super().__init__(validation_dir)
self.all_queries = open_json(qa_gc_data_path, self.validation_dir)
self.queries = self.check_queries()
def check_queries(self):
'''Check that in-domain examples contain expected answers in their context'''
checked = []
for test in self.all_queries['test_queries']:
alltext = normalize_query(' '.join(test['search_context']))
checked_answers = [i for i in test['expected'] if normalize_answer(i['text']) in alltext]
test['expected'] = checked_answers
if test['expected'] != []:
checked.append(test)
else:
logger.info("Could not add {} to test queries: answer not in context".format(test['question']))
logger.info("Generated {} question/answer pairs from in-domain data".format(len(checked)))
return checked
class MSMarcoData(ValidationData):
def __init__(
self,
validation_dir=ValidationConfig.DATA_ARGS['validation_dir'],
queries=ValidationConfig.DATA_ARGS['msmarco']['queries'],
collection=ValidationConfig.DATA_ARGS['msmarco']['collection'],
relations=ValidationConfig.DATA_ARGS['msmarco']['relations'],
metadata=ValidationConfig.DATA_ARGS['msmarco']['metadata']
):
super().__init__(validation_dir)
self.queries = open_json(queries, self.validation_dir)
self.collection = open_json(collection, self.validation_dir)
self.relations = open_json(relations, self.validation_dir)
self.metadata = open_json(metadata, self.validation_dir)
self.corpus = self.get_msmarco_corpus()
def get_msmarco_corpus(self):
'''Format MSMarco so it can be indexed like the GC corpus'''
return [(x, y, '') for x, y in self.collection.items()]
class RetrieverGSData(ValidationData):
def __init__(
self,
validation_dir,
available_ids,
gold_standard):
super().__init__(validation_dir)
self.samples = pd.read_csv(os.path.join(self.validation_dir, gold_standard), names=['query', 'document'])
self.queries, self.collection, self.relations = self.dictify_data(available_ids)
def dictify_data(self, available_ids):
'''
Filter out any validation queries whose documents aren't in the index.
Format gold standard csv examples into MSMarco format.
'''
ids = ['.'.join(i.strip('\n').split('.')[:-1]).strip().lstrip() for i in available_ids]
self.samples['document'] = self.samples['document'].apply(lambda x: [i.strip().lstrip() for i in x.split(';')])
self.samples = self.samples.explode('document')
df = self.samples[self.samples['document'].isin(ids)] # check ids are in the index
all_ids = self.samples['document'].unique()  # needed below even when no IDs are missing
if df.shape[0] < self.samples.shape[0]:
missing_ids = [i for i in all_ids if i not in ids]
logger.info("Validation IDs not in the index (removed from validation set): {}".format(missing_ids))
logger.info("Number of missing IDs: {}".format(str(len(missing_ids))))
logger.info("Number documents in the index to test: {}".format(str(len(all_ids) - len(missing_ids))))
df = df.groupby('query').agg({'document': lambda x: x.tolist()}).reset_index()
query_list = df['query'].to_list()
doc_list = df['document'].to_list()
q_idx = ["query_" + str(i) for i in range(len(query_list))]
queries = dict(zip(q_idx, query_list))
collection = dict(zip(all_ids, all_ids))
relations = dict(zip(q_idx, doc_list))
logger.info("Generated {} test queries of gold standard data".format(len(query_list)))
return queries, collection, relations
class UpdatedGCRetrieverData(RetrieverGSData):
def __init__(self,
available_ids,
level=['gold', 'silver'],
data_path=None,
validation_dir=ValidationConfig.DATA_ARGS['validation_dir'],
gold_standard=ValidationConfig.DATA_ARGS['retriever_gc']['gold_standard']
):
super().__init__(validation_dir, available_ids, gold_standard)
try:
if data_path: # if there is a path for data, use that
self.data_path = os.path.join(data_path, level)
else:
new_data = get_most_recent_dir(os.path.join(ValidationConfig.DATA_ARGS['validation_dir'], 'sent_transformer'))
self.data_path = os.path.join(new_data, level)
self.new_queries, self.new_collection, self.new_relations = self.load_new_data()
self.combine_in_domain()
except:
logger.info(f"Error getting data from {new_data}. Could not create UpdatedGCRetrieverData object.")
def load_new_data(self):
f = open_json('intelligent_search_data.json', self.data_path)
intel = json.loads(f)
logger.info(f"Added {str(len(intel['correct']))} correct query/sent pairs from updated GC retriever data.")
return intel['queries'], intel['collection'], intel['correct']
def combine_in_domain(self):
self.queries.update({k:v for (k, v) in self.new_queries.items() if k in self.new_relations.keys()})
self.collection.update(self.new_collection)
self.relations.update(self.new_relations)
return
class NLIData(ValidationData):
def __init__(
self,
sample_limit,
validation_dir=ValidationConfig.DATA_ARGS['validation_dir'],
matched=ValidationConfig.DATA_ARGS['nli']['matched'],
mismatched=ValidationConfig.DATA_ARGS['nli']['mismatched']
):
super().__init__(validation_dir)
self.matched = open_jsonl(matched, self.validation_dir)
self.mismatched = open_jsonl(mismatched, self.validation_dir)
self.sample_csv = self.get_sample_csv(sample_limit)
self.query_lookup = dict(zip(self.sample_csv['promptID'], self.sample_csv['sentence1']))
def get_sample_csv(self, sample_limit):
'''Format NLI data into smaller sample for evaluation'''
match_df = pd.DataFrame(self.matched)
mismatched_df = pd.DataFrame(self.mismatched)
match_df['set'] = 'matched'
mismatched_df['set'] = 'mismatched'
both = pd.concat([match_df, mismatched_df])
# assign distinct integer weights to the gold labels so that a complete entailment/neutral/contradiction triple sums to 8
gold_labels_map = {
'entailment': 2,
'neutral': 1,
'contradiction': 5
}
both['gold_label_int'] = both['gold_label'].map(gold_labels_map)
# keep only promptIDs whose label weights sum to 8, i.e. one of each label (a clear 0, 1, 2 ranking)
sum_map = both.groupby('promptID')['gold_label_int'].sum().to_dict()
both['rank_sum'] = both['promptID'].map(sum_map)
both = both[both['rank_sum']==8]
# map ranks
rank_map = {
'entailment': 0,
'neutral': 1,
'contradiction': 2
}
both['expected_rank'] = both['gold_label'].map(rank_map)
cats = both['genre'].nunique()
# get smaller sample df with even proportion of genres across matched/mismatched
sample = pd.DataFrame()
for i in both['genre'].unique():
subset = both[both['genre']==i].sort_values(by='promptID')
if sample_limit:
split = sample_limit * 3 // cats
subset = subset.head(split)
sample = pd.concat([sample, subset])
logger.info(("Created {} sample sentence pairs from {} unique queries:".format(sample.shape[0], sample_limit)))
return sample[['genre', 'gold_label', 'pairID', 'promptID', 'sentence1', 'sentence2', 'expected_rank']]
class MatamoFeedback():
def __init__(
self,
start_date,
end_date,
exclude_searches
):
self.matamo = concat_matamo()
self.start_date = start_date
self.end_date = end_date
self.exclude_searches=exclude_searches
self.intel, self.qa = self.split_matamo()
def split_matamo(self):
'''Split QA queries from intelligent search queries'''
df = self.matamo
if self.start_date or self.end_date:
df = filter_date_range(df, self.start_date, self.end_date)
df.drop_duplicates(subset = ['user_id', 'createdAt', 'value_1', 'value_2'], inplace = True)
df['source'] = 'matamo'
df['correct'] = df['event_name'].apply(lambda x: ' '.join(x.split('_')[-2:])).map({'thumbs up': True, 'thumbs down': False})
df['type'] = df['event_name'].apply(lambda x: ' '.join(x.split('_')[:-2]))
df['value_5'] = df['value_5'].apply(lambda x: x.replace('sentence_results', 'sentence_results:') if type(x)==str else x)
intel = df[df['type']=='intelligent search'].copy()
intel.dropna(axis=1, how='all', inplace = True)
qa = df[df['type']=='qa'].copy()
qa.dropna(axis=1, how='all', inplace = True)
def process_matamo(df):
'''Reformat Matamo feedback'''
queries = []
cols = [i for i in df.columns if i[:5]=='value']
def process_row(row, col_name):
'''Split the pre-colon text from rows'''
if ':' in row:
row = row.split(':')
key = row[0]
vals = ':'.join(row[1:])
return key, vals
else:
return col_name, row
for i in df.index:
query = {}
query['date'] = df.loc[i, 'createdAt']
query['source'] = 'matamo'
query['correct_match'] = df.loc[i, 'correct']
for j in cols:
row = df.loc[i, j]
if type(row) == str and row[0] != '[':
key, val = process_row(row, j)
query[key] = val
if key in ['question', 'search_text', 'QA answer']:
clean_val = normalize_query(val)
clean_key = key + '_clean'
query[clean_key] = clean_val
queries.append(query)
return
|
pd.DataFrame(queries)
|
pandas.DataFrame
|
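# Worked illustration of process_row above (the example strings are hypothetical):
# it splits a value on its first colon into (key, remaining text), and falls back to
# the column name when no colon is present.
#   process_row('search_text:fiscal year 2020 budget', 'value_1') -> ('search_text', 'fiscal year 2020 budget')
#   process_row('thumbs up', 'value_2')                           -> ('value_2', 'thumbs up')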
import pandas as pd
import datetime as dt
# Read Toggl-report.csv
df =
|
pd.read_csv('Toggl-report.csv', sep=',')
|
pandas.read_csv
|
from copy import deepcopy
from handyspark.ml.base import HandyTransformers
from handyspark.plot import histogram, boxplot, scatterplot, strat_scatterplot, strat_histogram,\
consolidate_plots, post_boxplot
from handyspark.sql.pandas import HandyPandas
from handyspark.sql.transform import _MAPPING, HandyTransform
from handyspark.util import HandyException, dense_to_array, disassemble, ensure_list, check_columns, \
none2default
import inspect
from matplotlib.axes import Axes
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
from operator import itemgetter, add
import pandas as pd
from pyspark.ml.stat import Correlation
from pyspark.ml.feature import Bucketizer
from pyspark.mllib.stat import Statistics
from pyspark.sql import DataFrame, GroupedData, Window, functions as F, Column, Row
from pyspark.ml.feature import VectorAssembler, StandardScaler, PCA
from pyspark.ml.pipeline import Pipeline
from scipy.stats import chi2
from scipy.linalg import inv
def toHandy(self):
"""Converts Spark DataFrame into HandyFrame.
"""
return HandyFrame(self)
def notHandy(self):
return self
DataFrame.toHandy = toHandy
DataFrame.notHandy = notHandy
def agg(f):
f.__is_agg = True
return f
def inccol(f):
f.__is_inccol = True
return f
class Handy(object):
def __init__(self, df):
self._df = df
# classification
self._is_classification = False
self._nclasses = None
self._classes = None
# transformers
self._imputed_values = {}
self._fenced_values = {}
# groups / strata
self._group_cols = None
self._strata = None
self._strata_object = None
self._strata_plot = None
self._clear_stratification()
self._safety_limit = 1000
self._safety = True
self._update_types()
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ['_df', '_strata_object', '_strata_plot']:
setattr(result, k, deepcopy(v, memo))
return result
def __getitem__(self, *args):
if isinstance(args[0], tuple):
args = args[0]
item = args[0]
n = 20
if len(args) > 1:
n = args[1]
if n is None:
n = -1
if isinstance(item, int):
idx = item + (len(self._group_cols) if self._group_cols is not None else 0)
assert idx < len(self._df.columns), "Invalid column index {}".format(idx)
item = list(self._df.columns)[idx]
if isinstance(item, str):
if self._group_cols is None or len(self._group_cols) == 0:
res = self._take_array(item, n)
if res.ndim > 1:
res = res.tolist()
res = pd.Series(res, name=item)
if self._strata is not None:
strata = list(map(lambda v: v[1].to_dict(), self.strata.iterrows()))
if len(strata) == len(res):
res = pd.concat([pd.DataFrame(strata), res], axis=1).set_index(self._strata).sort_index()
return res
else:
check_columns(self._df, list(self._group_cols) + [item])
pdf = self._df.notHandy().select(list(self._group_cols) + [item])
if n != -1:
pdf = pdf.limit(n)
res = pdf.toPandas().set_index(list(self._group_cols)).sort_index()[item]
return res
@property
def stages(self):
return (len(list(filter(lambda v: '+' == v,
map(lambda s: s.strip()[0],
self._df.rdd.toDebugString().decode().split('\n'))))) + 1)
@property
def statistics_(self):
return self._imputed_values
@property
def fences_(self):
return self._fenced_values
@property
def is_classification(self):
return self._is_classification
@property
def classes(self):
return self._classes
@property
def nclasses(self):
return self._nclasses
@property
def response(self):
return self._response
@property
def ncols(self):
return len(self._types)
@property
def nrows(self):
return self._df.count()
@property
def shape(self):
return (self.nrows, self.ncols)
@property
def strata(self):
if self._strata is not None:
return pd.DataFrame(data=self._strata_combinations, columns=self._strata)
@property
def strata_colnames(self):
if self._strata is not None:
return list(map(str, ensure_list(self._strata)))
else:
return []
def _stratify(self, strata):
return HandyStrata(self, strata)
def _clear_stratification(self):
self._strata = None
self._strata_object = None
self._strata_plot = None
self._strata_combinations = []
self._strata_raw_combinations = []
self._strata_clauses = []
self._strata_raw_clauses = []
self._n_cols = 1
self._n_rows = 1
def _set_stratification(self, strata, raw_combinations, raw_clauses, combinations, clauses):
if strata is not None:
assert len(combinations[0]) == len(strata), "Mismatched number of combinations and strata!"
self._strata = strata
self._strata_raw_combinations = raw_combinations
self._strata_raw_clauses = raw_clauses
self._strata_combinations = combinations
self._strata_clauses = clauses
self._n_cols = len(set(map(itemgetter(0), combinations)))
try:
self._n_rows = len(set(map(itemgetter(1), combinations)))
except IndexError:
self._n_rows = 1
def _build_strat_plot(self, n_rows, n_cols, **kwargs):
fig, axs = plt.subplots(n_rows, n_cols, **kwargs)
if n_rows == 1:
axs = [axs]
if n_cols == 1:
axs = [axs]
self._strata_plot = (fig, [ax for col in np.transpose(axs) for ax in col])
def _update_types(self):
self._types = list(map(lambda t: (t.name, t.dataType.typeName()), self._df.schema.fields))
self._numerical = list(map(itemgetter(0), filter(lambda t: t[1] in ['byte', 'short', 'integer', 'long',
'float', 'double'], self._types)))
self._continuous = list(map(itemgetter(0), filter(lambda t: t[1] in ['double', 'float'], self._types)))
self._categorical = list(map(itemgetter(0), filter(lambda t: t[1] in ['byte', 'short', 'integer', 'long',
'boolean', 'string'], self._types)))
self._array = list(map(itemgetter(0), filter(lambda t: t[1] in ['array', 'map'], self._types)))
self._string = list(map(itemgetter(0), filter(lambda t: t[1] in ['string'], self._types)))
def _take_array(self, colname, n):
check_columns(self._df, colname)
datatype = self._df.notHandy().select(colname).schema.fields[0].dataType.typeName()
rdd = self._df.notHandy().select(colname).rdd.map(itemgetter(0))
if n == -1:
data = rdd.collect()
else:
data = rdd.take(n)
return np.array(data, dtype=_MAPPING.get(datatype, 'object'))
def _value_counts(self, colnames, dropna=True, raw=False):
colnames = ensure_list(colnames)
strata = self.strata_colnames
colnames = strata + colnames
check_columns(self._df, colnames)
data = self._df.notHandy().select(colnames)
if dropna:
data = data.dropna()
values = (data.groupby(colnames).agg(F.count('*').alias('value_counts'))
.toPandas().set_index(colnames).sort_index()['value_counts'])
if not raw:
for level, col in enumerate(ensure_list(self._strata)):
if not isinstance(col, str):
values.index.set_levels(pd.Index(col._clauses[1:-1]), level=level, inplace=True)
values.index.set_names(col.colname, level=level, inplace=True)
return values
def _fillna(self, target, values):
assert isinstance(target, DataFrame), "Target must be a DataFrame"
items = values.items()
for colname, v in items:
if isinstance(v, dict):
clauses = v.keys()
whens = ' '.join(['WHEN (({clause}) AND (isnan({col}) OR isnull({col}))) THEN {quote}{filling}{quote}'
.format(clause=clause, col=colname, filling=v[clause],
quote='"' if isinstance(v[clause], str) else '')
for clause in clauses])
else:
whens = ('WHEN (isnan({col}) OR isnull({col})) THEN {quote}{filling}{quote}'
.format(col=colname, filling=v,
quote='"' if isinstance(v, str) else ''))
expression = F.expr('CASE {expr} ELSE {col} END'.format(expr=whens, col=colname))
target = target.withColumn(colname, expression)
return target
def __stat_to_dict(self, colname, stat):
if len(self._strata_clauses):
if isinstance(stat, pd.Series):
stat = stat.to_frame(colname)
return {clause: stat.query(raw_clause)[colname].iloc[0]
for clause, raw_clause in zip(self._strata_clauses, self._strata_raw_clauses)}
else:
return stat[colname]
def _fill_values(self, continuous, categorical, strategy):
values = {}
colnames = list(map(itemgetter(0), filter(lambda t: t[1] == 'mean', zip(continuous, strategy))))
values.update(dict([(col, self.__stat_to_dict(col, self.mean(col))) for col in colnames]))
colnames = list(map(itemgetter(0), filter(lambda t: t[1] == 'median', zip(continuous, strategy))))
values.update(dict([(col, self.__stat_to_dict(col, self.median(col))) for col in colnames]))
values.update(dict([(col, self.__stat_to_dict(col, self.mode(col)))
for col in categorical if col in self._categorical]))
return values
def __fill_self(self, continuous, categorical, strategy):
continuous = ensure_list(continuous)
categorical = ensure_list(categorical)
check_columns(self._df, continuous + categorical)
strategy = none2default(strategy, 'mean')
if continuous == ['all']:
continuous = self._continuous
if categorical == ['all']:
categorical = self._categorical
if isinstance(strategy, (list, tuple)):
assert len(continuous) == len(strategy), "There must be a strategy to each column."
else:
strategy = [strategy] * len(continuous)
values = self._fill_values(continuous, categorical, strategy)
self._imputed_values.update(values)
res = HandyFrame(self._fillna(self._df, values), self)
return res
def _dense_to_array(self, colname, array_colname):
check_columns(self._df, colname)
res = dense_to_array(self._df.notHandy(), colname, array_colname)
return HandyFrame(res, self)
def _agg(self, name, func, colnames):
colnames = none2default(colnames, self._df.columns)
colnames = ensure_list(colnames)
check_columns(self._df, self.strata_colnames + [col for col in colnames if not isinstance(col, Column)])
if func is None:
func = getattr(F, name)
res = (self._df.notHandy()
.groupby(self.strata_colnames)
.agg(*(func(col).alias(str(col)) for col in colnames if str(col) not in self.strata_colnames))
.toPandas())
if len(res) == 1:
res = res.iloc[0]
res.name = name
return res
def _calc_fences(self, colnames, k=1.5, precision=.01):
colnames = none2default(colnames, self._numerical)
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
strata = self.strata_colnames
pdf = (self._df.notHandy()
.groupby(strata)
.agg(F.count(F.lit(1)).alias('nrows'),
*[F.expr('approx_percentile({}, {}, {})'.format(c, q, 1./precision)).alias('{}_{}%'.format(c, int(q * 100)))
for q in [.25, .50, .75] for c in colnames],
*[F.mean(c).alias('{}_mean'.format(c)) for c in colnames]).toPandas())
for col in colnames:
pdf.loc[:, '{}_iqr'.format(col)] = pdf.loc[:, '{}_75%'.format(col)] - pdf.loc[:, '{}_25%'.format(col)]
pdf.loc[:, '{}_lfence'.format(col)] = pdf.loc[:, '{}_25%'.format(col)] - k * pdf.loc[:, '{}_iqr'.format(col)]
pdf.loc[:, '{}_ufence'.format(col)] = pdf.loc[:, '{}_75%'.format(col)] + k * pdf.loc[:, '{}_iqr'.format(col)]
return pdf
def _calc_mahalanobis_distance(self, colnames, output_col='__mahalanobis'):
"""Computes Mahalanobis distance from origin
"""
sdf = self._df.notHandy()
check_columns(sdf, colnames)
# Builds pipeline to assemble feature columns and scale them
assembler = VectorAssembler(inputCols=colnames, outputCol='__features')
scaler = StandardScaler(inputCol='__features', outputCol='__scaled', withMean=True)
pipeline = Pipeline(stages=[assembler, scaler])
features = pipeline.fit(sdf).transform(sdf)
# Computes correlation between features and inverts it
# Since we scaled the features, we can assume they have unit variance
# and therefore, correlation and covariance matrices are the same!
mat = Correlation.corr(features, '__scaled').head()[0].toArray()
inv_mat = inv(mat)
# Builds Pandas UDF to compute Mahalanobis distance from origin
# sqrt((V - 0) * inv_M * (V - 0))
try:
import pyarrow
@F.pandas_udf('double')
def pudf_mult(v):
return v.apply(lambda v: np.sqrt(np.dot(np.dot(v, inv_mat), v)))
except:
@F.udf('double')
def pudf_mult(v):
return float(np.sqrt(np.dot(np.dot(v, inv_mat), v)))  # a plain UDF receives a single value, not a pandas Series
# Convert feature vector into array
features = dense_to_array(features, '__scaled', '__array_scaled')
# Computes Mahalanobis distance and flags as outliers all elements above critical value
distance = (features
.withColumn('__mahalanobis', pudf_mult('__array_scaled'))
.drop('__features', '__scaled', '__array_scaled'))
return distance
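# Equivalent single-row computation in plain NumPy (a sketch; the vector below is a
# hypothetical scaled row and inv_mat is the inverted correlation matrix from above):
#   v = np.array([0.5, -1.2, 2.0])
#   d = np.sqrt(v @ inv_mat @ v)   # same quantity stored in '__mahalanobis'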
def _set_mahalanobis_outliers(self, colnames, critical_value=.999,
input_col='__mahalanobis', output_col='__outlier'):
"""Compares Mahalanobis distances to critical values using
Chi-Squared distribution to identify possible outliers.
"""
distance = self._calc_mahalanobis_distance(colnames)
# Computes critical value
critical_value = chi2.ppf(critical_value, len(colnames))
# Computes Mahalanobis distance and flags as outliers all elements above critical value
outlier = (distance.withColumn(output_col, F.col(input_col) > critical_value))
return outlier
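# For example, with three feature columns and the default critical_value of .999,
# chi2.ppf(0.999, 3) is roughly 16.27, so rows whose '__mahalanobis' value exceeds
# that threshold get __outlier = True.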
def _calc_bxp_stats(self, fences_df, colname, showfliers=False):
strata = self.strata_colnames
clauses = self._strata_raw_clauses
if not len(clauses):
clauses = [None]
qnames = ['25%', '50%', '75%', 'mean', 'lfence', 'ufence']
col_summ = fences_df[strata + ['{}_{}'.format(colname, q) for q in qnames] + ['nrows']]
col_summ.columns = strata + qnames + ['nrows']
if len(strata):
col_summ = col_summ.set_index(strata)
lfence, ufence = col_summ[['lfence']], col_summ[['ufence']]
expression = None
for clause in clauses:
if clause is not None:
partial = F.col(colname).between(lfence.query(clause).iloc[0, 0], ufence.query(clause).iloc[0, 0])
partial &= F.expr(clause)
else:
partial = F.col(colname).between(lfence.iloc[0, 0], ufence.iloc[0, 0])
if expression is None:
expression = partial
else:
expression |= partial
outlier = self._df.notHandy().withColumn('__{}_outlier'.format(colname), ~expression)
minmax = (outlier
.filter('not __{}_outlier'.format(colname))
.groupby(strata)
.agg(F.min(colname).alias('min'),
F.max(colname).alias('max'))
.toPandas())
if len(strata):
minmax = [minmax.query(clause).iloc[0][['min', 'max']].values for clause in clauses]
else:
minmax = [minmax.iloc[0][['min', 'max']].values]
fliers_df = outlier.filter('__{}_outlier'.format(colname))
fliers_df = [fliers_df.filter(clause) for clause in clauses] if len(strata) else [fliers_df]
fliers_count = [df.count() for df in fliers_df]
if showfliers:
fliers = [(df
.select(F.abs(F.col(colname)).alias(colname))
.orderBy(F.desc(colname))
.limit(1000)
.toPandas()[colname].values) for df in fliers_df]
else:
fliers = [[]] * len(clauses)
stats = [] # each item corresponds to a different clause - all items belong to the same column
nrows = []
for clause, whiskers, outliers in zip(clauses, minmax, fliers):
summary = col_summ
if clause is not None:
summary = summary.query(clause)
item = {'mean': summary['mean'].values[0],
'med': summary['50%'].values[0],
'q1': summary['25%'].values[0],
'q3': summary['75%'].values[0],
'whislo': whiskers[0],
'whishi': whiskers[1],
'fliers': outliers}
stats.append(item)
nrows.append(summary['nrows'].values[0])
if not len(nrows):
nrows = summary['nrows'].values[0]
return stats, fliers_count, nrows
def set_response(self, colname):
check_columns(self._df, colname)
self._response = colname
if colname is not None:
if colname not in self._continuous:
self._is_classification = True
self._classes = self._df.notHandy().select(colname).rdd.map(itemgetter(0)).distinct().collect()
self._nclasses = len(self._classes)
return self
def disassemble(self, colname, new_colnames=None):
check_columns(self._df, colname)
res = disassemble(self._df.notHandy(), colname, new_colnames)
return HandyFrame(res, self)
def to_metrics_RDD(self, prob_col, label):
check_columns(self._df, [prob_col, label])
return self.disassemble(prob_col).select('{}_1'.format(prob_col), F.col(label).cast('double')).rdd.map(tuple)
def corr(self, colnames=None, method='pearson'):
colnames = none2default(colnames, self._numerical)
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
if self._strata is not None:
colnames = sorted([col for col in colnames if col not in self.strata_colnames])
correlations = Statistics.corr(self._df.notHandy().select(colnames).dropna().rdd.map(lambda row: row[0:]), method=method)
pdf = pd.DataFrame(correlations, columns=colnames, index=colnames)
return pdf
def fill(self, *args, continuous=None, categorical=None, strategy=None):
if len(args) and isinstance(args[0], DataFrame):
return self._fillna(args[0], self._imputed_values)
else:
return self.__fill_self(continuous=continuous, categorical=categorical, strategy=strategy)
@agg
def isnull(self, ratio=False):
def func(colname):
return F.sum(F.isnull(colname).cast('int')).alias(colname)
name = 'missing'
if ratio:
name += '(ratio)'
missing = self._agg(name, func, self._df.columns)
if ratio:
nrows = self._agg('nrows', F.sum, F.lit(1))
if isinstance(missing, pd.Series):
missing = missing / nrows["Column<b'1'>"]
else:
missing.iloc[:, 1:] = missing.iloc[:, 1:].values / nrows["Column<b'1'>"].values.reshape(-1, 1)
if len(self.strata_colnames):
missing = missing.set_index(self.strata_colnames).T.unstack()
missing.name = name
return missing
@agg
def nunique(self, colnames=None):
res = self._agg('nunique', F.approx_count_distinct, colnames)
if len(self.strata_colnames):
res = res.set_index(self.strata_colnames).T.unstack()
res.name = 'nunique'
return res
def outliers(self, colnames=None, ratio=False, method='tukey', **kwargs):
colnames = none2default(colnames, self._numerical)
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
res = None
if method == 'tukey':
outliers = []
try:
k = float(kwargs['k'])
except KeyError:
k = 1.5
fences_df = self._calc_fences(colnames, k=k, precision=.01)
index = fences_df[self.strata_colnames].set_index(self.strata_colnames).index \
if len(self.strata_colnames) else None
for colname in colnames:
stats, counts, nrows = self._calc_bxp_stats(fences_df, colname, showfliers=False)
outliers.append(pd.Series(counts, index=index, name=colname))
if ratio:
outliers[-1] /= nrows
res = pd.DataFrame(outliers).unstack()
if not len(self.strata_colnames):
res = res.droplevel(0)
name = 'outliers'
if ratio:
name += '(ratio)'
res.name = name
return res
def get_outliers(self, colnames=None, critical_value=.999):
colnames = none2default(colnames, self._numerical)
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
outliers = self._set_mahalanobis_outliers(colnames, critical_value)
df = outliers.filter('__outlier').orderBy(F.desc('__mahalanobis')).drop('__outlier', '__mahalanobis')
return HandyFrame(df, self)
def remove_outliers(self, colnames=None, critical_value=.999):
colnames = none2default(colnames, self._numerical)
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
outliers = self._set_mahalanobis_outliers(colnames, critical_value)
df = outliers.filter('not __outlier').drop('__outlier', '__mahalanobis')
return HandyFrame(df, self)
def fence(self, colnames, k=1.5):
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
pdf = self._calc_fences(colnames, k=k)
if len(self.strata_colnames):
pdf = pdf.set_index(self.strata_colnames)
df = self._df.notHandy()
for colname in colnames:
lfence, ufence = pdf.loc[:, ['{}_lfence'.format(colname)]], pdf.loc[:, ['{}_ufence'.format(colname)]]
if len(self._strata_raw_clauses):
whens1 = ' '.join(['WHEN ({clause}) THEN greatest({col}, {fence})'.format(clause=clause,
col=colname,
fence=lfence.query(clause).iloc[0, 0])
for clause in self._strata_raw_clauses])
whens2 = ' '.join(['WHEN ({clause}) THEN least({col}, {fence})'.format(clause=clause,
col=colname,
fence=ufence.query(clause).iloc[0, 0])
for clause in self._strata_raw_clauses])
expression1 = F.expr('CASE {} END'.format(whens1))
expression2 = F.expr('CASE {} END'.format(whens2))
self._fenced_values.update({colname: {clause: [lfence.query(clause).iloc[0, 0],
ufence.query(clause).iloc[0, 0]]
for clause in self._strata_clauses}})
else:
self._fenced_values.update({colname: [lfence.iloc[0, 0], ufence.iloc[0, 0]]})
expression1 = F.expr('greatest({col}, {fence})'.format(col=colname, fence=lfence.iloc[0, 0]))
expression2 = F.expr('least({col}, {fence})'.format(col=colname, fence=ufence.iloc[0, 0]))
df = df.withColumn(colname, expression1).withColumn(colname, expression2)
return HandyFrame(df.select(self._df.columns), self)
@inccol
def value_counts(self, colnames, dropna=True):
return self._value_counts(colnames, dropna)
@inccol
def mode(self, colname):
check_columns(self._df, [colname])
if self._strata is None:
values = (self._df.notHandy().select(colname).dropna()
.groupby(colname).agg(F.count('*').alias('mode'))
.orderBy(F.desc('mode')).limit(1)
.toPandas()[colname][0])
return pd.Series(values, index=[colname], name='mode')
else:
strata = self.strata_colnames
colnames = strata + [colname]
values = (self._df.notHandy().select(colnames).dropna()
.groupby(colnames).agg(F.count('*').alias('mode'))
.withColumn('order', F.row_number().over(Window.partitionBy(strata).orderBy(F.desc('mode'))))
.filter('order == 1').drop('order')
.toPandas().set_index(strata).sort_index()[colname])
values.name = 'mode'
return values
@inccol
def entropy(self, colnames):
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
sdf = self._df.notHandy()
n = sdf.count()
entropy = []
for colname in colnames:
if colname in self._categorical:
res = (self._df
.groupby(self.strata_colnames + [colname])
.agg(F.count('*').alias('value_counts')).withColumn('probability', F.col('value_counts') / n)
.groupby(self.strata_colnames)
.agg(F.sum(F.expr('-log2(probability) * probability')).alias(colname))
.safety_off()
.cols[self.strata_colnames + [colname]][:])
if len(self.strata_colnames):
res.set_index(self.strata_colnames, inplace=True)
res = res.unstack()
else:
res = res[colname]
res.index = [colname]
else:
res = pd.Series(None, index=[colname])
res.name = 'entropy'
entropy.append(res)
return pd.concat(entropy).sort_index()
@inccol
def mutual_info(self, colnames):
def distribution(sdf, colnames):
return sdf.groupby(colnames).agg(F.count('*').alias('__count'))
check_columns(self._df, colnames)
n = len(colnames)
probs = []
sdf = self._df.notHandy()
for i in range(n):
probs.append(distribution(sdf, self.strata_colnames + [colnames[i]]))
if len(self.strata_colnames):
nrows = sdf.groupby(self.strata_colnames).agg(F.count('*').alias('__n'))
else:
nrows = sdf.count()
entropies = self.entropy(colnames)
res = []
for i in range(n):
for j in range(i, n):
if i == j:
mi = pd.Series(entropies[colnames[i]], name='mi').to_frame()
else:
tdf = distribution(sdf, self.strata_colnames + [colnames[i], colnames[j]])
if len(self.strata_colnames):
tdf = tdf.join(nrows, on=self.strata_colnames)
else:
tdf = tdf.withColumn('__n', F.lit(nrows))
tdf = tdf.join(probs[i].toDF(*self.strata_colnames, colnames[i], '__count0'), on=self.strata_colnames + [colnames[i]])
tdf = tdf.join(probs[j].toDF(*self.strata_colnames, colnames[j], '__count1'), on=self.strata_colnames + [colnames[j]])
mi = (tdf
.groupby(self.strata_colnames)
.agg(F.sum(F.expr('log2(__count * __n / (__count0 * __count1)) * __count / __n')).alias('mi'))
.toPandas())
if len(self.strata_colnames):
mi.set_index(self.strata_colnames, inplace=True)
res.append(mi.assign(ci=colnames[j], cj=colnames[i]))
res.append(mi.assign(ci=colnames[i], cj=colnames[j]))
res = pd.concat(res).set_index(['ci', 'cj'], append=len(self.strata_colnames)).sort_index()
res = pd.pivot_table(res, index=self.strata_colnames + ['ci'], columns=['cj'])
res.index.names = self.strata_colnames + ['']
res.columns = res.columns.droplevel(0).rename('')
return res
@agg
def mean(self, colnames):
return self._agg('mean', F.mean, colnames)
@agg
def min(self, colnames):
return self._agg('min', F.min, colnames)
@agg
def max(self, colnames):
return self._agg('max', F.max, colnames)
@agg
def percentile(self, colnames, perc=50, precision=.01):
def func(c):
return F.expr('approx_percentile({}, {}, {})'.format(c, perc/100., 1./precision))
try:
name = {25: 'q1', 50: 'median', 75: 'q3'}[perc]
except KeyError:
name = 'percentile_{}'.format(perc)
return self._agg(name, func, colnames)
@agg
def median(self, colnames, precision=.01):
return self.percentile(colnames, 50, precision)
@agg
def stddev(self, colnames):
return self._agg('stddev', F.stddev, colnames)
@agg
def var(self, colnames):
return self._agg('var', F.stddev, colnames) ** 2
@agg
def q1(self, colnames, precision=.01):
return self.percentile(colnames, 25, precision)
@agg
def q3(self, colnames, precision=.01):
return self.percentile(colnames, 75, precision)
### Boxplot functions
def _strat_boxplot(self, colnames, **kwargs):
n_rows = n_cols = 1
kwds = deepcopy(kwargs)
for kw in ['showfliers', 'precision']:
try:
del kwds[kw]
except KeyError:
pass
if isinstance(colnames, (tuple, list)) and (len(colnames) > 1):
n_rows = self._n_rows
n_cols = self._n_cols
self._build_strat_plot(n_rows, n_cols, **kwds)
return None
@inccol
def boxplot(self, colnames, ax=None, showfliers=True, k=1.5, precision=.01, **kwargs):
colnames = ensure_list(colnames)
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
assert len(colnames), "Only numerical columns can be plot!"
return boxplot(self._df, colnames, ax, showfliers, k, precision)
def _post_boxplot(self, res):
return post_boxplot(self._strata_plot[1], res)
### Scatterplot functions
def _strat_scatterplot(self, colnames, **kwargs):
self._build_strat_plot(self._n_rows, self._n_cols, **kwargs)
return strat_scatterplot(self._df.notHandy(), colnames[0], colnames[1])
@inccol
def scatterplot(self, colnames, ax=None, **kwargs):
assert len(colnames) == 2, "There must be two columns to plot!"
check_columns(self._df, colnames)
colnames = [col for col in colnames if col in self._numerical]
assert len(colnames) == 2, "Both columns must be numerical!"
return scatterplot(self._df, colnames[0], colnames[1], ax=ax)
### Histogram functions
def _strat_hist(self, colname, bins=10, **kwargs):
self._build_strat_plot(self._n_rows, self._n_cols, **kwargs)
categorical = True
if colname in self._continuous:
categorical = False
#res = strat_histogram(self._df.notHandy(), colname, bins, categorical)
res = strat_histogram(self._df, colname, bins, categorical)
self._strata_plot[0].suptitle('')
plt.tight_layout()
return res
@inccol
def hist(self, colname, bins=10, ax=None, **kwargs):
# TO DO
# include split per response/columns
assert len(ensure_list(colname)) == 1, "Only single columns can be plot!"
check_columns(self._df, colname)
if colname in self._continuous:
return histogram(self._df, colname, bins=bins, categorical=False, ax=ax)
else:
return histogram(self._df, colname, bins=bins, categorical=True, ax=ax)
class HandyGrouped(GroupedData):
def __init__(self, jgd, df, *args):
self._jgd = jgd
self._df = df
self.sql_ctx = df.sql_ctx
self._cols = args
def agg(self, *exprs):
df = super().agg(*exprs)
handy = deepcopy(self._df._handy)
handy._group_cols = self._cols
return HandyFrame(df, handy)
def __repr__(self):
return "HandyGrouped[%s]" % (", ".join("%s" % c for c in self._group_cols))
class HandyFrame(DataFrame):
"""HandySpark version of DataFrame.
Attributes
----------
cols: HandyColumns
class to access pandas-like column based methods implemented in Spark
pandas: HandyPandas
class to access pandas-like column based methods through pandas UDFs
transformers: HandyTransformers
class to generate Handy transformers
stages: integer
number of stages in the execution plan
response: string
name of the response column
is_classification: boolean
True if response is a categorical variable
classes: list
list of classes for a classification problem
nclasses: integer
number of classes for a classification problem
ncols: integer
number of columns of the HandyFrame
nrows: integer
number of rows of the HandyFrame
shape: tuple
tuple representing dimensionality of the HandyFrame
statistics_: dict
imputation fill value for each feature
If stratified, first level keys are filter clauses for stratification
fences_: dict
fence values for each feature
If stratified, first level keys are filter clauses for stratification
is_stratified: boolean
True if HandyFrame was stratified
values: ndarray
Numpy representation of HandyFrame.
Available methods:
- notHandy: makes it a plain Spark dataframe
- stratify: used to perform stratified operations
- isnull: checks for missing values
- fill: fills missing values
- outliers: returns counts of outliers, columnwise, using Tukey's method
- get_outliers: returns list of outliers using Mahalanobis distance
- remove_outliers: filters out outliers using Mahalanobis distance
- fence: fences outliers
- set_safety_limit: defines new safety limit for collect operations
- safety_off: disables safety limit for a single operation
- assign: appends a new columns based on an expression
- nunique: returns number of unique values in each column
- set_response: sets column to be used as response / label
- disassemble: turns a vector / array column into multiple columns
- to_metrics_RDD: turns probability and label columns into a tuple RDD
"""
def __init__(self, df, handy=None):
super().__init__(df._jdf, df.sql_ctx)
if handy is None:
handy = Handy(self)
else:
handy = deepcopy(handy)
handy._df = self
handy._update_types()
self._handy = handy
self._safety = self._handy._safety
self._safety_limit = self._handy._safety_limit
self.__overriden = ['collect', 'take']
self._strat_handy = None
self._strat_index = None
def __getattribute__(self, name):
attr = object.__getattribute__(self, name)
if hasattr(attr, '__call__') and name not in self.__overriden:
def wrapper(*args, **kwargs):
try:
res = attr(*args, **kwargs)
except HandyException as e:
raise HandyException(str(e), summary=False)
except Exception as e:
raise HandyException(str(e), summary=True)
if name != 'notHandy':
if not isinstance(res, HandyFrame):
if isinstance(res, DataFrame):
res = HandyFrame(res, self._handy)
if isinstance(res, GroupedData):
res = HandyGrouped(res._jgd, res._df, *args)
return res
return wrapper
else:
return attr
def __repr__(self):
return "HandyFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _get_strata(self):
plot = None
object = None
if self._strat_handy is not None:
try:
object = self._strat_handy._strata_object
except AttributeError:
pass
if object is None:
object = True
try:
plots = self._strat_handy._strata_plot[1]
#if len(plots) > 1:
# plot = plots[self._strat_index]
plot = plots
except (AttributeError, IndexError):
pass
return plot, object
def _gen_row_ids(self, *args):
# EXPERIMENTAL - DO NOT USE!
return (self
.sort(*args)
.withColumn('_miid', F.monotonically_increasing_id())
.withColumn('_row_id', F.row_number().over(Window().orderBy(F.col('_miid'))))
.drop('_miid'))
def _loc(self, lower_bound, upper_bound):
# EXPERIMENTAL - DO NOT USE!
assert '_row_id' in self.columns, "Cannot use LOC without generating `row_id`s first!"
clause = F.col('_row_id').between(lower_bound, upper_bound)
return self.filter(clause)
@property
def cols(self):
"""Returns a class to access pandas-like column based methods implemented in Spark
Available methods:
- min
- max
- median
- q1
- q3
- stddev
- value_counts
- mode
- corr
- nunique
- hist
- boxplot
- scatterplot
"""
return HandyColumns(self, self._handy)
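# Usage sketch (column names are assumptions): the cols accessor gives pandas-like
# column selection and summaries computed in Spark.
#   hdf.cols['Embarked'].value_counts(dropna=False)
#   hdf.cols[['Fare', 'Age']].corr()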
@property
def pandas(self):
"""Returns a class to access pandas-like column based methods through pandas UDFs
Available methods:
- betweeen / between_time
- isin
- isna / isnull
- notna / notnull
- abs
- clip / clip_lower / clip_upper
- replace
- round / truncate
- tz_convert / tz_localize
"""
return HandyPandas(self)
@property
def transformers(self):
"""Returns a class to generate Handy transformers
Available transformers:
- HandyImputer
- HandyFencer
"""
return HandyTransformers(self)
@property
def stages(self):
"""Returns the number of stages in the execution plan.
"""
return self._handy.stages
@property
def response(self):
"""Returns the name of the response column.
"""
return self._handy.response
@property
def is_classification(self):
"""Returns True if response is a categorical variable.
"""
return self._handy.is_classification
@property
def classes(self):
"""Returns list of classes for a classification problem.
"""
return self._handy.classes
@property
def nclasses(self):
"""Returns the number of classes for a classification problem.
"""
return self._handy.nclasses
@property
def ncols(self):
"""Returns the number of columns of the HandyFrame.
"""
return self._handy.ncols
@property
def nrows(self):
"""Returns the number of rows of the HandyFrame.
"""
return self._handy.nrows
@property
def shape(self):
"""Return a tuple representing the dimensionality of the HandyFrame.
"""
return self._handy.shape
@property
def statistics_(self):
"""Returns dictionary with imputation fill value for each feature.
If stratified, first level keys are filter clauses for stratification.
"""
return self._handy.statistics_
@property
def fences_(self):
"""Returns dictionary with fence values for each feature.
If stratified, first level keys are filter clauses for stratification.
"""
return self._handy.fences_
@property
def values(self):
"""Numpy representation of HandyFrame.
"""
# safety limit will kick in, unless explicitly off before
tdf = self
if self._safety:
tdf = tdf.limit(self._safety_limit)
return np.array(tdf.rdd.map(tuple).collect())
def notHandy(self):
"""Converts HandyFrame back into Spark's DataFrame
"""
return DataFrame(self._jdf, self.sql_ctx)
def set_safety_limit(self, limit):
"""Sets safety limit used for ``collect`` method.
"""
self._handy._safety_limit = limit
self._safety_limit = limit
def safety_off(self):
"""Disables safety limit for a single call of ``collect`` method.
"""
self._handy._safety = False
self._safety = False
return self
def collect(self):
"""Returns all the records as a list of :class:`Row`.
By default, its output is limited by the safety limit.
To get original `collect` behavior, call ``safety_off`` method first.
"""
try:
if self._safety:
print('\nINFO: Safety is ON - returning up to {} instances.'.format(self._safety_limit))
return super().limit(self._safety_limit).collect()
else:
res = super().collect()
self._safety = True
return res
except HandyException as e:
raise HandyException(str(e), summary=False)
except Exception as e:
raise HandyException(str(e), summary=True)
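# Usage sketch (hdf is an assumed HandyFrame): collect() is capped at the safety limit
# unless safety is switched off for that single call.
#   hdf.set_safety_limit(5000)
#   capped = hdf.collect()                  # at most 5000 rows
#   everything = hdf.safety_off().collect() # full, uncapped collect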
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
"""
self._handy._safety = False
res = super().take(num)
self._handy._safety = True
return res
def stratify(self, strata):
"""Stratify the HandyFrame.
Stratified operations should be more efficient than group by operations, as they
rely on three iterative steps, namely: filtering the underlying HandyFrame, performing
the operation and aggregating the results.
"""
strata = ensure_list(strata)
check_columns(self, strata)
return self._handy._stratify(strata)
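# Usage sketch (column names are assumptions): operations issued after stratify are run
# per stratum and their results aggregated.
#   hdf.stratify(['Pclass']).cols['Embarked'].mode()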
def transform(self, f, name=None, args=None, returnType=None):
"""INTERNAL USE
"""
return HandyTransform.transform(self, f, name=name, args=args, returnType=returnType)
def apply(self, f, name=None, args=None, returnType=None):
"""INTERNAL USE
"""
return HandyTransform.apply(self, f, name=name, args=args, returnType=returnType)
def assign(self, **kwargs):
"""Assign new columns to a HandyFrame, returning a new object (a copy)
with all the original columns in addition to the new ones.
Parameters
----------
kwargs : keyword, value pairs
keywords are the column names.
If the values are callable, they are computed on the DataFrame and
assigned to the new columns.
If the values are not callable, (e.g. a scalar, or string),
they are simply assigned.
Returns
-------
df : HandyFrame
A new HandyFrame with the new columns in addition to
all the existing columns.
"""
return HandyTransform.assign(self, **kwargs)
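# Usage sketch (the Fare column is an assumption): a callable keyed by the new column
# name derives it from existing columns via a pandas UDF.
#   hdf = hdf.assign(logFare=lambda Fare: np.log(Fare + 1))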
@agg
def isnull(self, ratio=False):
"""Returns array with counts of missing value for each column in the HandyFrame.
Parameters
----------
ratio: boolean, default False
If True, returns ratios instead of absolute counts.
Returns
-------
counts: Series
"""
return self._handy.isnull(ratio)
@agg
def nunique(self):
"""Return Series with number of distinct observations for all columns.
Parameters
----------
exact: boolean, optional
If True, computes exact number of unique values, otherwise uses an approximation.
Returns
-------
nunique: Series
"""
return self._handy.nunique(self.columns) #, exact)
@inccol
def outliers(self, ratio=False, method='tukey', **kwargs):
"""Return Series with number of outlier observations according to
the specified method for all columns.
Parameters
----------
ratio: boolean, optional
If True, returns proportion instead of counts.
Default is False.
method: string, optional
Method used to detect outliers. Currently, only Tukey's method is supported.
Default is tukey.
Returns
-------
outliers: Series
"""
return self._handy.outliers(self.columns, ratio=ratio, method=method, **kwargs)
def get_outliers(self, colnames=None, critical_value=.999):
"""Returns HandyFrame containing all rows deemed as outliers using
Mahalanobis distance and informed critical value.
Parameters
----------
colnames: list of str, optional
List of columns to be used for computing Mahalanobis distance.
Default includes all numerical columns
critical_value: float, optional
Critical value for chi-squared distribution to classify outliers
according to Mahalanobis distance.
Default is .999 (99.9%).
"""
return self._handy.get_outliers(colnames, critical_value)
def remove_outliers(self, colnames=None, critical_value=.999):
"""Returns HandyFrame containing only rows NOT deemed as outliers
using Mahalanobis distance and informed critical value.
Parameters
----------
colnames: list of str, optional
List of columns to be used for computing Mahalanobis distance.
Default includes all numerical columns
critical_value: float, optional
Critical value for chi-squared distribution to classify outliers
according to Mahalanobis distance.
Default is .999 (99.9%).
"""
return self._handy.remove_outliers(colnames, critical_value)
def set_response(self, colname):
"""Sets column to be used as response in supervised learning algorithms.
Parameters
----------
colname: string
Returns
-------
self
"""
check_columns(self, colname)
return self._handy.set_response(colname)
@inccol
def fill(self, *args, categorical=None, continuous=None, strategy=None):
"""Fill NA/NaN values using the specified methods.
The values used for imputation are kept in ``statistics_`` property
and can later be used to generate a corresponding HandyImputer transformer.
Parameters
----------
categorical: 'all' or list of string, optional
List of categorical columns.
These columns are filled with their corresponding modes (most common values).
continuous: 'all' or list of string, optional
List of continuous value columns.
By default, these columns are filled with their corresponding means.
If a same-sized list is provided in the ``strategy`` argument, it uses
the corresponding strategy for each column.
strategy: list of string, optional
If informed, it must contain a strategy - either ``mean`` or ``median`` - for
each one of the continuous columns.
Returns
-------
df : HandyFrame
A new HandyFrame with filled missing values.
"""
return self._handy.fill(*args, continuous=continuous, categorical=categorical, strategy=strategy)
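# Usage sketch (column names are assumptions): continuous columns are imputed with the
# requested statistic, categorical columns with their mode, and the values are kept in
# statistics_ for building a HandyImputer later.
#   hdf_filled = hdf.fill(continuous=['Age'], strategy=['median'], categorical=['Embarked'])
#   hdf_filled.statistics_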
@inccol
def fence(self, colnames, k=1.5):
"""Caps outliers using lower and upper fences given by Tukey's method,
using 1.5 times the interquartile range (IQR).
The fence values used for capping outliers are kept in ``fences_`` property
and can later be used to generate a corresponding HandyFencer transformer.
For more information, check: https://en.wikipedia.org/wiki/Outlier#Tukey's_fences
Parameters
----------
colnames: list of string
Column names to apply fencing.
k: float, optional
Constant multiplier for the IQR.
Default is 1.5 (corresponding to Tukey's outlier, use 3 for "far out" values)
Returns
-------
df : HandyFrame
A new HandyFrame with capped outliers.
"""
return self._handy.fence(colnames, k=k)
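# Usage sketch (the Fare column is an assumption): values are capped at Q1 - k*IQR and
# Q3 + k*IQR, and the fences used are kept in fences_ for building a HandyFencer later.
#   hdf_fenced = hdf_filled.fence(['Fare'])
#   hdf_fenced.fences_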
def disassemble(self, colname, new_colnames=None):
"""Disassembles a Vector or Array column into multiple columns.
Parameters
----------
colname: string
Column containing Vector or Array elements.
new_colnames: list of string, optional
Default is None, column names are generated using a sequentially
generated suffix (e.g., _0, _1, etc.) for ``colname``.
If informed, it must have as many column names as elements
in the shortest vector/array of ``colname``.
Returns
-------
df : HandyFrame
A new HandyFrame with the new disassembled columns in addition to
all the existing columns.
"""
return self._handy.disassemble(colname, new_colnames)
def to_metrics_RDD(self, prob_col='probability', label_col='label'):
"""Converts a DataFrame containing predicted probabilities and classification labels
into a RDD suited for use with ``BinaryClassificationMetrics`` object.
Parameters
----------
prob_col: string, optional
Column containing Vectors of probabilities.
Default is 'probability'.
label_col: string, optional
Column containing labels.
Default is 'label'.
Returns
-------
rdd: RDD
RDD of tuples (probability, label)
"""
return self._handy.to_metrics_RDD(prob_col, label_col)
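# Usage sketch (predictions is an assumed DataFrame output by a fitted classifier):
#   from pyspark.mllib.evaluation import BinaryClassificationMetrics
#   bcm = BinaryClassificationMetrics(predictions.toHandy().to_metrics_RDD('probability', 'label'))
#   print(bcm.areaUnderROC)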
class Bucket(object):
"""Bucketizes a column of continuous values into equal sized bins
to perform stratification.
Parameters
----------
colname: string
Column containing continuous values
bins: integer
Number of equal sized bins to map original values to.
Returns
-------
bucket: Bucket
Bucket object to be used as column in stratification.
"""
def __init__(self, colname, bins=5):
self._colname = colname
self._bins = bins
self._buckets = None
self._clauses = None
def __repr__(self):
return 'Bucket_{}_{}'.format(self._colname, self._bins)
@property
def colname(self):
return self._colname
def _get_buckets(self, df):
check_columns(df, self._colname)
buckets = ([-float('inf')] +
np.linspace(*df.agg(F.min(self._colname),
F.max(self._colname)).rdd.map(tuple).collect()[0],
self._bins + 1).tolist() +
[float('inf')])
buckets[-2] += 1e-7
self._buckets = buckets
return buckets
def _get_clauses(self, buckets):
clauses = []
clauses.append('{} < {:.4f}'.format(self._colname, buckets[1]))
for b, e in zip(buckets[1:-2], buckets[2:-1]):
clauses.append('{} >= {:.4f} and {} < {:.4f}'.format(self._colname, b, self._colname, e))
clauses[-1] = clauses[-1].replace('<', '<=')
clauses.append('{} > {:.4f}'.format(self._colname, buckets[-2]))
self._clauses = clauses
return clauses
class Quantile(Bucket):
"""Bucketizes a column of continuous values into quantiles
to perform stratification.
Parameters
----------
colname: string
Column containing continuous values
bins: integer
Number of quantiles to map original values to.
Returns
-------
quantile: Quantile
Quantile object to be used as column in stratification.
"""
def __repr__(self):
return 'Quantile{}_{}'.format(self._colname, self._bins)
def _get_buckets(self, df):
buckets = ([-float('inf')] +
df.approxQuantile(col=self._colname,
probabilities=np.linspace(0, 1, self._bins + 1).tolist(),
relativeError=0.01) +
[float('inf')])
buckets[-2] += 1e-7
return buckets
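# Usage sketch (column names are assumptions): Bucket and Quantile objects can stand in
# for column names when stratifying on binned continuous values.
#   hdf.stratify([Bucket('Age', 3), 'Pclass']).cols['Embarked'].value_counts()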
class HandyColumns(object):
"""HandyColumn(s) in a HandyFrame.
Attributes
----------
numerical: list of string
List of numerical columns (integer, float, double)
categorical: list of string
List of categorical columns (string, integer)
continuous: list of string
List of continuous columns (float, double)
string: list of string
List of string columns (string)
array: list of string
List of array columns (array, map)
"""
def __init__(self, df, handy, strata=None):
self._df = df
self._handy = handy
self._strata = strata
self._colnames = None
self.COLTYPES = {'continuous': self.continuous,
'categorical': self.categorical,
'numerical': self.numerical,
'string': self.string,
'array': self.array}
def __getitem__(self, *args):
if isinstance(args[0], tuple):
args = args[0]
item = args[0]
if self._strata is None:
if self._colnames is None:
if item == slice(None, None, None):
item = self._df.columns
if isinstance(item, str):
try:
# try it as an alias
item = self.COLTYPES[item]
except KeyError:
pass
check_columns(self._df, item)
self._colnames = item
if isinstance(self._colnames, int):
idx = self._colnames + (len(self._handy._group_cols) if self._handy._group_cols is not None else 0)
assert idx < len(self._df.columns), "Invalid column index {}".format(idx)
self._colnames = list(self._df.columns)[idx]
return self
else:
try:
n = item.stop
if n is None:
n = -1
except:
n = 20
if isinstance(self._colnames, (tuple, list)):
res = self._df.notHandy().select(self._colnames)
if n == -1:
if self._df._safety:
print('\nINFO: Safety is ON - returning up to {} instances.'.format(self._df._safety_limit))
n = self._df._safety_limit
if n != -1:
res = res.limit(n)
res = res.toPandas()
self._handy._safety = True
self._df._safety = True
return res
else:
return self._handy.__getitem__(self._colnames, n)
else:
if self._colnames is None:
if item == slice(None, None, None):
item = self._df.columns
if isinstance(item, str):
try:
# try it as an alias
item = self.COLTYPES[item]
except KeyError:
pass
self._strata._handycolumns = item
return self._strata
def __repr__(self):
colnames = ensure_list(self._colnames)
return "HandyColumns[%s]" % (", ".join("%s" % str(c) for c in colnames))
@property
def numerical(self):
"""Returns list of numerical columns in the HandyFrame.
"""
return self._handy._numerical
@property
def categorical(self):
"""Returns list of categorical columns in the HandyFrame.
"""
return self._handy._categorical
@property
def continuous(self):
"""Returns list of continuous columns in the HandyFrame.
"""
return self._handy._continuous
@property
def string(self):
"""Returns list of string columns in the HandyFrame.
"""
return self._handy._string
@property
def array(self):
"""Returns list of array or map columns in the HandyFrame.
"""
return self._handy._array
def mean(self):
return self._handy.mean(self._colnames)
def min(self):
return self._handy.min(self._colnames)
def max(self):
return self._handy.max(self._colnames)
def median(self, precision=.01):
"""Returns approximate median with given precision.
Parameters
----------
precision: float, optional
Default is 0.01
"""
return self._handy.median(self._colnames, precision)
def stddev(self):
return self._handy.stddev(self._colnames)
def var(self):
return self._handy.var(self._colnames)
def percentile(self, perc, precision=.01):
"""Returns approximate percentile with given precision.
Parameters
----------
perc: integer
Percentile to be computed
precision: float, optional
Default is 0.01
"""
return self._handy.percentile(self._colnames, perc, precision)
def q1(self, precision=.01):
"""Returns approximate first quartile with given precision.
Parameters
----------
precision: float, optional
Default is 0.01
"""
return self._handy.q1(self._colnames, precision)
def q3(self, precision=.01):
"""Returns approximate third quartile with given precision.
Parameters
----------
precision: float, optional
Default is 0.01
"""
return self._handy.q3(self._colnames, precision)
def _value_counts(self, dropna=True, raw=True):
assert len(ensure_list(self._colnames)) == 1, "A single column must be selected!"
return self._handy._value_counts(self._colnames, dropna, raw)
def value_counts(self, dropna=True):
"""Returns object containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
dropna : boolean, default True
Don't include counts of missing values.
Returns
-------
counts: Series
"""
assert len(ensure_list(self._colnames)) == 1, "A single column must be selected!"
return self._handy.value_counts(self._colnames, dropna)
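# Illustrative usage (a sketch, not part of the library code; assumes a HandyFrame `hdf`
# with a categorical column named 'category' already loaded):
#   counts = hdf.cols['category'].value_counts(dropna=False)
#   # -> pandas Series, most frequent value first, missing values included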
def entropy(self):
"""Returns object containing entropy (base 2) of each column.
Returns
-------
entropy: Series
"""
return self._handy.entropy(self._colnames)
def mutual_info(self):
"""Returns object containing matrix of mutual information
between every pair of columns.
Returns
-------
mutual_info: pd.DataFrame
"""
return self._handy.mutual_info(self._colnames)
def mode(self):
"""Returns same-type modal (most common) value for each column.
Returns
-------
mode: Series
"""
colnames = ensure_list(self._colnames)
modes = [self._handy.mode(colname) for colname in colnames]
if len(colnames) == 1:
return modes[0]
else:
return pd.concat(modes, axis=0)
def corr(self, method='pearson'):
"""Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
y : DataFrame
"""
colnames = [col for col in self._colnames if col in self.numerical]
return self._handy.corr(colnames, method=method)
def nunique(self):
"""Return Series with number of distinct observations for specified columns.
Note: an 'exact' option is not currently supported by this method.
Returns
-------
nunique: Series
"""
return self._handy.nunique(self._colnames) #, exact)
def outliers(self, ratio=False, method='tukey', **kwargs):
"""Return Series with number of outlier observations according to
the specified method for all columns.
Parameters
----------
ratio: boolean, optional
If True, returns proportion instead of counts.
Default is False.
method: string, optional
Method used to detect outliers. Currently, only Tukey's method is supported.
Default is tukey.
Returns
-------
outliers: Series
"""
return self._handy.outliers(self._colnames, ratio=ratio, method=method, **kwargs)
def get_outliers(self, critical_value=.999):
"""Returns HandyFrame containing all rows deemed as outliers using
Mahalanobis distance and informed critical value.
Parameters
----------
critical_value: float, optional
Critical value for chi-squared distribution to classify outliers
according to Mahalanobis distance.
Default is .999 (99.9%).
"""
return self._handy.get_outliers(self._colnames, critical_value)
def remove_outliers(self, critical_value=.999):
"""Returns HandyFrame containing only rows NOT deemed as outliers
using Mahalanobis distance and informed critical value.
Parameters
----------
critical_value: float, optional
Critical value for chi-squared distribution to classify outliers
according to Mahalanobis distance.
Default is .999 (99.9%).
"""
return self._handy.remove_outliers(self._colnames, critical_value)
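# Illustrative usage (a sketch; assumes a HandyFrame `hdf` with numerical columns 'x' and 'y'):
#   flagged = hdf.cols[['x', 'y']].get_outliers(critical_value=.999)     # rows flagged as outliers
#   cleaned = hdf.cols[['x', 'y']].remove_outliers(critical_value=.999)  # remaining rows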
def hist(self, bins=10, ax=None):
"""Draws histogram of the HandyFrame's column using matplotlib / pylab.
Parameters
----------
bins : integer, default 10
Number of histogram bins to be used
ax : matplotlib axes object, default None
"""
return self._handy.hist(self._colnames, bins, ax)
def boxplot(self, ax=None, showfliers=True, k=1.5, precision=.01):
"""Makes a box plot from HandyFrame column.
Parameters
----------
ax : matplotlib axes object, default None
showfliers : bool, optional (True)
Show the outliers beyond the caps.
k: float, optional
Constant multiplier for the IQR.
Default is 1.5 (corresponding to Tukey's fences; use 3 for "far out" values)
"""
return self._handy.boxplot(self._colnames, ax, showfliers, k, precision)
def scatterplot(self, ax=None):
"""Makes a scatter plot of two HandyFrame columns.
Parameters
----------
ax : matplotlib axes object, default None
"""
return self._handy.scatterplot(self._colnames, ax)
class HandyStrata(object):
__handy_methods = (list(filter(lambda n: n[0] != '_',
(map(itemgetter(0),
inspect.getmembers(HandyFrame,
predicate=inspect.isfunction) +
inspect.getmembers(HandyColumns,
predicate=inspect.isfunction)))))) + ['handy']
def __init__(self, handy, strata):
self._handy = handy
self._df = handy._df
self._strata = strata
self._col_clauses = []
self._colnames = []
self._temp_colnames = []
temp_df = self._df
temp_df._handy = self._handy
for col in self._strata:
clauses = []
colname = str(col)
self._colnames.append(colname)
if isinstance(col, Bucket):
self._temp_colnames.append(colname)
buckets = col._get_buckets(self._df)
clauses = col._get_clauses(buckets)
bucketizer = Bucketizer(splits=buckets, inputCol=col.colname, outputCol=colname)
temp_df = HandyFrame(bucketizer.transform(temp_df), self._handy)
self._col_clauses.append(clauses)
self._df = temp_df
self._handy._df = temp_df
self._df._handy = self._handy
value_counts = self._df._handy._value_counts(self._colnames, raw=True).reset_index()
self._raw_combinations = sorted(list(map(tuple, zip(*[value_counts[colname].values
for colname in self._colnames]))))
self._raw_clauses = [' and '.join('{} == {}'.format(str(col), value) if isinstance(col, Bucket)
else '{} == "{}"'.format(str(col),
value[0] if isinstance(value, tuple) else value)
for col, value in zip(self._strata, comb))
for comb in self._raw_combinations]
self._combinations = [tuple(value if not len(clauses) else clauses[int(float(value))]
for value, clauses in zip(comb, self._col_clauses))
for comb in self._raw_combinations]
self._clauses = [' and '.join(value if isinstance(col, Bucket)
else '{} == "{}"'.format(str(col),
value[0] if isinstance(value, tuple) else value)
for col, value in zip(self._strata, comb))
for comb in self._combinations]
self._strat_df = [self._df.filter(clause) for clause in self._clauses]
self._df._strat_handy = self._handy
# Shares the same HANDY object among all sub dataframes
for i, df in enumerate(self._strat_df):
df._strat_index = i
df._strat_handy = self._handy
self._imputed_values = {}
self._handycolumns = None
def __repr__(self):
repr = "HandyStrata[%s]" % (", ".join("%s" % str(c) for c in self._strata))
if self._handycolumns is not None:
colnames = ensure_list(self._handycolumns)
repr = "HandyColumns[%s] by %s" % (", ".join("%s" % str(c) for c in colnames), repr)
return repr
def __getattribute__(self, name):
try:
if name == 'cols':
return HandyColumns(self._df, self._handy, self)
else:
attr = object.__getattribute__(self, name)
return attr
except AttributeError as e:
if name in self.__handy_methods:
def wrapper(*args, **kwargs):
raised = True
try:
# Makes stratification
for df in self._strat_df:
df._handy._strata = self._strata
self._handy._set_stratification(self._strata,
self._raw_combinations, self._raw_clauses,
self._combinations, self._clauses)
if self._handycolumns is not None:
args = (self._handycolumns,) + args
try:
attr_strata = getattr(self._handy, '_strat_{}'.format(name))
self._handy._strata_object = attr_strata(*args, **kwargs)
except AttributeError:
pass
try:
if self._handycolumns is not None:
f = object.__getattribute__(self._handy, name)
else:
f = object.__getattribute__(self._df, name)
is_agg = getattr(f, '__is_agg', False)
is_inccol = getattr(f, '__is_inccol', False)
except AttributeError:
is_agg = False
is_inccol = False
if is_agg or is_inccol:
if self._handycolumns is not None:
colnames = ensure_list(args[0])
else:
colnames = self._df.columns
res = getattr(self._handy, name)(*args, **kwargs)
else:
if self._handycolumns is not None:
res = [getattr(df._handy, name)(*args, **kwargs) for df in self._strat_df]
else:
res = [getattr(df, name)(*args, **kwargs) for df in self._strat_df]
if isinstance(res, pd.DataFrame):
if len(self._handy.strata_colnames):
res = res.set_index(self._handy.strata_colnames).sort_index()
if is_agg:
if len(colnames) == 1:
res = res[colnames[0]]
try:
attr_post = getattr(self._handy, '_post_{}'.format(name))
res = attr_post(res)
except AttributeError:
pass
strata = list(map(lambda v: v[1].to_dict(OrderedDict), self._handy.strata.iterrows()))
strata_cols = [c if isinstance(c, str) else c.colname for c in self._strata]
if isinstance(res, list):
if isinstance(res[0], DataFrame):
joined_df = res[0]
self._imputed_values = joined_df.statistics_
self._fenced_values = joined_df.fences_
if len(res) > 1:
if len(joined_df.statistics_):
self._imputed_values = {self._clauses[0]: joined_df.statistics_}
if len(joined_df.fences_):
self._fenced_values = {self._clauses[0]: joined_df.fences_}
for strat_df, clause in zip(res[1:], self._clauses[1:]):
if len(joined_df.statistics_):
self._imputed_values.update({clause: strat_df.statistics_})
if len(joined_df.fences_):
self._fenced_values.update({clause: strat_df.fences_})
joined_df = joined_df.unionAll(strat_df)
# Clears stratification
self._handy._clear_stratification()
self._df._strat_handy = None
self._df._strat_index = None
if len(self._temp_colnames):
joined_df = joined_df.drop(*self._temp_colnames)
res = HandyFrame(joined_df, self._handy)
res._handy._imputed_values = self._imputed_values
res._handy._fenced_values = self._fenced_values
elif isinstance(res[0], pd.DataFrame):
strat_res = []
indexes = res[0].index.names
if indexes[0] is None:
indexes = ['index']
for r, s in zip(res, strata):
strata_dict = dict([(k if isinstance(k, str) else k.colname, v) for k, v in s.items()])
strat_res.append(r.assign(**strata_dict)
.reset_index())
res = (pd.concat(strat_res)
.sort_values(by=strata_cols)
.set_index(strata_cols + indexes)
.sort_index())
elif isinstance(res[0], pd.Series):
# TODO: TEST
strat_res = []
for r, s in zip(res, strata):
strata_dict = dict([(k if isinstance(k, str) else k.colname, v) for k, v in s.items()])
series_name = none2default(r.name, 0)
if series_name == name:
series_name = 'index'
strat_res.append(r.reset_index()
.rename(columns={series_name: name, 'index': series_name})
.assign(**strata_dict)
.set_index(strata_cols + [series_name])[name])
res = pd.concat(strat_res).sort_index()
if len(ensure_list(self._handycolumns)) > 1:
try:
res = res.astype(np.float64)
res = res.to_frame().reset_index().pivot_table(values=name,
index=strata_cols,
columns=series_name)
res.columns.name = ''
except ValueError:
pass
elif isinstance(res[0], np.ndarray):
# TODO: TEST
strat_res = []
for r, s in zip(res, strata):
strata_dict = dict([(k if isinstance(k, str) else k.colname, v) for k, v in s.items()])
strat_res.append(pd.DataFrame(r, columns=[name])
.assign(**strata_dict)
.set_index(strata_cols)[name])
res = pd.concat(strat_res).sort_index()
elif isinstance(res[0], Axes):
res, axs = self._handy._strata_plot
res = consolidate_plots(res, axs, args[0], self._clauses)
elif isinstance(res[0], list):
joined_list = res[0]
for l in res[1:]:
joined_list += l
return joined_list
elif len(res) == len(self._combinations):
# TODO: TEST
strata_df =
|
pd.DataFrame(strata)
|
pandas.DataFrame
|
import json
import sys
import os
import pandas as pd
from bs4 import BeautifulSoup
import time
import datetime
def create_map_ds(tweets):
new = []
for tweet in tweets:
if tweet['coordinates']:
long_, lat = tweet['coordinates']['coordinates']
source = BeautifulSoup(
tweet['source'], "lxml").text.encode('utf-8')
tw = {
'id': tweet['id'],
'long': long_,
'lat': lat,
'source': source,
'created_at': tweet['created_at'],
'text': ' '.join(tweet['text'].split()),
'user': tweet['user']['screen_name'],
}
new.append(tw)
new = pd.DataFrame(new)
new['id'] = new['id'].astype(str)
return new
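# Illustrative output: for geotagged tweets, create_map_ds returns a DataFrame with the
# columns built above ('id', 'long', 'lat', 'source', 'created_at', 'text', 'user'),
# one row per tweet that carries coordinates, with 'id' cast to str.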
def create_hs_ds(tweets):
new = []
for tweet in tweets:
for hs in tweet['entities']['hashtags']:
ts = time.strftime('%Y-%m-%d %H:00:00',
time.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y'))
ts = time.mktime(datetime.datetime.strptime(
ts, "%Y-%m-%d %H:%M:%S").timetuple())
user = tweet['retweeted_status'][
'user'] if 'retweeted_status' in tweet else tweet['user']
tw = {
'created_at': ts,
'text': hs['text'].encode('utf-8'),
'screen_name': user['screen_name']
}
new.append(tw)
new =
|
pd.DataFrame(new)
|
pandas.DataFrame
|
import time
from itertools import combinations
from math import sqrt, log1p, expm1
import numpy as np
import pandas as pd
from random import randint, random, seed, uniform, choice
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
import xgboost as xgb
target, target_id = 'type', 'id'
def create_submission(output_id, output_val, prefix='', compression=False):
filename = 'submission_{}_{}.csv'.format(prefix, time.strftime("%Y-%m-%d-%H-%M"))
output_id = output_id.astype(int)
submission = pd.DataFrame(data={target_id: output_id, target: output_val})
#a.to_frame().join(b.to_frame())
if compression:
filename += '.gz'
print('\nMake submission:{}\n'.format(filename))
submission.to_csv(filename, index=False, header=True, compression='gzip')
else:
print('\nMake submission:{}\n'.format(filename))
submission.to_csv(filename, index=False, header=True)
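# Illustrative call (hypothetical variables): writes a gzip-compressed file named like
# 'submission_xgb_<timestamp>.csv.gz' from the test ids and predicted labels:
#   create_submission(test[target_id], predictions, prefix='xgb', compression=True)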
train = pd.read_csv('../input/train.csv')
test =
|
pd.read_csv('../input/test.csv')
|
pandas.read_csv
|
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
with tm.assertRaises(TypeError):
l == r
with tm.assertRaises(TypeError):
l != r
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:09')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
self.assertRaises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
self.assertEqual(s.diff().median(), timedelta(days=4))
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
self.assertEqual(actual, scalar2)
actual = scalar2 - scalar1
self.assertEqual(actual, scalar1)
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = scalar1 + s1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = -scalar1 + s2
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = NA + s1
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = -NA + s1
assert_series_equal(actual, sn)
actual = s1 + pd.NaT
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
assert_frame_equal(actual, df2)
actual = df2 - scalar1
assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 + NA
assert_frame_equal(actual, dfn)
actual = df1 - NA
assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
assert_frame_equal(actual, dfn)
def test_compare_timedelta_series(self):
# regression test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_compare_timedelta_ndarray(self):
# GH11835
periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
self.assert_numpy_array_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_tdi_ops_attributes(self):
rng = timedelta_range('2 days', periods=5, freq='2D', name='x')
result = rng + 1
exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
result = rng - 2
exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
result = rng * 2
exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '4D')
result = rng / 2
exp = timedelta_range('1 days', periods=5, freq='D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
result = -rng
exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '-2D')
rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')
result = abs(rng)
exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',
'2 days'], name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, None)
def test_add_overflow(self):
# see gh-14068
msg = "too (big|large) to convert"
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta(106580, 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
Timestamp('2000') + to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta([106580], 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
Timestamp('2000') + to_timedelta([106580], 'D')
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta([_NaT]) - Timedelta('1 days')
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with tm.assertRaisesRegexp(OverflowError, msg):
(to_timedelta([_NaT, '5 days', '1 hours']) -
to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp =
|
TimedeltaIndex(['4 days', pd.NaT])
|
pandas.TimedeltaIndex
|
######### Required libraries ##########
# pip install binance-data
# pip install websocket
# pip install pandas
# pip install numpy
# pip install ccxt==1.58.55
# pip install schedule
# pip install animation
# pip install mplfinance
# Importing libraries
from datetime import *
import pandas as pd
import numpy as np
import config
import vars
import ccxt
import time
from binance_data import DataClient
# import animation
import mplfinance as mplf
# For FtxClient ----------------------------------------
import urllib.parse
from typing import Optional, Dict, Any, List
from requests import Request, Session, Response
import hmac
# ------------------------------------------------------
######################## GLOBAL VARIABLES #########################
BINANCE = vars.BINANCE # Variable to indicate which exchange to use: True for BINANCE, False for FTX
SHITCOIN = vars.SHITCOIN
MULTI_TF = vars.MULTI_TF
TF_LIST = vars.TF_LIST
TF = vars.TF
DAYS_BACK = vars.DAYS_BACK # Number of days to look back in time for initial candles data
TRADE_ON = vars.TRADE_ON
LEVELS_PER_TF = vars.LEVELS_PER_TF
###################################################################
LEVEL_COLUMNS = ['side','price','hit','time']
LEVELS = pd.DataFrame(columns=LEVEL_COLUMNS)
UNHIT_LEVELS = LEVELS
# Candles calculation
if 'm' in TF:
TF_NUM = int(TF.split('m')[0]) # Used for FTX candles
NUM_CANDLES = int(DAYS_BACK * 24 * 60 / TF_NUM)
elif 'h' in TF:
TF_NUM = int(TF.split('h')[0]) * 60
NUM_CANDLES = int(DAYS_BACK * 24 * 60 / TF_NUM)
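# Worked example (hypothetical settings, not taken from vars.py):
#   TF = '15m'    -> TF_NUM = 15
#   DAYS_BACK = 3 -> NUM_CANDLES = int(3 * 24 * 60 / 15) = 288 initial candles requested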
# clock animation (white, default speed)
# clock = ['-','\\','|','/']
# wait_animation = animation.Wait(clock, speed=8)
# Special class with FTX functions to get the candle data.
class FtxClient:
_ENDPOINT = 'https://ftx.com/api/'
def __init__(self, api_key=None, api_secret=None, subaccount_name=None) -> None:
self._session = Session()
self._api_key = api_key
self._api_secret = api_secret
self._subaccount_name = subaccount_name
def _get(self, path: str, params: Optional[Dict[str, Any]] = None) -> Any:
return self._request('GET', path, params=params)
def _post(self, path: str, params: Optional[Dict[str, Any]] = None) -> Any:
return self._request('POST', path, json=params)
def _delete(self, path: str, params: Optional[Dict[str, Any]] = None) -> Any:
return self._request('DELETE', path, json=params)
def _request(self, method: str, path: str, **kwargs) -> Any:
request = Request(method, self._ENDPOINT + path, **kwargs)
self._sign_request(request)
response = self._session.send(request.prepare())
return self._process_response(response)
def _sign_request(self, request: Request) -> None:
ts = int(time.time() * 1000)
prepared = request.prepare()
signature_payload = f'{ts}{prepared.method}{prepared.path_url}'.encode()
if prepared.body:
signature_payload += prepared.body
signature = hmac.new(self._api_secret.encode(), signature_payload, 'sha256').hexdigest()
request.headers['FTX-KEY'] = self._api_key
request.headers['FTX-SIGN'] = signature
request.headers['FTX-TS'] = str(ts)
if self._subaccount_name:
request.headers['FTX-SUBACCOUNT'] = urllib.parse.quote(self._subaccount_name)
def _process_response(self, response: Response) -> Any:
try:
data = response.json()
except ValueError:
response.raise_for_status()
raise
else:
if not data['success']:
raise Exception(data['error'])
return data['result']
def get_klines(self, market_name: str, resolution: int = 3600, limit: int = 1440,
start_time: int = None, end_time: int = None):
# resolution: window length in seconds: 15, 60, 300, 900, 3600, 14400, 86400
# limit: max number to fetch, optional, max 5000
return self._get(f'markets/{market_name}/candles', {'resolution': resolution,
'limit': limit,
'start_time': start_time,
'end_time': end_time})
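# Illustrative call (hypothetical market): the last 96 fifteen-minute candles
#   client.get_klines('BTC-PERP', resolution=900, limit=96)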
def get_trades(self, market_name: str, limit: int = 1440, start_time: int = None, end_time: int = None):
# limit: max number to fetch, optional, max 5000
return self._get(f'markets/{market_name}/trades', {'limit': limit,
'start_time': start_time,
'end_time': end_time})
#### EXCHANGE SPECIFICS
if BINANCE: # BINANCE CONNECTION TO USD-M FUTURES
exchange = ccxt.binanceusdm({
'enableRateLimit': True,
"apiKey": config.CCXT_API_KEY,
"secret": config.CCXT_API_SECRET
})
futures = True
client = DataClient(futures=futures)
SYMBOL = SHITCOIN.upper()+'USDT'
C_SYMBOL = SHITCOIN.upper()+'/USDT'
print("Exchange: BINANCE - {}".format(TF))
else: # FTX CONNECTION
SYMBOL = SHITCOIN.upper()+'-PERP'
C_SYMBOL = SYMBOL
exchange = ccxt.ftx({
"apiKey": config.FTX_LVL_API_KEY,
"secret": config.FTX_LVL_API_SECRET,
'enableRateLimit': True,
'headers': {'FTX-SUBACCOUNT': config.FTX_LVL_SUBACCOUNT}
})
client = FtxClient(api_key=config.FTX_LVL_API_KEY, api_secret=config.FTX_LVL_API_SECRET)
print("Exchange: FTX - {}".format(TF))
print("SYMBOL: {}".format(SYMBOL))
if not MULTI_TF:
print("Days back: {} \nNum. Candles: {}".format(DAYS_BACK, NUM_CANDLES))
# Balance variables
MAX_BAL_PER_COIN = vars.MAX_BAL_PER_COIN # Maximum percentage of balance to use per asset/coin
exchange.load_markets(SYMBOL)
MIN_COST = exchange.load_markets(C_SYMBOL)[C_SYMBOL]['limits']['cost']['min']
MIN_AMOUNT = exchange.load_markets(C_SYMBOL)[C_SYMBOL]['limits']['amount']['min']
LVRG = vars.LVRG
# Take Profit Grid Options
TPGRID_MIN_DIST = vars.TPGRID_MIN_DIST # Percentage to use for the closest order in the TP grid
TPGRID_MAX_DIST = vars.TPGRID_MAX_DIST # Percentage to use for the farthest order in the TP grid
TP_ORDERS = vars.TP_ORDERS # Number of orders for the TP grid
DCA_FACTOR_MULT = vars.DCA_FACTOR_MULT
ASSYMMETRIC_TP = vars.ASSYMMETRIC_TP # True for descending-size TP orders, False for equal-sized orders
MIN_LEVEL_DISTANCE = vars.MIN_LEVEL_DISTANCE
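# Illustrative sketch only, not the grid logic used further down in this script:
# one simple way to place TP_ORDERS take-profit levels is to space them evenly
# between TPGRID_MIN_DIST and TPGRID_MAX_DIST percent away from the entry price.
# The function name and arguments below are hypothetical examples.
def _example_tp_grid(entry_price, min_dist_pct, max_dist_pct, n_orders, side='long'):
    # Evenly spaced distances (in percent) from min_dist_pct to max_dist_pct.
    step = (max_dist_pct - min_dist_pct) / max(n_orders - 1, 1)
    sign = 1 if side == 'long' else -1
    return [entry_price * (1 + sign * (min_dist_pct + i * step) / 100) for i in range(n_orders)]
# e.g. _example_tp_grid(100.0, 0.5, 2.0, 4) -> approximately [100.5, 101.0, 101.5, 102.0]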
# FUNCTIONS START HERE
def get_ftx_candles(market, interval, limit=5000, start_time=None, end_time=None):
'''
Function that returns a dataframe with the candles information
param 'market': the market to get the candle data from
param 'interval': the interval of the candle data
param 'limit': the number of candles to retrieve (maximum 5000)
param 'start_time': the start time (aka the time of the first candle to get) in timestamp format
param 'end_time': the end time (aka the time of the last candle to get) in timestamp format, if it results in over the limit number, it won't be used
returns: a DataFrame with the candles data
'''
    # make sure limit is at most 5000
if limit > 5000:
print(f'Max klines is 5000 per request. Getting 5000 klines instead of {limit}.')
limit = 5000
for _ in range(10):
try:
temp_dict = client.get_klines(market_name=market, resolution=int(interval*60), limit=limit, start_time=start_time, end_time=end_time)
# print(temp_dict)
except Exception as e:
print(e)
print("Failed to get historical kline. Retrying....")
time.sleep(2)
else:
if len(temp_dict) > 0:
break
else:
time.sleep(1)
else: # when all the retries failed
print("(get_historical_klines_simple) Failed 10 times to get historical kline data.")
# If you like, terminate the program (or report this to Discord/LINE/whatever)
#sys.exit(0)
# convert to data frame
df =
|
pd.DataFrame.from_dict(temp_dict)
|
pandas.DataFrame.from_dict
|
import glob
import os
import re
import pandas as pd
import numpy as np
from collections import Counter
path = "files/"
files = glob.glob(path+"*.txt")
# first we need a list of all words in all files.
finalDataframe = pd.DataFrame()
for file in files:
with open(file, mode="r") as f:
data = f.read()
# Split data into array of words, non case sensitive
word = re.split(r"\W+", data, flags=re.IGNORECASE)
        # Remove white spaces and empty strings
cleanWords = [line for line in [l.strip() for l in word] if line]
# Remove duplicates, we don't want them
words = list(set(cleanWords))
# Add data into dictionary
dictionary = {"filename":file, "values":
|
pd.Series(words)
|
pandas.Series
|
# -*- coding: utf-8 -*-
from email.policy import default
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import math
import matplotlib.pyplot as plt
import pandas as pd
import pyro
import torch
from pyro.infer import SVI, TraceMeanField_ELBO
from tqdm import trange
from src.models.prodlda import ProdLDA, plot_word_cloud
@click.command()
@click.argument(
"input_filepath",
type=click.Path(exists=True),
)
@click.argument(
"output_filepath",
type=click.Path(),
)
@click.option(
"--topics",
type=int,
default=10,
)
@click.option(
"--hidden",
type=int,
default=100,
)
@click.option(
"--dropout",
type=float,
default=0.2,
)
@click.option(
"--epochs",
type=int,
default=50,
)
@click.option(
"--batch_size",
type=int,
default=32,
)
@click.option(
"--learning_rate",
type=float,
default=1e-3,
)
def main(
input_filepath,
output_filepath,
topics,
hidden,
dropout,
epochs,
batch_size,
learning_rate,
):
"""Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info("training latent dirichlet allocation model")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
docs_df =
|
pd.read_csv(input_filepath)
|
pandas.read_csv
|
# Collect data into data frames based on the year of the file
import glob
import os
import re
import pandas as pd
from pathlib import Path
import numpy as np
def postalcode_and_area(df):
"""
Split the column "Postal code area" into Postal code (the numerical postal code)
and Area (the name of the area)
:param df: the original data frame
:return: the updated data frame
"""
# Rename the first column
df.rename(columns={df.columns[0]: "Postal code"}, inplace=True)
for c in df.columns:
drop_double_columns(df, c)
# Duplicate the first column and add it as the second column
# df.insert(1, 'Area', df['Postal code'])
df['Area'] = [''] * len(df.index)
    # Strip digits out of the 'Postal code' content:
    # only the name of the area remains, which is stored in the 'Area' column.
content = df["Postal code"].copy()
content = content.apply(lambda x: re.sub('[0-9_]+', '', x))
df["Area"] = content
# Strip out the letters and symbols from the content of the first column:
# only the postal code digits will remain.
content = df["Postal code"]
content = content.apply(lambda x: re.sub('[\D]+', '', x))
df["Postal code"] = content
return df
def drop_double_columns(df, header_to_drop):
"""
Drops duplicate columns whose header is 'header_to_drop' in a data frame.
:param df: the original data frame
:param header_to_drop: the column name
:return: None (the data frame is edited inplace)
"""
col_list = df.columns.tolist()
to_drop = get_index_positions(col_list, header_to_drop)
if len(to_drop) > 1:
to_drop = to_drop[1:]
for i in to_drop:
col_list[i] = 'todrop'
df.columns = col_list
df.drop(columns=['todrop'], inplace=True)
def clean_columns(df):
"""
This function cleans the last part of the name of the column,
by removing the pattern (AA), counts how many times each column appears,
and drop the duplicates by calling 'drop_double_columns'.
:param df: the dataframe to clean
:return: the cleaned dataframe
"""
# Saving columns names
new_columns = {x: x for x in df.columns}
for key, value in new_columns.items():
# Removing pattern (AA) from the columns' names
new_columns[key] = re.sub('\([^)]{2}\)', '', value)
# The cleaned names become the new columns in the data frame
df.rename(columns=new_columns, inplace=True)
for c in df.columns:
drop_double_columns(df, c)
return df
def drop_and_replace(df):
"""
This function deletes the row 0 (Finland), drops remaining duplicate columns,
and replaces missing values. Inline comments explain how missing/private values are handled.
:param df: the data frame to clean
:return: the cleaned data frame
"""
# Drop row 0: Finland
df.drop(index=0, inplace=True)
# Drop unnecessary columns that might have been copied until now
for c in df.columns:
drop_double_columns(df, c)
# Replace missing values
# RULES:
# 1) if it's a count, take the min of the column
# and if the min is greater than 15, take 15
# 2) if it's a rate/percentage/average/median, take the median of the column
# MISSING VALUES COME FROM:
# (RA) -> Data on dwellings are protected if there is only one residential building in the area.
# Data on the average floor area of dwellings and type of house are protected if there is only one
# residential building or fewer than 30 dwellings in the area. Protected fields have the value "..".
# Data on free-time residences is not protected.
# (KO) -> Data on educational structure are confidential if the area contains fewer than 30 people aged 18 or over
# (HR) -> Data on income are confidential if the area contains fewer than 30 people aged 18 or over
# (TE) -> Data on size and stage in life of households are confidential if there are fewer than 30 households in the area.
# (TR) -> Data on the income of households are confidential if there are fewer than ten households in the area.
# (TP) -> Data on workplaces are protected if there are fewer than 10 workplaces in the area
# SOURCE:
# https://www.stat.fi/static/media/uploads/tup/paavo/paavo_kuvaus_en.pdf
df.replace({'..': np.nan, '.': np.nan}, inplace=True)
for col_name in df.columns:
if df[col_name].isna().sum() > 0:
filter_on_column = df[df[col_name].notna()]
if 'Average' in col_name or 'average' in col_name \
or 'median' in col_name or 'Median' in col_name \
or 'ratio' in col_name or 'rate' in col_name \
or 'income' in col_name or 'purchasing power' in col_name:
nonzero_minimum = filter_on_column[col_name].median()
else:
nonzero_minimum = filter_on_column[col_name][filter_on_column[col_name].astype('float') > 0].min()
nonzero_minimum = min(float(nonzero_minimum), 15)
df[col_name].fillna(nonzero_minimum, inplace=True)
if sum(df.isna().sum()) > 0:
print("WARNING: There are still missing values!")
df.replace({None: 0}, inplace=True)
df.fillna(0, inplace=True)
return df
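# Worked example of the replacement rules above (toy numbers, not Paavo data):
# for a protected count column whose observed positive values are [40, 200], the
# fill value is min(min(40, 200), 15) = 15; for 'Average income of inhabitants'
# with observed values [25000, 31000], the fill value is their median, 28000.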
def get_index_positions(list_of_elements, element):
"""
Returns the indexes of all occurrences of the given 'element' in
the list of columns 'list_of_elements'.
:param list_of_elements: the list of the columns of the data frame
:param element: the name of the column to find
:return: list of indexes
"""
index_pos_list = []
index_pos = 0
while True:
try:
# Search for item in list from index_pos to the end of list
index_pos = list_of_elements.index(element, index_pos)
# Add the index position in list
index_pos_list.append(index_pos)
index_pos += 1
except ValueError as _:
break
return index_pos_list
def add_density(year, pclist):
"""
Open the file 'density.tsv' in the folder 'data' and return
a dictionary where the keys are postal codes and the values are density values.
NOTE: There is no density for the year 2012.
When asking for 2012, the column 2013 will be returned.
For the years outside the known range, an empty dictionary is returned.
:param year: string, the year to read
:param pclist: list of strings, where each element is a postal code to take
:return: dictionary of postal codes and population density
"""
if year == '2012':
year = '2013'
if year not in ['2012', '2013', '2014', '2015', '2016', '2017']:
        print('WARNING: wrong year! Empty dictionary returned')
return {}
else:
col_name = 'Density (' + year + ')'
dens_df = pd.read_csv(Path('data/demographic/') / 'density.tsv', sep='\t',
usecols=['Postal code', col_name], dtype={'Postal code': object})
dens_df.fillna(0)
dens_df = dens_df[dens_df['Postal code'].isin(pclist)]
return dens_df.copy().set_index('Postal code').to_dict()[col_name]
def add_surface(year, pclist):
"""
Open the file 'surface_area.tsv' in the folder 'data' and return
a dictionary where the keys are postal codes and the values are surface area values.
NOTE: There is no surface area for the year 2012.
When asking for 2012, the column 2013 will be returned.
For the years outside the known range, an empty dictionary is returned.
:param year: string, the year to read
:param pclist: list of strings, where each element is a postal code to take
:return: dictionary of postal codes and surface area
"""
if year == '2012':
year = '2013'
if year not in ['2012', '2013', '2014', '2015', '2016', '2017']:
        print('WARNING: wrong year! Empty dictionary returned')
return dict()
else:
col_name = 'Surface area (' + year + ')'
surface_df = pd.read_csv(Path('data/geographic/') / 'surface_area.tsv', sep='\t',
usecols=['Postal code', col_name], dtype={'Postal code': object, col_name: float})
surface_df.fillna(0)
surface_df = surface_df[surface_df['Postal code'].isin(pclist)]
return surface_df.copy().set_index('Postal code').to_dict()[col_name]
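# Hedged usage sketch for the two loaders above (the postal codes are illustrative
# and the tsv files must exist under data/): both return plain dicts keyed by
# postal code, e.g.
# densities = add_density('2015', ['00100', '00120'])
# surfaces = add_surface('2015', ['00100', '00120'])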
def combine_age_ranges(df):
"""
This function replaces the smaller age ranges with combined larger age ranges
:param df: the data frame
:return: the data frame with combined age ranges
"""
if "0-2 years" not in df.columns:
        return df
combined_age_ranges = [
["0-15 years", ["0-2 years", "3-6 years", "7-12 years", "13-15 years"]],
["16-34 years", ["16-17 years", "18-19 years", "20-24 years", "25-29 years", "30-34 years"]],
["35-64 years", ["35-39 years", "40-44 years", "45-49 years", "50-54 years", "55-59 years", "60-64 years"]],
["65 years or over", ["65-69 years", "70-74 years", "75-79 years", "80-84 years", "85 years or over"]]
]
for output, input_list in combined_age_ranges:
df[output] = df[input_list].sum(axis=1)
df.drop(input_list, axis=1, inplace=True)
return df
def data_format(df):
"""
Convert each column to the correct format.
:param df: the dataframe
:return: the dataframe
"""
for key in df.columns:
if key == 'Postal code' or key == 'Area':
df[key] = df[key].astype('object')
else:
df[key] = df[key].astype('float')
return df
def get_all_dataframes():
"""
This function loads all the files that have been downloaded
as .csv files by calling `fetch_paavo("")` from `fetch_from_paavo.py`
(last tested version: 13.11.2019).
The files are stored in the folder 'paavo_raw'.
This function assigns the data to the correct data frame according to the year,
and calls drop_and_replace to clean the data frame.
The latest postal codes (from 2017) are taken into account.
:return: df_dic, a dictionary where the keys are the years
and the values are the corresponding data frames, and
common_columns, a list of common columns as found by 'get_common_columns'
"""
# Read all the files with the data
file_list = glob.glob("data/paavo/paavo_raw/*.csv")
# Create a sorted list of the years: remove A-Öa-ö_.,:; /()\\-
years_list = sorted({re.sub('[\D]+', '', y) for y in file_list})
print("Preparing data: ", years_list)
# Build a dictionary of DataFrames:
# to each year, it is associated one DataFrame
df_dic = {y: pd.DataFrame() for y in years_list}
# Dataframe 2017 --> we need 2017 first, since we want to refer to the latest postal codes and areas
for file in file_list:
if '2017' in file:
if df_dic['2017'].size == 0:
df_dic['2017'] = pd.read_csv(file, sep=',', skiprows=0, encoding='iso-8859-1')
else:
# Open the file in a temporary data frame
temp = pd.read_csv(file, sep=',', skiprows=0, encoding='iso-8859-1')
# Merge the temporary data frame to the main one
df_dic['2017'] = df_dic['2017'].join(temp, lsuffix='_caller')
df_dic['2017'] = drop_and_replace(df_dic['2017'])
df_dic['2017'] = postalcode_and_area(df_dic['2017'])
postalcodes_list = df_dic['2017']['Postal code'].values
years_list.remove('2017')
# Fill the DataFrames with the data from the correct files
for file in file_list:
for y in years_list:
if df_dic[y].size == 0:
df_dic[y]['Postal code'] = df_dic['2017']['Postal code'].copy()
df_dic[y]['Area'] = df_dic['2017']['Area'].copy()
if y in file:
# Open the file in a temporary dataframe
temp = pd.read_csv(file, sep=',', skiprows=0, encoding='iso-8859-1')
temp = drop_and_replace(temp)
temp = postalcode_and_area(temp)
temp = data_format(temp)
idx =
|
pd.Index(temp['Postal code'].values)
|
pandas.Index
|
import numpy as np
import pandas as pd
from sklearn.svm import SVR
import matplotlib.pyplot as plt
class Prediction:
# samples and features
dates = []
prices = []
filename = ''
def __init__(self, filename, dates, prices):
self.dates = dates
self.prices = prices
self.filename = filename
# get data from csv file of last 30 days
def get_data(self):
df = self.clean_data()
date_series =
|
pd.DatetimeIndex(df['day'])
|
pandas.DatetimeIndex
|
from econlib.linearmodel import LinearModel
import pandas as pd
y =
|
pd.DataFrame([1, 2, 3, 4, 5])
|
pandas.DataFrame
|
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Meanstress routines
===================
Mean stress transformation methods
----------------------------------
* FKM Goodman
* Five Segment Correction
'''
__author__ = "<NAME>, <NAME>"
__maintainer__ = "<NAME>"
import numpy as np
import pandas as pd
import pylife
from pylife.stress import stresssignal
from pylife.core import *
@pd.api.extensions.register_dataframe_accessor("meanstress_mesh")
class MeanstressMesh(stresssignal.CyclicStress):
def FKM_goodman(self, haigh, R_goal):
haigh.FKM_Goodman
Sa = self._obj.sigma_a.to_numpy()
Sm = self._obj.sigma_m.to_numpy()
Sa_transformed = FKM_goodman(Sa, Sm, haigh.M, haigh.M2, R_goal)
return pd.DataFrame({'sigma_a': Sa_transformed, 'R': np.ones_like(Sa_transformed) * R_goal},
index=self._obj.index)
def five_segment(self, haigh, R_goal):
haigh.haigh_five_segment
Sa = self._obj.sigma_a.to_numpy()
Sm = self._obj.sigma_m.to_numpy()
Sa_transformed = five_segment_correction(Sa, Sm,
haigh.M0, haigh.M1, haigh.M2, haigh.M3, haigh.M4,
haigh.R12, haigh.R23,
R_goal)
return pd.DataFrame({'sigma_a': Sa_transformed, 'R': np.ones_like(Sa_transformed) * R_goal},
index=self._obj.index)
@pd.api.extensions.register_series_accessor("meanstress_hist")
class MeanstressHist:
def __init__(self, df):
if df.index.names == ['from', 'to']:
f = df.index.get_level_values('from').mid
t = df.index.get_level_values('to').mid
self._Sa = np.abs(f-t)/2.
self._Sm = (f+t)/2.
self._binsize_x = df.index.get_level_values('from').length.min()
self._binsize_y = df.index.get_level_values('to').length.min()
elif df.index.names == ['range', 'mean']:
self._Sa = df.index.get_level_values('range').mid / 2.
self._Sm = df.index.get_level_values('mean').mid
self._binsize_x = df.index.get_level_values('range').length.min()
self._binsize_y = df.index.get_level_values('mean').length.min()
else:
raise AttributeError("MeanstressHist needs index names either ['from', 'to'] or ['range', 'mean']")
self._df = df
def FKM_goodman(self, haigh, R_goal):
haigh.FKM_Goodman
Dsig = FKM_goodman(self._Sa, self._Sm, haigh.M, haigh.M2, R_goal) * 2.
return self._rebin_results(Dsig)
def five_segment(self, haigh, R_goal):
haigh.haigh_five_segment
Dsig = five_segment_correction(self._Sa, self._Sm,
haigh.M0, haigh.M1, haigh.M2, haigh.M3, haigh.M4, haigh.R12,
haigh.R23, R_goal) * 2.
return self._rebin_results(Dsig)
def _rebin_results(self, Dsig):
Dsig_max = Dsig.max()
binsize = np.hypot(self._binsize_x, self._binsize_y) / np.sqrt(2.)
bincount = int(np.ceil(Dsig_max / binsize))
new_idx = pd.IntervalIndex.from_breaks(np.linspace(0, Dsig_max, bincount), name="range")
result = pd.Series(data=np.zeros(bincount-1), index=new_idx, name='cycles', dtype=np.int32)
for i, intv in enumerate(new_idx):
cond = np.logical_and(Dsig >= intv.left, Dsig < intv.right)
result.loc[intv] = np.int32(np.sum(self._df.values[cond]))
result.iloc[-1] += np.int32(np.sum(self._df.values[Dsig == Dsig_max]))
return result
@
|
pd.api.extensions.register_dataframe_accessor("FKM_Goodman")
|
pandas.api.extensions.register_dataframe_accessor
|
import nose
import warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u, PY3
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.core.common import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT, tslib
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
# Temp,
# Categorical.categories is changed from str to bytes in PY3
# maybe the same as GH 13591
if PY3 and b.categories.inferred_type == 'string':
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert(a == b)
class TestPackers(tm.TestCase):
def setUp(self):
self.path = '__%s__.msg' % tm.rands(10)
def tearDown(self):
pass
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p, 'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
raise nose.SkipTest('numpy cant handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32', 'float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
self.assertTrue(all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_nat(self):
nat_rec = self.encode_decode(NaT)
self.assertIs(NaT, nat_rec)
def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
class TestIndex(TestPackers):
def setUp(self):
super(TestIndex, self).setUp()
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'),
('qux', 'one'), ('qux', 'two')],
names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'), Timestamp('20130103')])
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'), Timestamp(
'20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
class TestSeries(TestPackers):
def setUp(self):
super(TestSeries, self).setUp()
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(tslib.iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 2 +
[Timestamp('20130603', tz='CET')] * 3,
'G': [Timestamp('20130102', tz='US/Eastern')] * 5,
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
self.d['dt_tz_mixed'] = Series(data['F'])
self.d['dt_tz'] = Series(data['G'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
class TestCategorical(TestPackers):
def setUp(self):
super(TestCategorical, self).setUp()
self.d = {}
self.d['plain_str'] = Categorical(['a', 'b', 'c', 'd', 'e'])
self.d['plain_str_ordered'] = Categorical(['a', 'b', 'c', 'd', 'e'],
ordered=True)
self.d['plain_int'] = Categorical([5, 6, 7, 8])
self.d['plain_int_ordered'] = Categorical([5, 6, 7, 8], ordered=True)
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_categorical_equal(i, i_rec)
class TestNDFrame(TestPackers):
def setUp(self):
super(TestNDFrame, self).setUp()
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 5,
'G': [Timestamp('20130603', tz='CET')] * 5,
'H': Categorical(['a', 'b', 'c', 'd', 'e']),
'I': Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True),
}
self.frame = {
'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
'mixed': DataFrame(data)}
self.panel = {
'float': Panel(dict(ItemA=self.frame['float'],
ItemB=self.frame['float'] + 1))}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_basic_panel(self):
for s, i in self.panel.items():
i_rec = self.encode_decode(i)
assert_panel_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
l = tuple([self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None])
l_rec = self.encode_decode(l)
check_arbitrary(l, l_rec)
# this is an oddity in that packed lists will be returned as tuples
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
l_rec = self.encode_decode(l)
self.assertIsInstance(l_rec, tuple)
check_arbitrary(l, l_rec)
def test_iterator(self):
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
with ensure_clean(self.path) as path:
to_msgpack(path, *l)
for i, packed in enumerate(read_msgpack(path, iterator=True)):
check_arbitrary(packed, l[i])
def tests_datetimeindex_freq_issue(self):
# GH 5947
# inferring freq on the datetimeindex
df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
df = DataFrame([1, 2], index=date_range('1/1/2013', '1/2/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
def test_dataframe_duplicate_column_names(self):
# GH 9618
expected_1 = DataFrame(columns=['a', 'a'])
expected_2 =
|
DataFrame(columns=[1] * 100)
|
pandas.DataFrame
|
"""
Import as:
import helpers.hdatetime as hdateti
"""
# TODO(gp): -> hdatetime
import asyncio
import calendar
import datetime
import logging
import re
from typing import Callable, Iterable, Optional, Tuple, Union, cast
_WARNING = "\033[33mWARNING\033[0m"
try:
import dateutil.parser as dparse
except ModuleNotFoundError:
_module = "dateutil"
print(_WARNING + f": Can't find {_module}: continuing")
import pandas as pd # noqa: E402 # pylint: disable=wrong-import-position
# TODO(gp): Check if dateutils is equivalent or better so we can simplify the
# dependencies.
try:
import pytz
except ModuleNotFoundError:
_module = "pytz"
print(_WARNING + f": Can't find {_module}: continuing")
import helpers.hdbg as hdbg # noqa: E402 # pylint: disable=wrong-import-position
_LOG = logging.getLogger(__name__)
# We use the type `Datetime` to allow flexibility in the interface exposed to client.
# The typical pattern is:
# - we call `to_datetime()`, as soon as we enter functions exposed to users,
# to convert the user-provided datetime into a `datetime.datetime`
# - we use only `datetime.datetime` in the private interfaces
#
# It's often worth importing this file just for the type `Datetime`,
# since typically as soon as the caller uses this type, they also want to use
# `to_datetime()` and `dassert_*()` functions.
# TODO(gp): It would be better to call this `GeneralDateTime`, `FlexibleDateTime`,
# and rename `StrictDateTime` -> `DateTime`.
Datetime = Union[str, pd.Timestamp, datetime.datetime]
# The type `StrictDateTime` is for stricter interfaces, although it is a bit of a
# compromise.
# Either one wants to allow everything that can be interpreted as a datetime (and
# then use `Datetime`), or strict (and then use only `datetime.datetime`).
StrictDatetime = Union[pd.Timestamp, datetime.datetime]
def dassert_is_datetime(datetime_: Datetime) -> None:
"""
Assert that `datetime_` is of type `Datetime`.
"""
hdbg.dassert_isinstance(
datetime_,
(str, pd.Timestamp, datetime.datetime),
"datetime_='%s' of type '%s' is not a DateTimeType",
datetime_,
str(type(datetime_)),
)
def dassert_is_strict_datetime(datetime_: StrictDatetime) -> None:
"""
Assert that `datetime_` is of type `StrictDatetime`.
"""
hdbg.dassert_isinstance(
datetime_,
(pd.Timestamp, datetime.datetime),
"datetime_='%s' of type '%s' is not a StrictDateTimeType",
datetime_,
str(type(datetime_)),
)
def to_datetime(datetime_: Datetime) -> datetime.datetime:
"""
    Convert `datetime_` into a `datetime.datetime`, asserting it is a valid datetime first.
:return: tz-aware or naive datetime.datetime
"""
# TODO(Grisha): also pass timezone.
dassert_is_datetime(datetime_)
if isinstance(datetime_, str):
datetime_ = pd.Timestamp(datetime_)
if isinstance(datetime_, pd.Timestamp):
datetime_ = datetime_.to_pydatetime()
return datetime_ # type: ignore
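# A minimal sketch of the pattern described in the comment block above; the
# function below is a hypothetical example, not part of this module's API.
def _example_user_facing(start: Datetime) -> datetime.datetime:
    # Normalize the flexible `Datetime` input at the public boundary ...
    start = to_datetime(start)
    # ... and work only with `datetime.datetime` internally from here on.
    return start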
def dassert_is_tz_naive(datetime_: StrictDatetime) -> None:
"""
Assert that the passed timestamp is tz-naive, i.e., doesn't have timezone
info.
"""
hdbg.dassert_is(
datetime_.tzinfo, None, "datetime_='%s' is not tz naive", datetime_
)
def dassert_has_tz(datetime_: StrictDatetime) -> None:
"""
Assert that the passed timestamp has timezone info.
"""
hdbg.dassert_is_not(
datetime_.tzinfo,
None,
"datetime_='%s' doesn't have timezone info",
datetime_,
)
def dassert_has_specified_tz(
datetime_: StrictDatetime, tz_zones: Iterable[str]
) -> None:
"""
Assert that the passed timestamp has the timezone passed in `tz_zones`.
"""
# Make sure that the passed timestamp has timezone information.
dassert_has_tz(datetime_)
# Get the timezone.
tz_info = datetime_.tzinfo
tz_zone = tz_info.zone # type: ignore
has_expected_tz = tz_zone in tz_zones
hdbg.dassert(
has_expected_tz,
"datetime_=%s (type=%s) tz_info=%s tz_info.zone=%s instead of tz_zones=%s",
datetime_,
type(datetime_),
tz_info,
tz_zone,
tz_zones,
)
def dassert_has_UTC_tz(datetime_: StrictDatetime) -> None:
"""
Assert that the passed timestamp is UTC.
"""
tz_zones = (pytz.timezone("UTC").zone,)
dassert_has_specified_tz(datetime_, tz_zones)
def dassert_has_ET_tz(datetime_: StrictDatetime) -> None:
"""
Assert that the passed timestamp is Eastern Time (ET).
"""
tz_zones = (
pytz.timezone("US/Eastern").zone,
pytz.timezone("America/New_York").zone,
)
dassert_has_specified_tz(datetime_, tz_zones)
def dassert_tz_compatible(
datetime1: StrictDatetime, datetime2: StrictDatetime
) -> None:
"""
Assert that two timestamps are both naive or both have timezone info.
"""
dassert_is_strict_datetime(datetime1)
dassert_is_strict_datetime(datetime2)
has_tz1 = datetime1.tzinfo is not None
has_tz2 = datetime2.tzinfo is not None
hdbg.dassert_eq(
has_tz1,
has_tz2,
"datetime1='%s' and datetime2='%s' are not compatible",
str(datetime1),
str(datetime2),
)
# TODO(gp): Replace this check with compatibility between series vs scalar.
# def dassert_srs_tz_compatible(
# def dassert_srs_has_tz
# def dassert_srs_is_tz_naive
def dassert_tz_compatible_timestamp_with_df(
datetime_: StrictDatetime,
df: pd.DataFrame,
col_name: Optional[str],
) -> None:
"""
Assert that timestamp and a df column are both naive or both have timezone
info.
:param col_name: col_name. `None` represents the index.
"""
dassert_is_strict_datetime(datetime_)
hdbg.dassert_isinstance(df, pd.DataFrame)
if df.empty:
return
if col_name is None:
# We assume that the first element in the index is representative.
df_datetime = df.index[0]
else:
hdbg.dassert_in(col_name, df.columns)
df_datetime = df[col_name].iloc[0]
dassert_tz_compatible(df_datetime, datetime_)
# #############################################################################
def get_UTC_tz() -> datetime.tzinfo:
"""
Return the UTC timezone.
"""
return pytz.timezone("UTC")
def get_ET_tz() -> datetime.tzinfo:
"""
Return the US Eastern Time timezone.
"""
# TODO(Grisha): -> `US/Eastern`?
# It appears that "America/New_York" is to be preferred over "US/Eastern".
# https://www.iana.org/time-zones
# https://en.wikipedia.org/wiki/Tz_database
return pytz.timezone("America/New_York")
# Function returning the current (true, replayed, simulated) wall-clock time as a
# timestamp.
GetWallClockTime = Callable[[], pd.Timestamp]
# TODO(gp): -> get_wall_clock_time
def get_current_time(
tz: str, event_loop: Optional[asyncio.AbstractEventLoop] = None
) -> pd.Timestamp:
"""
Return current time in UTC / ET timezone or as a naive time.
This should be the only way to get the current wall-clock time,
since it handles both wall-clock time and "simulated" wall-clock
time through asyncio.
:param tz: how to represent the returned time (e.g., "UTC", "ET", "naive")
"""
if event_loop is not None:
# We accept only `hasyncio.EventLoop` here. If we are using standard asyncio
# EventLoop we rely on wall-clock time instead of `loop.time()`.
hdbg.dassert_isinstance(event_loop, asyncio.AbstractEventLoop)
timestamp = event_loop.get_current_time()
else:
# Use true real-time.
timestamp = datetime.datetime.utcnow()
    # Convert it into the requested timezone representation.
timestamp = pd.Timestamp(timestamp, tz=get_UTC_tz())
if tz == "UTC":
pass
elif tz == "ET":
timestamp = timestamp.tz_convert(get_ET_tz())
elif tz == "naive_UTC":
timestamp = timestamp.replace(tzinfo=None)
elif tz == "naive_ET":
timestamp = timestamp.tz_convert(get_ET_tz())
timestamp = timestamp.replace(tzinfo=None)
else:
raise ValueError(f"Invalid tz='{tz}'")
return timestamp
def get_current_timestamp_as_string(tz: str) -> str:
"""
    Return the current time in the format `YYYYMMDD-HHMMSS` (e.g.,
    20210728-221734), matching the `%Y%m%d-%H%M%S` format used below.
    Note that no information about the timezone is returned. Thus the
    same time corresponds to `20210728-171749` for tz="ET" and
    `20210728-221749` for tz="UTC".
"""
timestamp = get_current_time(tz)
ret = timestamp.strftime("%Y%m%d-%H%M%S")
ret = cast(str, ret)
return ret
# #############################################################################
def to_generalized_datetime(
dates: Union[pd.Series, pd.Index], date_standard: Optional[str] = None
) -> Union[pd.Series, pd.Index]:
"""
Convert string dates to datetime.
This works like `pd.to_datetime`, but supports more date formats and shifts
the dates to the end of period instead of the start.
:param dates: series or index of dates to convert
:param date_standard: "standard" or "ISO_8601", `None` defaults to
"standard"
:return: datetime dates
"""
# This function doesn't deal with mixed formats.
hdbg.dassert_isinstance(dates, Iterable)
hdbg.dassert(not isinstance(dates, str))
# Try converting to datetime using `pd.to_datetime`.
format_example_index = -1
date_example = dates.tolist()[format_example_index]
format_fix = _handle_incorrect_conversions(date_example)
if format_fix is not None:
format_, date_modification_func = format_fix
dates = dates.map(date_modification_func)
date_example = dates.tolist()[format_example_index]
else:
format_ = None
datetime_dates = pd.to_datetime(dates, format=format_, errors="coerce")
# Shift to end of period if conversion has been successful.
if not pd.isna(datetime_dates).all():
datetime_example = datetime_dates.tolist()[format_example_index]
if (
not pd.isna(datetime_example)
and datetime_example.strftime("%Y-%m-%d") == date_example
):
return datetime_dates
shift_func = _shift_to_period_end(date_example)
if shift_func is not None:
datetime_dates = datetime_dates.map(shift_func)
return datetime_dates
# If standard conversion fails, attempt our own conversion.
date_standard = date_standard or "standard"
format_determination_output = _determine_date_format(
date_example, date_standard
)
if format_determination_output is None:
return datetime_dates
format_, date_modification_func = format_determination_output
dates = dates.map(date_modification_func)
return pd.to_datetime(dates, format=format_)
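# Hedged usage example, traced from the code above rather than from separate
# documentation: a monthly label such as "2021-M2" is first rewritten to the
# last day of that month and is then expected to parse to 2021-02-28.
# to_generalized_datetime(pd.Series(["2021-M1", "2021-M2"]))
# -> Timestamps 2021-01-31 and 2021-02-28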
def _handle_incorrect_conversions(
date: str,
) -> Optional[Tuple[Optional[str], Callable[[str], str]]]:
"""
Change data pre-processing for cases when `pd.to_datetime` is mistaken.
:param date: string date
:return: date format and a function to apply to string dates before passing
them into `pd.to_datetime()`
"""
if len(date) in [7, 8]:
# "2021-M2" is transformed to '2020-01-01 00:00:01' by
# `pd.to_datetime`.
if date[:4].isdigit() and date[4] in ["-", ".", "/"] and date[5] == "M":
def modify_monthly_date(x: str) -> str:
year_number = int(x[:4])
month_number = x[6:]
num_days_in_month = calendar.monthrange(
year_number, int(month_number)
)[1]
modified_x = f"{x[:4]}-{month_number}-{num_days_in_month}"
return modified_x
return "%Y-%m-%d", modify_monthly_date
return None
def _shift_to_period_end( # pylint: disable=too-many-return-statements
date: str,
) -> Optional[Callable[[StrictDatetime], StrictDatetime]]:
"""
Get function to shift the dates to the end of period.
:param date: string date
:return: a function to shift the dates to the end of period. If `None`, no
shift is needed
"""
def shift_to_month_end(x: StrictDatetime) -> StrictDatetime:
return x + pd.offsets.MonthEnd(0)
def shift_to_quarter_end(x: StrictDatetime) -> StrictDatetime:
return x + pd.offsets.QuarterEnd(0)
def shift_to_year_end(x: StrictDatetime) -> StrictDatetime:
return x +
|
pd.offsets.YearEnd(0)
|
pandas.offsets.YearEnd
|
import os.path as osp
from PIL import Image
import numpy as np
import pandas as pd
import random
import torch
from torch.utils.data import Dataset
from torchvision import transforms
ROOT_PATH = './materials/'
class MiniImageNet(Dataset):
def __init__(self, setname):
csv_path = osp.join(ROOT_PATH, setname + '.csv')
lines = [x.strip() for x in open(csv_path, 'r').readlines()][1:]
data = []
label = []
lb = -1
self.wnids = []
for l in lines:
name, wnid = l.split(',')
path = osp.join(ROOT_PATH, 'images', name)
if wnid not in self.wnids:
self.wnids.append(wnid)
lb += 1
data.append(path)
label.append(lb)
self.data = data
self.label = label
self.transform = transforms.Compose([
transforms.Resize(84),
transforms.CenterCrop(84),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
def __len__(self):
return len(self.data)
def __getitem__(self, i):
path, label = self.data[i], self.label[i]
image = self.transform(Image.open(path).convert('RGB'))
return image, label
class ConcatDataset(torch.utils.data.Dataset):
def __init__(self, *datasets):
self.datasets = datasets
def __getitem__(self, i):
return tuple(d[i] for d in self.datasets)
def __len__(self):
return min(len(d) for d in self.datasets)
def splitImageNet(sup_ratio):
setname = 'train'
csv_path = osp.join(ROOT_PATH, setname + '.csv')
unsup_path = osp.join(ROOT_PATH,'unsup' + '.csv')
sup_path = osp.join(ROOT_PATH,'sup' + '.csv')
df = pd.read_csv(csv_path)
    msk = np.random.rand(len(df)) < sup_ratio  # use the requested supervised ratio instead of a hardcoded value
print('Number of supervised samples: {}'.format(sum(msk)))
print('Number of unsupervised samples: {}'.format(sum(~msk)))
df[~msk].to_csv(unsup_path, index = False)
df[msk].to_csv(sup_path, index = False)
def splitIm(splits):
setname = 'train'
csv_path = osp.join(ROOT_PATH, setname + '.csv')
df =
|
pd.read_csv(csv_path)
|
pandas.read_csv
|
import pandas as pd
import logging
import sys
import os
import numpy as np
from itertools import chain
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
np.random.seed(42)
class Streamgraph(object):
def __init__(self, loglevel="INFO"):
self.logger = logging.getLogger(__name__)
self.logger.setLevel(loglevel)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
handler.setLevel(loglevel)
self.logger.addHandler(handler)
def get_streamgraph_data(self, metadata, query, n=12, method="tfidf"):
df = pd.DataFrame.from_records(metadata)
df.year =
|
pd.to_datetime(df.year)
|
pandas.to_datetime
|
import numpy as np
import pandas as pd
from datamart.profilers.helpers import feature_compute_hih as fc_hih, feature_compute_lfh as fc_lfh
computable_metafeatures = [
'ratio_of_values_containing_numeric_char', 'ratio_of_numeric_values',
'number_of_outlier_numeric_values', 'num_filename', 'number_of_tokens_containing_numeric_char',
'number_of_numeric_values_equal_-1', 'most_common_numeric_tokens', 'most_common_tokens',
'ratio_of_distinct_tokens', 'number_of_missing_values',
'number_of_distinct_tokens_split_by_punctuation', 'number_of_distinct_tokens',
'ratio_of_missing_values', 'semantic_types', 'number_of_numeric_values_equal_0',
'number_of_positive_numeric_values', 'most_common_alphanumeric_tokens',
'numeric_char_density', 'ratio_of_distinct_values', 'number_of_negative_numeric_values',
'target_values', 'ratio_of_tokens_split_by_punctuation_containing_numeric_char',
'ratio_of_values_with_leading_spaces', 'number_of_values_with_trailing_spaces',
'ratio_of_values_with_trailing_spaces', 'number_of_numeric_values_equal_1',
'natural_language_of_feature', 'most_common_punctuations', 'spearman_correlation_of_features',
'number_of_values_with_leading_spaces', 'ratio_of_tokens_containing_numeric_char',
'number_of_tokens_split_by_punctuation_containing_numeric_char', 'number_of_numeric_values',
'ratio_of_distinct_tokens_split_by_punctuation', 'number_of_values_containing_numeric_char',
'most_common_tokens_split_by_punctuation', 'number_of_distinct_values',
'pearson_correlation_of_features']
default_metafeatures = [
'ratio_of_values_containing_numeric_char', 'ratio_of_numeric_values',
'number_of_outlier_numeric_values', 'num_filename', 'number_of_tokens_containing_numeric_char']
class DSboxProfiler(object):
"""
    Data profiler module. Currently only supports csv data.
    Parameters:
    ----------
    _punctuation_outlier_weight: an integer
    the coefficient used in outlier detection for punctuation. default is 3
    _numerical_outlier_weight
    _token_delimiter: a string
    delimiter used to separate tokens; default is a blank space " ".
    _detect_language: boolean
    true: detect language; false: do not detect language
    _topk: an integer
Attributes:
----------
"""
def __init__(self, compute_features=None) -> None:
# All other attributes must be private with leading underscore
self._punctuation_outlier_weight = 3
self._numerical_outlier_weight = 3
self._token_delimiter = " "
self._detect_language = False
self._topk = 10
# list of specified features to compute
self._specified_features = compute_features if compute_features else default_metafeatures
def profile(self, inputs: pd.DataFrame, metadata: dict) -> dict:
"""Save metadata json to file.
Args:
inputs: pandas dataframe
metadata: dict
Returns:
dict
"""
metadata = self._profile_data(inputs, metadata)
return metadata
def _profile_data(self, data: pd.DataFrame, metadata: dict) -> dict:
"""Save metadata json to file.
Args:
data: pandas dataframe
metadata: dict
Returns:
dict with dsbox profiled fields
"""
# STEP 1: data-level calculations
if "pearson_correlation_of_features" in self._specified_features:
corr_pearson = data.corr()
corr_columns = list(corr_pearson.columns)
if "spearman_correlation_of_features" in self._specified_features:
corr_spearman = data.corr(method='spearman')
corr_columns = list(corr_spearman.columns)
# STEP 2: column-level calculations
column_counter = -1
for column_name in data:
column_counter += 1
col = data[column_name]
# dict: map feature name to content
each_res = dict()
if "spearman_correlation_of_features" in self._specified_features and column_name in corr_columns:
stats_sp = corr_spearman[column_name].describe()
each_res["spearman_correlation_of_features"] = {
'min': stats_sp['min'],
'max': stats_sp['max'],
'mean': stats_sp['mean'],
'median': stats_sp['50%'],
'std': stats_sp['std']
}
if "spearman_correlation_of_features" in self._specified_features and column_name in corr_columns:
stats_pr = corr_pearson[column_name].describe()
each_res["pearson_correlation_of_features"] = {
'min': stats_pr['min'],
'max': stats_pr['max'],
'mean': stats_pr['mean'],
'median': stats_pr['50%'],
'std': stats_pr['std']
}
if col.dtype.kind in np.typecodes['AllInteger'] + 'uMmf':
if "number_of_missing_values" in self._specified_features:
each_res["number_of_missing_values"] = pd.isnull(col).sum()
if "ratio_of_missing_values" in self._specified_features:
each_res["ratio_of_missing_values"] = pd.isnull(col).sum() / col.size
if "number_of_distinct_values" in self._specified_features:
each_res["number_of_distinct_values"] = col.nunique()
if "ratio_of_distinct_values" in self._specified_features:
each_res["ratio_of_distinct_values"] = col.nunique() / float(col.size)
if col.dtype.kind == 'b':
if "most_common_raw_values" in self._specified_features:
fc_hih.compute_common_values(col.dropna().astype(str), each_res, self._topk)
elif col.dtype.kind in np.typecodes['AllInteger'] + 'uf':
fc_hih.compute_numerics(col, each_res,
self._specified_features) # TODO: do the checks inside the function
if "most_common_raw_values" in self._specified_features:
fc_hih.compute_common_values(col.dropna().astype(str), each_res, self._topk)
else:
# Need to compute str missing values before fillna
if "number_of_missing_values" in self._specified_features:
each_res["number_of_missing_values"] = pd.isnull(col).sum()
if "ratio_of_missing_values" in self._specified_features:
each_res["ratio_of_missing_values"] =
|
pd.isnull(col)
|
pandas.isnull
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 10 19:40:20 2021
@author: <NAME>
"""
import yfinance as yf
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
class stockstats:
def __init__(self,ticker,start,end):
self.ticker=ticker;
self.start=start;
self.end=end;
self.S=yf.download(self.ticker,
self.start,
self.end);
self.o=self.S.Open
self.h=self.S.High
self.l=self.S.Low
self.c=self.S.Close
def Stock_Price(self):
return self.S;
def HV(self):
hv=pd.DataFrame()
for s in self.ticker:
hv[s]=[np.log(self.c[s]/self.c[s].shift(1)).std()];
return hv;
def RSHV(self):
rshv=
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 15 15:10:42 2018
@author: amogh
"""
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.formula.api as sm
import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
#Load dataset
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:,:-1].values
y = dataset.iloc[:,-1:].values
#Label Encoder
labelencoder = LabelEncoder()
X[:, 3] = labelencoder.fit_transform(X[:, 3])
#OneHotEncoder
onehotencoder = OneHotEncoder(categorical_features = [3])
X = onehotencoder.fit_transform(X).toarray()
df_X =
|
pd.DataFrame(X)
|
pandas.DataFrame
|
import logging
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
import sentry_sdk
from solarforecastarbiter import utils
def _make_aggobs(obsid, ef=pd.Timestamp('20191001T1100Z'),
eu=None, oda=None):
return {
'observation_id': obsid,
'effective_from': ef,
'effective_until': eu,
'observation_deleted_at': oda
}
nindex = pd.date_range(start='20191004T0000Z',
freq='1h', periods=10)
@pytest.fixture()
def ids():
return ['f2844284-ea0a-11e9-a7da-f4939feddd82',
'f3e310ba-ea0a-11e9-a7da-f4939feddd82',
'09ed7cf6-ea0b-11e9-a7da-f4939feddd82',
'0fe9f2ba-ea0b-11e9-a7da-f4939feddd82',
'67ea9200-ea0e-11e9-832b-f4939feddd82']
@pytest.fixture()
def aggobs(ids):
return tuple([
_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191004T0501Z')),
_make_aggobs(ids[2], eu=pd.Timestamp('20191004T0400Z')),
_make_aggobs(ids[2], pd.Timestamp('20191004T0700Z'),
eu=pd.Timestamp('20191004T0800Z')),
_make_aggobs(ids[2], pd.Timestamp('20191004T0801Z')),
_make_aggobs(ids[3], oda=pd.Timestamp('20191005T0000Z')),
_make_aggobs(ids[4], oda=pd.Timestamp('20191009T0000Z'),
eu=pd.Timestamp('20191003T0000Z'))
])
def test_compute_aggregate(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2])
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
def test_compute_aggregate_missing_from_data(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
aggobs = list(aggobs[:-2]) + [
_make_aggobs('09ed7cf6-ea0b-11e9-a7da-f4939fed889')]
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_empty_data(aggobs, ids):
data = {}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:2], nindex)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_compute_aggregate_missing_data(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
data[ids[-1]] = pd.DataFrame({'value': [1] * 8, 'quality_flag': [0] * 8},
index=nindex[:-2])
aggobs = list(aggobs[:-2]) + [_make_aggobs(ids[-1])]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series(
[3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 4.0, None, None],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
def test_compute_aggregate_deleted_not_removed(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids}
with pytest.raises(ValueError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_deleted_not_removed_yet(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
# with last aggobs, would try and get data before effective_until,
# but was deleted, so raise error
aggobs = list(aggobs[:-2]) + [
_make_aggobs(ids[4], oda=pd.Timestamp('20191009T0000Z'),
eu=pd.Timestamp('20191004T0700Z'))]
with pytest.raises(ValueError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_deleted_but_removed_before(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
# aggobs[-1] properly removed
aggobs = list(aggobs[:-2]) + [aggobs[-1]]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)}))
def test_compute_aggregate_mean(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'mean', aggobs[:-2])
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([1.0] * 10, index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_compute_aggregate_no_overlap(ids):
data = {ids[0]: pd.DataFrame(
{'value': [1, 2, 3], 'quality_flag': [2, 10, 338]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0230Z'])),
ids[1]: pd.DataFrame(
{'value': [3, 2, 1], 'quality_flag': [9, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0200Z', '20191002T0230Z', '20191002T0300Z']))}
aggobs = [_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191002T0200Z'))]
agg = utils.compute_aggregate(data, '30min', 'ending',
'UTC', 'median', aggobs)
expected = pd.DataFrame(
{'value': [1.0, 2.0, None, 2.5, None],
'quality_flag': [2, 10, 9, 338 | 880, 10]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z']))
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_missing_before_effective(ids):
data = {ids[0]: pd.DataFrame(
{'value': [1, 2, 3, 0, 0], 'quality_flag': [2, 10, 338, 0, 0]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z'])),
ids[1]: pd.DataFrame(
{'value': [None, 2.0, 1.0], 'quality_flag': [0, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0200Z', '20191002T0230Z', '20191002T0300Z']))}
aggobs = [_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191002T0201Z'))]
agg = utils.compute_aggregate(data, '30min', 'ending',
'UTC', 'max', aggobs)
expected = pd.DataFrame(
{'value': [1.0, 2.0, 3.0, 2.0, 1.0],
'quality_flag': [2, 10, 338, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z']))
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_bad_cols():
data = {'a': pd.DataFrame([0], index=pd.DatetimeIndex(
['20191001T1200Z']))}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending', 'UTC',
'mean', [_make_aggobs('a')])
def test_compute_aggregate_index_provided(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
the_index = nindex.copy()[::2]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], the_index)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 3.0],
index=the_index),
'quality_flag': pd.Series([0]*5, index=the_index)})
)
@pytest.mark.parametrize('dfindex,missing_idx', [
(pd.date_range(start='20191004T0000Z', freq='1h', periods=11), -1),
(pd.date_range(start='20191003T2300Z', freq='1h', periods=11), 0),
])
def test_compute_aggregate_missing_values_with_index(
aggobs, ids, dfindex, missing_idx):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], dfindex)
assert pd.isnull(agg['value'][missing_idx])
def test_compute_aggregate_partial_missing_values_with_index(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:2]}
data[ids[2]] = pd.DataFrame({'value': [1] * 5, 'quality_flag': [0] * 5},
index=nindex[5:])
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], nindex)
expected = pd.DataFrame(
{'value': pd.Series(
[np.nan, np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)}
)
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_missing_obs_with_index(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:2]}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending', 'UTC', 'sum',
aggobs[:-2], nindex)
def test_compute_aggregate_out_of_effective(aggobs, ids):
limited_aggobs = [aggob
for aggob in aggobs
if aggob['effective_until'] is not None]
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
max_time = pd.Series([o['effective_until'] for o in limited_aggobs]).max()
ooe_index = pd.date_range(
max_time + pd.Timedelta('1H'),
max_time + pd.Timedelta('25H'),
freq='60min'
)
with pytest.raises(ValueError) as e:
utils.compute_aggregate(data, '1h', 'ending', 'UTC', 'sum',
limited_aggobs, ooe_index)
assert str(e.value) == 'No effective observations in data'
def test__observation_valid(aggobs):
out = utils._observation_valid(
nindex, 'f2844284-ea0a-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(True, index=nindex))
def test__observation_valid_ended(aggobs):
out = utils._observation_valid(
nindex, 'f3e310ba-ea0a-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series([False] * 6 + [True] * 4,
index=nindex))
def test__observation_valid_many(aggobs):
out = utils._observation_valid(
nindex, '09ed7cf6-ea0b-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(
[True, True, True, True, True, False, False, True, True, True],
index=nindex))
def test__observation_valid_deleted(aggobs):
with pytest.raises(ValueError):
utils._observation_valid(
nindex, '0fe9f2ba-ea0b-11e9-a7da-f4939feddd82', aggobs)
def test__observation_valid_deleted_before(aggobs):
out = utils._observation_valid(
nindex, '67ea9200-ea0e-11e9-832b-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(False, index=nindex))
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0700Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0700Z', end='20191004T0745Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0700Z', '20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z',
'20191004T0800Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z'])),
])
def test__make_aggregate_index(length, label, expected):
test_data = {
0: pd.DataFrame(range(5), index=pd.date_range(
'20191004T0700Z', freq='7min', periods=5)), # end 35
1: pd.DataFrame(range(4), index=pd.date_range(
'20191004T0015-0700', freq='10min', periods=4))} # end 45
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0715Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0700Z', end='20191004T0730Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0720Z', '20191004T0740Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z'])),
])
def test__make_aggregate_index_offset_right(length, label, expected):
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20191004T0701Z', freq='7min', periods=6)) # end 35
}
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0700Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0645Z', end='20191004T0730Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0700Z', '20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0600Z',
'20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0640Z', '20191004T0700Z', '20191004T0720Z'])),
('36min', 'ending', pd.DatetimeIndex(['20191004T0712Z',
'20191004T0748Z'])),
('36min', 'beginning', pd.DatetimeIndex(['20191004T0636Z',
'20191004T0712Z'])),
])
def test__make_aggregate_index_offset_left(length, label, expected):
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20191004T0658Z', freq='7min', periods=6)) # end 32
}
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
def test__make_aggregate_index_tz():
length = '30min'
label = 'beginning'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20190101T1600Z', freq='5min', periods=6)) # end 30
}
expected = pd.DatetimeIndex(['20190101T0900'],
tz='America/Denver')
out = utils._make_aggregate_index(test_data, length, label,
'America/Denver')
pdt.assert_index_equal(out, expected)
def test__make_aggregate_index_invalid_length():
length = '33min'
label = 'beginning'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20190101T0158Z', freq='7min', periods=6)) # end 32
}
with pytest.raises(ValueError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
def test__make_aggregate_index_instant():
length = '30min'
label = 'instant'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20190101T0100Z', freq='10min', periods=6)) # end 32
}
with pytest.raises(ValueError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
@pytest.mark.parametrize('start,end', [
(pd.Timestamp('20190101T0000Z'), pd.Timestamp('20190102T0000')),
(pd.Timestamp('20190101T0000'), pd.Timestamp('20190102T0000Z')),
(pd.Timestamp('20190101T0000'), pd.Timestamp('20190102T0000')),
])
def test__make_aggregate_index_localization(start, end):
length = '30min'
label = 'ending'
test_data = {
0: pd.DataFrame(range(1), index=pd.DatetimeIndex([start])),
1: pd.DataFrame(range(1), index=pd.DatetimeIndex([end])),
}
with pytest.raises(TypeError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
@pytest.mark.parametrize('inp,oup', [
(pd.DataFrame(dtype=float), pd.Series(dtype=float)),
(pd.DataFrame(index=pd.DatetimeIndex([]), dtype=float),
pd.DataFrame(dtype=float)),
(pd.Series([0, 1]), pd.Series([0, 1])),
(pd.DataFrame([[0, 1], [1, 2]]), pd.DataFrame([[0, 1], [1, 2]])),
pytest.param(
pd.Series([0, 1]),
pd.Series([0, 1], index=pd.date_range(start='now', freq='1min',
periods=2)),
        marks=pytest.mark.xfail(raises=AssertionError, strict=True)),
pytest.param(
pd.Series([0, 1]),
pd.Series([1, 0]),
        marks=pytest.mark.xfail(raises=AssertionError, strict=True))
])
def test_sha256_pandas_object_hash(inp, oup):
assert utils.sha256_pandas_object_hash(inp) == utils.sha256_pandas_object_hash(oup) # NOQA
def test_listhandler():
logger = logging.getLogger('testlisthandler')
handler = utils.ListHandler()
logger.addHandler(handler)
logger.setLevel('DEBUG')
logger.warning('Test it')
logger.debug('What?')
out = handler.export_records()
assert len(out) == 1
assert out[0].message == 'Test it'
assert len(handler.export_records(logging.DEBUG)) == 2
def test_listhandler_recreate():
logger = logging.getLogger('testlisthandler')
handler = utils.ListHandler()
logger.addHandler(handler)
logger.setLevel('DEBUG')
logger.warning('Test it')
logger.debug('What?')
out = handler.export_records()
assert len(out) == 1
assert out[0].message == 'Test it'
assert len(handler.export_records(logging.DEBUG)) == 2
l2 = logging.getLogger('testlist2')
h2 = utils.ListHandler()
l2.addHandler(h2)
l2.error('Second fail')
out = h2.export_records()
assert len(out) == 1
assert out[0].message == 'Second fail'
def test_hijack_loggers(mocker):
old_handler = mocker.MagicMock()
new_handler = mocker.MagicMock()
mocker.patch('solarforecastarbiter.utils.ListHandler',
return_value=new_handler)
logger = logging.getLogger('testhijack')
logger.addHandler(old_handler)
assert logger.handlers[0] == old_handler
with utils.hijack_loggers(['testhijack']):
assert logger.handlers[0] == new_handler
assert logger.handlers[0] == old_handler
def test_hijack_loggers_sentry(mocker):
events = set()
def before_send(event, hint):
events.add(event['logger'])
return
sentry_sdk.init(
"https://[email protected]/0",
before_send=before_send)
logger = logging.getLogger('testlog')
with utils.hijack_loggers(['testlog']):
logging.getLogger('root').error('will show up')
logger.error('AHHH')
assert 'root' in events
assert 'testlog' not in events
events = set()
logging.getLogger('root').error('will show up')
logger.error('AHHH')
assert 'root' in events
assert 'testlog' in events
@pytest.mark.parametrize('data,freq,expected', [
(pd.Series(index=pd.DatetimeIndex([]), dtype=float), '5min',
[pd.Series(index=pd.DatetimeIndex([]), dtype=float)]),
(pd.Series([1.0], index=pd.DatetimeIndex(['2020-01-01T00:00Z'])),
'5min',
[pd.Series([1.0], index=pd.DatetimeIndex(['2020-01-01T00:00Z']))]),
(pd.Series(
[1.0, 2.0, 3.0],
index=pd.date_range('2020-01-01T00:00Z', freq='1h', periods=3)),
'1h',
[pd.Series(
[1.0, 2.0, 3.0],
index=pd.date_range('2020-01-01T00:00Z', freq='1h', periods=3))]),
(pd.Series(
[1.0, 2.0, 4.0],
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z',
'2020-01-01T04:00Z'])),
'1h',
[pd.Series(
[1.0, 2.0],
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z'])),
pd.Series(
[4.0],
index=pd.DatetimeIndex(['2020-01-01T04:00Z'])),
]),
(pd.Series(
[1.0, 3.0, 5.0],
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T03:00Z',
'2020-01-01T05:00Z'])),
'1h',
[pd.Series(
[1.0],
index=pd.DatetimeIndex(['2020-01-01T01:00Z'])),
pd.Series(
[3.0],
index=pd.DatetimeIndex(['2020-01-01T03:00Z'])),
pd.Series(
[5.0],
index=pd.DatetimeIndex(['2020-01-01T05:00Z'])),
]),
(pd.DataFrame(index=pd.DatetimeIndex([]), dtype=float), '1h',
[pd.DataFrame(index=pd.DatetimeIndex([]), dtype=float)]),
(pd.DataFrame(
{'a': [1.0, 2.0, 4.0], 'b': [11.0, 12.0, 14.0]},
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z',
'2020-01-01T04:00Z'])),
'1h',
[pd.DataFrame(
{'a': [1.0, 2.0], 'b': [11.0, 12.0]},
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z'])),
pd.DataFrame(
{'a': [4.0], 'b': [14.0]},
index=pd.DatetimeIndex(['2020-01-01T04:00Z'])),
]),
(pd.DataFrame(
{'_cid': [1.0, 2.0, 4.0], '_cid0': [11.0, 12.0, 14.0]},
index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z',
'2020-01-01T04:00Z'])),
'1h',
[pd.DataFrame(
{'_cid': [1.0, 2.0], '_cid0': [11.0, 12.0]},
         index=pd.DatetimeIndex(['2020-01-01T01:00Z', '2020-01-01T02:00Z'])),
      pd.DataFrame(
          {'_cid': [4.0], '_cid0': [14.0]},
          index=pd.DatetimeIndex(['2020-01-01T04:00Z'])),
      ]),
])
# Mar21, 2022
##
#---------------------------------------------------------------------
# SERVER version: input all files (.bam and .fa); output MeH matrix in .csv
# August 3, 2021 clean
# FINAL github
#---------------------------------------------------------------------
import random
import math
import pysam
import csv
import sys
import pandas as pd
import numpy as np
import datetime
import time as t
from collections import Counter, defaultdict, OrderedDict
import os  # os.path.splitext is used below to parse .bam file names
#---------------------------------------
# Functions definition
#---------------------------------------
def open_log(fname):
open_log.logfile = open(fname, 'w', 1)
def logm(message):
log_message = "[%s] %s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
print(log_message),
open_log.logfile.write(log_message)
def close_log():
open_log.logfile.close()
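# Illustrative only (not part of the original pipeline): how the three logging
# helpers are meant to be used together; 'demo.log' is a hypothetical file name.
def _demo_logging():
    open_log('demo.log')
    logm('started a demo run')
    close_log()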
# Percentage of windows with enough reads for MeH evaluation (complete=True) or imputation (complete=False)
def coverage(methbin,complete,w):
count=0
tot = 0
meth=methbin.iloc[:,methbin.columns!='Qname']
if len(meth.columns)>=w:
for i in range(len(meth.columns)-w+1):
# extract a window
temp = meth.iloc[:,i:i+w].copy()
#print(temp)
tot = tot+1
if (enough_reads(window=temp,complete=complete,w=w)):
count=count+1
#toprint=temp.notnull().sum(axis=1)>=w
#print(toprint.sum())
#print(count)
#print(tot)
return count/tot*100
else:
return 0
# Check whether a window has enough reads for complete/impute
def enough_reads(window,w,complete):
temp=np.isnan(window).sum(axis=1)==0
if complete: # For heterogeneity estimation
return temp.sum()>=2**w
else: # for imputation
tempw1=np.isnan(window).sum(axis=1)==1
return temp.sum()>=2**(w-2) and tempw1.sum()>0
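# Illustrative only: a made-up 4-read, 3-CpG window showing the two eligibility
# rules above (2**w complete reads for MeH scoring; 2**(w-2) complete reads plus
# at least one single-NaN read for imputation).
def _demo_enough_reads():
    toy = np.array([[1, 0, 1],
                    [1, 1, 1],
                    [0, 0, 0],
                    [1, np.nan, 0]])
    complete_ok = enough_reads(toy, w=3, complete=True)    # False: 3 < 2**3 complete reads
    impute_ok = enough_reads(toy, w=3, complete=False)     # True: 3 >= 2**1 and one 1-NaN read
    return complete_ok, impute_ok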
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
#print("win_part i =",window[part_ind[i],pos])
#print("s = ",np.float64(s))
return window
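# Illustrative only: impute() fills a single missing CpG by borrowing the status
# from a fully observed read whose other w-1 positions match; the toy array
# below is made-up data, not output of this script.
def _demo_impute():
    toy = np.array([[1., 0., 1.],
                    [1., 0., 1.],
                    [0., 1., 0.],
                    [1., np.nan, 1.]])
    return impute(toy.copy(), w=3)   # the NaN in the last read becomes 0.0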
def getcomplete(window,w):
temp=np.isnan(window).sum(axis=1)==0
mat=window[np.where(temp)[0],:]
#temp=window.notnull().sum(axis=1)>=w
#mat=window.iloc[np.where(temp)[0],:]
#else:
# temp=mat.notnull().sum(axis=1)>=w-1
return mat
def PattoDis(mat,dist=1):
s=mat.shape[0]
dis=np.zeros((s,s))
for i in range(s):
for j in range(s):
if j<i:
if dist==1:
d=Ham_d(mat.iloc[i,],mat.iloc[j,])
else:
d=WDK_d(mat.iloc[i,],mat.iloc[j,])
dis[i,j]=dis[j,i]=d
return dis
def Ham_d(pat1,pat2):
return (pat1!=pat2).sum()
def WDK_d(pat1,pat2):
d=0
w=pat1.shape[0]
for i in range(w): # k-1
for j in range(w-i): # starting pos
s=(w-i-1)*(1-np.all(pat1[j:j+i+1]==pat2[j:j+i+1]))
d+=s
return d
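# Illustrative only: Hamming vs. weighted-degree-kernel distance between two
# hypothetical 4-CpG patterns, plus the pairwise distance matrix over all 2**w
# patterns that MeHperwindow receives as D.
def _demo_pattern_distances():
    p1 = pd.Series([1, 1, 0, 0])
    p2 = pd.Series([1, 0, 0, 1])
    d_ham = Ham_d(p1, p2)     # 2: the patterns differ at two positions
    d_wdk = WDK_d(p1, p2)     # 14: mismatching substrings are weighted by length
    all_pos = np.zeros((2**2, 2))
    for i in range(2):
        all_pos[:, i] = np.linspace(0, 2**2-1, 2**2) % (2**(i+1)) // (2**i)
    D = PattoDis(pd.DataFrame(all_pos), dist=1)   # 4x4 Hamming distance matrix
    return d_ham, d_wdk, D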
# Input a window of w CGs and output the counts of all 2**w methylation patterns,
# together with the window's starting genomic position and the genomic distance it spans
def window_summ(pat,start,dis,chrom):
m=np.shape(pat)[0]
d=np.shape(pat)[1]
all_pos=np.zeros((2**d,d))
for i in range(d):
all_pos[:,i]=np.linspace(0,2**d-1,2**d)%(2**(i+1))//(2**i)
#print(all_pos)
prob=np.zeros((2**d,1))
#print(prob)
for i in range(2**d):
count = 0
for j in range(m):
if (all_pos[i,:]==pat.iloc[j,:]).sum()==d:
count += 1
#print(count)
prob[i]=count
if d==3:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'dis':dis})
if d==4:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'dis':dis})
if d==5:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'dis':dis})
if d==6:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'p33':prob[32],'p34':prob[33],'p35':prob[34],\
'p36':prob[35],'p37':prob[36],'p38':prob[37],'p39':prob[38],'p40':prob[39],\
'p41':prob[40],'p42':prob[41],'p43':prob[42],'p44':prob[43],'p45':prob[44],\
'p46':prob[45],'p47':prob[46],'p48':prob[47],'p49':prob[48],'p50':prob[49],\
'p51':prob[50],'p52':prob[51],'p53':prob[52],'p54':prob[53],'p55':prob[54],\
'p56':prob[55],'p57':prob[56],'p58':prob[57],'p59':prob[58],'p60':prob[59],\
'p61':prob[60],'p62':prob[61],'p63':prob[62],'p64':prob[63],'dis':dis})
return out
def MeHperwindow(pat,start,dis,chrom,D,w,optional,MeH=2,dist=1,strand='f'):
count=np.zeros((2**w,1))
m=np.shape(pat)[0]
pat=np.array(pat)
if w==2:
pat = Counter([str(i[0])+str(i[1]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00','10','01','11']])
if w==3:
pat = Counter([str(i[0])+str(i[1])+str(i[2]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000','100','010','110','001','101','011','111']])
if w==4:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if w==5:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if w==6:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
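    # Score definitions (per window), with p_i = count_i / m the frequency of
    # pattern i among the m complete reads:
    #   MeH=1  abundance-based: inverse Simpson index, 1 / sum_i p_i**2
    #   MeH=2  PWS-based: pairwise-distance-weighted heterogeneity using matrix D
    #   MeH=3  phylogeny-based diversity (only dist 1/2 with w=3 or 4 handled)
    #   MeH=4  Shannon entropy of the pattern distribution, divided by w
    #   MeH=5  epipolymorphism, 1 - sum_i p_i**2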
if MeH==1: # Abundance based
score=(((count/m)**2).sum(axis=0))**(-1)
elif MeH==2: # PWS based
interaction=np.multiply.outer(count/m,count/m).reshape((2**w,2**w))
Q=sum(sum(D*interaction))
#print("Q =",Q)
if Q==0:
score=0
else:
score=(sum(sum(D*(interaction**2)))/(Q**2))**(-0.5)
elif MeH==3: #Phylogeny based
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if dist==1 and w==4:
phylotree=np.append(np.append(np.append(np.append([0],np.repeat(0.5,16)),np.repeat(0.25,6)),[0.5]),np.repeat(0.25,6))
#phylotree=np.repeat(0,1).append(np.repeat(0.5,16)).append(np.repeat(0.25,6)).append(0.5).append(np.repeat(0.25,6))
countn=np.zeros(30)
#count<-rep(0,29)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[4]+countn[7]
countn[18]=countn[9]+countn[12]
countn[19]=countn[1]+countn[2]
countn[20]=countn[3]+countn[6]
countn[21]=countn[17]+countn[18]
countn[22]=countn[19]+countn[20]
countn[23]=countn[21]+countn[22]
countn[24]=countn[5]+countn[8]
countn[25]=countn[10]+countn[13]
countn[26]=countn[24]+countn[25]
countn[27]=countn[23]+countn[26]
countn[28]=countn[11]+countn[14]
countn[29]=countn[27]+countn[28]
#Q=sum(sum(phylotree*count))
if dist==2 and w==4:
            phylotree=np.append(np.append(np.append(np.append(np.append([0],np.repeat(3,16)),np.repeat(1.5,6)),[3.2,0.8]),np.repeat(2,3)),np.repeat(1.5,2))
#phylotree=c(rep(3,16),rep(1.5,6),3.2,0.8,rep(2,3),1.5,1.5)
countn=np.zeros(30)
#print(count)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[1]+countn[2]
countn[18]=countn[5]+countn[8]
countn[19]=countn[3]+countn[6]
countn[20]=countn[10]+countn[13]
countn[21]=countn[4]+countn[7]
countn[22]=countn[11]+countn[14]
countn[23]=countn[17]+countn[18]
countn[24]=countn[21]+countn[22]
countn[25]=countn[19]+countn[20]
countn[26]=countn[23]+countn[24]
countn[27]=countn[25]+countn[26]
countn[28]=countn[9]+countn[12]
countn[29]=countn[27]+countn[28]
#Q=sum(phylotree*count)
if dist==2 and w==3:
            phylotree=np.append(np.append(np.append([0],np.repeat(1.5,8)),np.repeat(0.75,3)),[1.5,0.75])
#phylotree=np.array(0).append(np.repeat(1.5,8)).append(np.repeat(0.75,3)).append(1.5,0.75)
#phylotree=c(rep(1.5,8),rep(0.75,3),1.5,0.75)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#Q=sum(phylotree*count)
if dist==1 and w==3:
phylotree=np.append(np.append(np.append([0],np.repeat(0.5,8)),np.repeat(0.25,3)),[0.5,0.25])
#phylotree=np.array(0).append(np.repeat(0.5,8)).append(np.repeat(0.25,3)).append(0.5,0.25)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#print("count = ",count)
#print("phylotree = ",phylotree)
Q=sum(phylotree*countn)
score=sum(phylotree*((countn/Q)**2))**(-1)
elif MeH==4: #Entropy
score=0
for i in count:
if i>0:
score-=(i/m)*np.log2(i/m)/w
elif MeH==5: #Epipoly
score=1-((count/m)**2).sum(axis=0)
if optional:
if MeH!=3:
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if w==3:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==4:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==5:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==6:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
        out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
        return out, opt
    else:
        out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
        return out
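# Illustrative only: PWS-based MeH (MeH=2, Hamming distance) for a toy window
# of eight 3-CpG reads split evenly between two patterns; the position,
# distance and chromosome labels are made up.
def _demo_mehperwindow():
    w = 3
    all_pos = np.zeros((2**w, w))
    for i in range(w):
        all_pos[:, i] = np.linspace(0, 2**w-1, 2**w) % (2**(i+1)) // (2**i)
    D = PattoDis(pd.DataFrame(all_pos), dist=1)
    reads = pd.DataFrame([[1, 1, 1]] * 4 + [[0, 0, 0]] * 4)
    # returns a one-row DataFrame with MeH = sqrt(6) ~ 2.44949 for this window
    return MeHperwindow(reads, start=101, dis=20, chrom='chr1',
                        D=D, w=w, optional=False, MeH=2, dist=1, strand='f')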
def CGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# initialise data frame for output
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','strand','depth'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
# all methylation patterns for Methylation heterogeneity evaluation
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
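    # e.g. for w=2 the rows of all_pos are [0,0],[1,0],[0,1],[1,1]: row k spells
    # out candidate pattern k, one CpG per column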
# distance matrix, also for Methylation heterogeneity evaluation
D=PattoDis(pd.DataFrame(all_pos),dist=dist) # 1:Hamming distance, 2: WDK
start=datetime.datetime.now()
# vector for saving methylation statuses before imputation
MU=np.zeros((2,w))
# screen bamfile by column
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now(),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# Forward strand, check if 'CG' in reference genome
if (fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+2)=='CG'):
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
# append reads in the column
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
temp=temp.append(df2, ignore_index=True)
if melv:
                temp2 = temp.replace(['C'],1)
                temp2 = temp2.replace(['T'],0)
                temp2 = temp2.replace(['A','G','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
# merge with other columns
if (not temp.empty):
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# Reverse strand, check if 'CG' in reference genome
if pileupcolumn.pos>1:
if (fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos+1)=='CG'):
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
dfr2 = pd.DataFrame(data=dr)
tempr=tempr.append(dfr2, ignore_index=True)
if melv:
temp2 = tempr.replace(['G'],1)
                    temp2 = temp2.replace(['A'],0)
                    temp2 = temp2.replace(['C','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
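        # Streaming strategy: CpG columns accumulate read by read. The first
        # time 2w-1 sites are held (the 'never'/'neverr' branch below) the
        # leading w windows are imputed and scored and one site is dropped;
        # afterwards, whenever 3w-2 sites have accumulated (the secondary case
        # further down) the next w windows are processed and w sites dropped,
        # so every window of w consecutive CpGs is evaluated once.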
# Impute and estimate, if there are 2w-1 columns
if never and aggreC.shape[1] == (2*w):
# C/G to 1, rest to 0, N to NA
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC
meth = methbin.copy()
# remove read ID
meth = meth.drop('Qname',axis=1)
# back up for imputation
if imp:
methtemp = meth.copy()
# imputation by sliding window of 1 C
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# save methylation statuses before imputation
# check if eligible for imputation, impute
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# overwrite imputed window
meth = methtemp.copy()
# Evaluate methylation level and methylation heterogeneity and append to result
for i in range(0,w,1): # w windows
window = meth.iloc[:,range(i,i+w)].values
# check if enough complete patterns for evaluating MeH
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
# if need to output methylation patterns
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
# evaluate and output MeH
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
# remove 1 column
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
# drop rows with no values
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# Reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
#for i in range(0,meth.shape[1]-w+1,1):
#if i>w-2 and i<2*w:
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
    print("Done CG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
    return sample, coverage, cov_context, 'CG'
#samfile.close()
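# Illustrative call (hypothetical file names): screen one sample for CG-context
# MeH with 4-CpG windows; the .bam and the reference .fa must sit under MeHdata/.
# CGgenome_scr('sample1_pe.bam', w=4, fa='genome', optional=False, melv=False,
#              silence=False, dist=1, MeH=2, imp=True)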
def CHHgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
start=datetime.datetime.now()
MU=np.zeros((2,w))
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHH %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# forward
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)!='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)!='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
tempr=tempr.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['A'],0)
temp2 = temp2.replace(['C','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
# MeH eligibility
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
#if enough_reads(window,w,complete=True):
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','G','A'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
# MeH eligibility
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
    print("Done CHH for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
    return sample, coverage, cov_context, 'CHH'
def CHGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
coverage = cov_context = 0
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
MU=np.zeros((2,w))
start=datetime.datetime.now()
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)=='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/float(MC+UC)}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)=='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # G
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2r = pd.DataFrame(data=dr)
#df2.head()
tempr=tempr.append(df2r, ignore_index=True)
#temp.head()
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['A'],0)
temp2 = temp2.replace(['C','T'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/float(MC+UC)}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
#temp.head()
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','G','N'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','C','T'],np.nan)
methbin = aggreR # backup
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#total += w
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','A','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
print("Done CHG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
return sample, coverage, cov_context, 'CHG'
def split_bam(samplenames,Folder):
# get bam size
spbam_list = []
bamfile = samplenames + '.bam'
statinfo_out = os.stat(Folder+bamfile)
bamsize = statinfo_out.st_size
samfile = pysam.Samfile(Folder+bamfile, "rb")
fileout_base = os.path.splitext(bamfile)[0] # filename
ext = '.bam'
x = 0
fileout = Folder+fileout_base+"_" + str(x)+ext # filename_x.bam
print("fileout",fileout)
header = samfile.header
outfile = pysam.Samfile(fileout, "wb", header = header)
sum_Outfile_Size=0
for reads in samfile.fetch():
outfile.write(reads)
statinfo_out = os.stat(fileout)
outfile_Size = statinfo_out.st_size
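# Note (assumption from the constant below): 337374182 bytes (~322 MB) appears to be the
# intended chunk size; once the current output BAM exceeds it, the chunk is closed,
# indexed, and a new filename_x.bam is started.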
if(outfile_Size >=337374182 and sum_Outfile_Size <= bamsize):
sum_Outfile_Size = sum_Outfile_Size + outfile_Size
x = x + 1
spbam_list.append(fileout_base + "_" + str(x)+ext)
outfile.close()
pysam.index(fileout)
fileout = Folder+fileout_base + "_" + str(x)+ext
print("fileout",fileout)
outfile = pysam.Samfile(fileout, "wb",header = header)
outfile.close()
pysam.index(fileout)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--windowsize",type=int, default=4 ,help='number of CGs')
parser.add_argument("-c", "--cores",type=int, default=4, help='number of cores')
parser.add_argument("-m", "--MeH",type=int, default=2, help='Methylation heterogeneity score 1:Abundance 2:PW 3:Phylogeny')
parser.add_argument("-d", "--dist",type=int, default=1, help='Distance between methylation patterns 1:Hamming 2:WDK')
parser.add_argument("--CG", default=False, action='store_true', help='Include genomic context CG')
parser.add_argument("--CHG", default=False, action='store_true', help='Include genomic context CHG')
parser.add_argument("--CHH", default=False, action='store_true', help='Include genomic context CHH')
parser.add_argument("--opt", default=False, action='store_true', help='Outputs compositions of methylation patterns')
parser.add_argument('--mlv', default=False, action='store_true', help='Outputs methylation levels')
parser.add_argument('--imp', default=True, action='store_false', help='Implement BSImp (impute if valid)')
args = parser.parse_args()
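# Example invocation (a sketch; the script name is assumed, flags follow the parser above):
#   python MeHscr.py -w 4 -c 8 --CG --CHG --opt --mlv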
import sys
import time
import os
import pandas as pd
import multiprocessing
from joblib import Parallel, delayed
#num_cores = multiprocessing.cpu_count()
if __name__ == "__main__":
open_log('MeHscreening.log')
logm("Call genome screening.")
#start = time.time()
Folder = 'MeHdata/'
files = os.listdir(Folder)
bam_list = []
# all samples' bam files
for file in files:
filename, file_extension = os.path.splitext(file)
if file_extension == '.fa':
fa = filename
if file_extension == '.bam':
bam_list.append(filename)
#if 'cores' in args:
# num_cores = args.cores
#else:
# num_cores = 4
Parallel(n_jobs=args.cores)(delayed(split_bam)(bamfile,Folder=Folder) for bamfile in bam_list)
spbam_list = []
tempfiles = os.listdir(Folder)
for file in tempfiles:
filename, file_extension = os.path.splitext(file)
if file_extension=='.bam' and filename not in bam_list:
spbam_list.append(filename)
#print(spbam_list)
topp = pd.DataFrame(columns=['sample','coverage','context_coverage','context'])
#CG = []
#start=t.time()
if args.CG:
con='CG'
CG=Parallel(n_jobs=args.cores)(delayed(CGgenome_scr)(bamfile,w=args.windowsize,fa=fa,MeH=args.MeH,dist=args.dist,optional=args.opt,melv=args.mlv,imp=args.imp) for bamfile in spbam_list)
logm("Merging MeH within samples for CG.")
# merge MeH within sample
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
print("Merging within sample",sample,"...")
if not sample == filename:
res_dir = Folder + con + '_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
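# ((x-1)//400)*400+200 maps a 1-based position to the centre of its 400 bp bin,
# e.g. pos 1-400 -> 200, pos 401-800 -> 600, pos 801-1200 -> 1000.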
Toappend=Toappend.drop(columns=['pos'])
#Toappend=Toappend.dropna(axis = 0, thresh=4, inplace = True)
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
#os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
#Toappend=Toappend.dropna(axis = 0, thresh=4, inplace = True)
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
# not into bins of 400bp
if args.opt:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
#print("sample = ",sample)
if not sample == filename:
res_dir = Folder + con + '_opt_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_opt_' +file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False, header = True)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend.to_csv(res_dir,index = False,header=True)
#os.chdir('../')
#os.chdir(outputFolder)
logm("Merging ML within samples for CG.")
# append ML within samples
if args.mlv:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
res_dir = Folder + con + '_ML_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_ML_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
#os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
#print(Toappend)
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
logm("Merging ML between samples for CG.")
# merge ML between samples
if args.mlv:
for sample in bam_list:
tomerge_dir = Folder + con + '_ML_' + str(sample) + '.csv'
res_dir = Folder + con + '_ML_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'ML': sample})
Result=Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(res_dir,index = False,header=True)
os.remove(tomerge_dir)
else:
Result = pd.read_csv(tomerge_dir)
Result = Result.rename(columns={'ML': sample})
#Result = Result.drop(columns=['counts','pos','depth','dis'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(res_dir,index = False,header=True)
os.remove(tomerge_dir)
logm("Merging MeH between samples for CG.")
# merge MeH between samples
for sample in bam_list:
tomerge_dir = Folder + con + '_' + str(sample) + '.csv'
res_dir = Folder + con + '_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'MeH': sample})
Result = Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
else:
Result = pd.read_csv(tomerge_dir)
Result.head()
Result.dropna(axis = 0, thresh=4, inplace = True)
Result = Result.rename(columns={'MeH': sample})
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
Result.to_csv(Folder + con + '_' +'Results.csv' ,index = False,header=True)
print("All done.",len(bam_list),"bam files processed and merged for CG.")
logm("All done. "+str(len(bam_list))+" bam files processed and merged for CG.")
for i in CG:
toout=pd.DataFrame({'sample':i[0],'coverage':i[1],'context_coverage':i[2],'context':i[3]},index=[0])
topp=topp.append(toout)
if args.CHG:
con='CHG'
CG=Parallel(n_jobs=args.cores)(delayed(CHGgenome_scr)(bamfile,w=args.windowsize,fa=fa,MeH=args.MeH,dist=args.dist,optional=args.opt,melv=args.mlv,imp=args.imp) for bamfile in spbam_list)
logm("Merging MeH within samples for CHG.")
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
print("Merging within sample",sample,"...")
if not sample == filename:
res_dir = Folder + con + '_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
# not into bins of 400bp
if args.opt:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
#print("sample = ",sample)
if not sample == filename:
res_dir = Folder + con + '_opt_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_opt_' +file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
logm("Merging ML within samples for CHG.")
# append ML within samples
if args.mlv:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
res_dir = Folder + con + '_ML_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_ML_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
#Count=Count.drop_duplicates()
#print(Count)
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
#print(Toappend)
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
logm("Merging MeH between samples for CHG.")
# merge MeH between samples
for sample in bam_list:
tomerge_dir = Folder + con + '_' + str(sample) + '.csv'
res_dir = Folder + con + '_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
#Tomerge = Tomerge.drop(columns=['dis','ML','depth'])
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'MeH': sample})
Result = Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
else:
Result = pd.read_csv(tomerge_dir)
Result.head()
Result = Result.rename(columns={'MeH': sample})
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
logm("Merging ML between samples for CHG.")
# merge ML between samples
if args.mlv:
for sample in bam_list:
tomerge_dir = Folder + con + '_ML_' + str(sample) + '.csv'
res_dir = Folder + con + '_ML_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'ML': sample})
Result=Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(res_dir,index = False,header=True)
os.remove(tomerge_dir)
else:
Result = pd.read_csv(tomerge_dir)
Result = Result.rename(columns={'ML': sample})
#Result = Result.drop(columns=['counts','pos','depth','dis'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(res_dir,index = False,header=True)
os.remove(tomerge_dir)
logm("All done. "+str(len(bam_list))+" bam files processed and merged for CHG.")
for i in CG:
toout=pd.DataFrame({'sample':i[0],'coverage':i[1],'context_coverage':i[2],'context':i[3]},index=[0])
topp=topp.append(toout)
if args.CHH:
con='CHH'
CG=Parallel(n_jobs=args.cores)(delayed(CHHgenome_scr)(bamfile,w=args.windowsize,fa=fa,MeH=args.MeH,dist=args.dist,optional=args.opt,melv=args.mlv,imp=args.imp) for bamfile in spbam_list)
logm("Merging MeH within samples for CHH.")
# merge MeH within sample
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
print("Merging within sample",sample,"...")
if not sample == filename:
res_dir = Folder + con + '_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
#Toappend=Toappend.dropna(axis = 0, thresh=4, inplace = True)
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
#os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
#Toappend=Toappend.dropna(axis = 0, thresh=4, inplace = True)
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
# not into bins of 400bp
if args.opt:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
print("sample = ",sample)
if not sample == filename:
res_dir = Folder + con + '_opt_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_opt_' +file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
logm("Merging ML within samples for CHH.")
# append ML within samples
if args.mlv:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
res_dir = Folder + con + '_ML_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_ML_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
#Count=Count.drop_duplicates()
#print(Count)
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
#Toappend=Toappend.dropna(axis = 0, thresh=4, inplace = True)
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
#print(Toappend)
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
#Toappend=Toappend.dropna(axis = 0, thresh=4, inplace = True)
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
logm("Merging MeH between samples for CHH.")
# merge MeH between samples
for sample in bam_list:
tomerge_dir = Folder + con + '_' + str(sample) + '.csv'
res_dir = Folder + con + '_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
#Tomerge = Tomerge.drop(columns=['dis','ML','depth'])
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'MeH': sample})
Result = Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
else:
Result =
|
pd.read_csv(tomerge_dir)
|
pandas.read_csv
|
import json
from typing import Tuple, Union
import pandas as pd
import numpy as np
import re
import os
from tableone import TableOne
from collections import defaultdict
from io import StringIO
from .gene_patterns import *
import plotly.express as px
import pypeta
from pypeta import Peta
from pypeta import filter_description
class SampleIdError(RuntimeError):
def __init__(self, sample_id: str, message: str):
self.sample_id = sample_id
self.message = message
class NotNumericSeriesError(RuntimeError):
def __init__(self, message: str):
self.message = message
class UnknowSelectionTypeError(RuntimeError):
def __init__(self, message: str):
self.message = message
class NotInColumnError(RuntimeError):
def __init__(self, message: str):
self.message = message
class GenesRelationError(RuntimeError):
def __init__(self, message: str):
self.message = message
class VariantUndefinedError(RuntimeError):
def __init__(self, message: str):
self.message = message
class ListsUnEqualLengthError(RuntimeError):
def __init__(self, message: str):
self.message = message
class DatetimeFormatError(RuntimeError):
def __init__(self, message: str):
self.message = message
class CDx_Data():
"""[summary]
"""
def __init__(self,
mut_df: pd.DataFrame = None,
cli_df: pd.DataFrame = None,
cnv_df: pd.DataFrame = None,
sv_df: pd.DataFrame = None,
json_str: str = None):
"""Constructor method with DataFrames
Args:
mut_df (pd.DataFrame, optional): SNV and InDel info. Defaults to None.
cli_df (pd.DataFrame, optional): Clinical info. Defaults to None.
cnv_df (pd.DataFrame, optional): CNV info. Defaults to None.
sv_df (pd.DataFrame, optional): SV info. Defaults to None.
"""
self.json_str = json_str
self.mut = mut_df
self.cnv = cnv_df
self.sv = sv_df
if not cli_df is None:
self.cli = cli_df
self.cli = self._infer_datetime_columns()
else:
self._set_cli()
self.crosstab = self.get_crosstab()
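# Minimal usage sketch (hypothetical file names; columns assumed to match the tables used below):
#   cdx = CDx_Data(mut_df=pd.read_csv('muts.maf', sep='\t'), cli_df=pd.read_csv('clinical.csv'))
#   first_sample = cdx[0]   # CDx_Data restricted to the first sample via __getitem__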
def __len__(self):
return 0 if self.cli is None else len(self.cli)
def __getitem__(self, n):
return self.select_by_sample_ids([self.cli.sampleId.iloc[n]])
def __sub__(self, cdx):
if self.cli is None and cdx.cli is None:
return CDx_Data()
cli = None if self.cli is None and cdx.cli is None else pd.concat(
[self.cli, cdx.cli]).drop_duplicates(keep=False)
mut = None if self.mut is None and cdx.mut is None else pd.concat(
[self.mut, cdx.mut]).drop_duplicates(keep=False)
cnv = None if self.cnv is None and cdx.cnv is None else pd.concat(
[self.cnv, cdx.cnv]).drop_duplicates(keep=False)
sv = None if self.sv is None and cdx.sv is None else pd.concat(
[self.sv, cdx.sv]).drop_duplicates(keep=False)
return CDx_Data(cli_df=cli, mut_df=mut, cnv_df=cnv, sv_df=sv)
def __add__(self, cdx):
if self.cli is None and cdx.cli is None:
return CDx_Data()
cli = pd.concat([self.cli, cdx.cli]).drop_duplicates()
mut = pd.concat([self.mut, cdx.mut]).drop_duplicates()
cnv = pd.concat([self.cnv, cdx.cnv]).drop_duplicates()
sv = pd.concat([self.sv, cdx.sv]).drop_duplicates()
return CDx_Data(cli_df=cli, mut_df=mut, cnv_df=cnv, sv_df=sv)
def from_PETA(self,
token: str,
json_str: str,
host='https://peta.bgi.com/api'):
"""Retrieve CDx data from BGI-PETA database.
Args:
token (str): Effective token for BGI-PETA database
json_str (str): JSON-format restrictions passed to the database
"""
self.json_str = json_str
peta = Peta(token=token, host=host)
peta.set_data_restriction_from_json_string(json_str)
# peta.fetch_clinical_data() does not infer dtypes correctly, so do it manually.
#self.cli = peta.fetch_clinical_data()
self.cli = pd.read_csv(
StringIO(peta.fetch_clinical_data().to_csv(None, index=False)))
self.mut = peta.fetch_mutation_data()
self.cnv = peta.fetch_cnv_data()
self.sv = peta.fetch_sv_data()
# dedup for the same sampleId in different studyIds, discard the duplicated ones from all tables
cli_original = self.cli
self.cli = self.cli.drop_duplicates('sampleId')
if (len(self.cli) < len(cli_original)):
print('Duplicated sampleId exists, drop duplicates and go on')
undup_tuple = [(x, y)
for x, y in zip(self.cli.sampleId, self.cli.studyId)]
self.sv = self.sv[self.sv.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
self.cnv = self.cnv[self.cnv.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
self.mut = self.mut[self.mut.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
# time series
self.cli = self._infer_datetime_columns()
self.crosstab = self.get_crosstab()
return filter_description(json_str)
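# Usage sketch (token and JSON restriction string are placeholders, not real credentials):
#   cdx = CDx_Data()
#   desc = cdx.from_PETA(token='<token>', json_str='{"studyIds": ["..."]}')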
def filter_description(self):
"""retrun filter description when data load from PETA
Returns:
str: description
"""
return filter_description(self.json_str) if self.json_str else None
def from_file(self,
mut_f: str = None,
cli_f: str = None,
cnv_f: str = None,
sv_f: str = None):
"""Get CDx data from files.
Args:
mut_f (str, optional): File in NCBI MAF format containing SNVs and InDels. Defaults to None.
cli_f (str, optional): File name contains clinical info. Defaults to None.
cnv_f (str, optional): File name contains CNV info. Defaults to None.
sv_f (str, optional): File name contains SV info. Defaults to None.
"""
if not mut_f is None:
self.mut = pd.read_csv(mut_f, sep='\t')
if not cnv_f is None:
self.cnv =
|
pd.read_csv(cnv_f, sep='\t')
|
pandas.read_csv
|
'''
Script to create null (chance-level) models for baseline statistics.
'''
import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score, auc, precision_recall_curve, log_loss
from argparse import ArgumentParser
import torch
from torch.nn import BCELoss
df = pd.read_csv(Path('data') / 'nodulelabels.csv')
binary_outcomes = [x for x in df.columns if '_binary' in x]
mdfs = {}
for outcome in binary_outcomes:
y = df[outcome]
X = np.zeros_like(y).reshape(-1,1)
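# Null/baseline model: every feature is zero, so the logistic regression reduces to an
# intercept-only model that always predicts the majority class; the metrics computed below
# therefore give chance-level baselines to compare real models against.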
lr = LogisticRegression().fit(X,y)
pred_probs = lr.predict_proba(X)[:,0]
pred_y = lr.predict(X)
precision, recall, thresholds = precision_recall_curve(y, pred_y)
prauc_val = auc(recall, precision) # recall is passed first because auc() expects its x-coordinate argument to be sorted
criterion = BCELoss()
metrics = {
'auc': [roc_auc_score(y, pred_probs)],
'accuracy': [accuracy_score(y, pred_y)],
'prauc': [prauc_val],
'avg_val_loss': [log_loss(y, pred_y)],
'avg_val_loss2': [criterion(torch.tensor(pred_y).float(), torch.tensor(y).float())]
}
mdf = pd.DataFrame.from_dict(metrics, orient='columns')
mdfs[outcome.split('_')[0]] = mdf
mdf =
|
pd.concat(mdfs, ignore_index=False)
|
pandas.concat
|
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import data_processing0 as dp
import input as ip
import datetime
import math
import time
import copy
def bomb(file_path = '5fold/df_dh.csv', data = None):
d = pd.read_csv(file_path)
f = pd.read_csv(file_path)
wat = pd.read_excel("sitaiqu/kilowatt_everyday_2year.xlsx", sheet_name=dp.SN)
df=d.sample(n=1)
df.reset_index(inplace=True)
date = df.loc[0,'date']
print(date)
wat.rename(columns={wat.columns[0]: "index",
wat.columns[1]: "redidentsID",
wat.columns[2]: "userID",
wat.columns[3]: "meterID",
wat.columns[4]: "date",
wat.columns[5]: "usage",
}, inplace=True)
wat = wat.drop_duplicates(['meterID', 'date'])
wat = wat[wat['usage'] >= 0]
wat['date']=pd.to_datetime(wat['date'])
get_id = wat[wat['date']==date]
x1 = copy.deepcopy(f['super'])
y1 = copy.deepcopy(f['error'])
#plt.scatter(x1, y1, color='r',s=10)
id = dp.SMID
while id==dp.SMID:
get_id = get_id.sample(n=1)
get_id.reset_index(inplace=True)
id = get_id.loc[0,'meterID']
new = wat[(wat['date']>=date) & (wat['meterID']==id)]
# for i in range(0,new.iloc[:,0].size-1):
# new.loc[i,'usage']=new.loc[i,'usage']*(1+i/100)
# sum+= new.loc[i,'usage']*i/100
print(new)
def update(x):
k = new[new['date'] == x.loc['date']]
k.reset_index(inplace=True)
print(k)
k = k.loc[0, 'usage']
i = (pd.to_datetime(x.loc['date']) -
|
pd.to_datetime(date)
|
pandas.to_datetime
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
OptExp (c) University of Manchester 2018
OptExp is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>
Created on Tue Nov 27 16:01:46 2018
@author: pablo
"""
import numpy as np
import pandas as pd
import itertools, re
from scipy.stats import f as FDist, ncf as ncFDist
from .doebase import doeTemplate, promoterList, plasmidList, read_excel
def defineTemplate(parts, genes):
""" Generates the DoE template format from a list of parts and genes
- RefParts.csv: Name, Type, Part
- GeneParts.csv: Name, Type, Part, Step
Type: origin, resistance, promoter, gene
Step: Enzyme step in the pathway (eventually could be implemented
for the other genetic parts)
"""
prom = []
ori = []
for i in parts.index:
ptype = parts.loc[i,'Type']
name = parts.loc[i,'Name']
if ptype == 'promoter':
prom.append(name)
elif ptype == 'origin':
ori.append(name)
for i in range(0,len(prom)):
prom.append(None)
tree = []
gdict = {}
for i in genes.index:
name = genes.loc[i,'Name']
step = "gene%00d" % (int(genes.loc[i,'Step']),)
if step not in tree:
tree.append(step)
if step not in gdict:
gdict[step] = []
gdict[step].append(name)
doe = doeTemplate(tree, origins=ori, promoters=prom, genes=gdict, positional=False)
return doe, parts, genes
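# Input sketch for defineTemplate (column layout per its docstring; all values are made up):
#   RefParts.csv rows: Name,Type -> e.g. "ori1,origin" and "prom1,promoter"
#   GeneParts.csv rows: Name,Type,Step -> e.g. "geneA,gene,1" and "geneB,gene,2"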
def mainDoe(doe,size):
""" Main DoE procedure
"""
fact, partinfo = read_excel( None, doedf=doe )
seed = np.random.randint(10000)
diagnostics = callDoE(fact, size, seed=seed)
return diagnostics
def getDoe(parts, genes,size=32):
""" DoE request from parts and genes files (see defineTemplate)
"""
doe,parts,genes = defineTemplate(parts, genes)
diagnostics = mainDoe(doe,size)
return diagnostics
def doeRequest(f, ftype, size):
""" DoE request from the template format
f: filename
ftype: csv or xlsx
size: lib size
"""
print('Received:',ftype)
if ftype == 'csv':
doe = pd.read_csv( f )
elif ftype == 'xlsx' or ftype == 'xls':
doe = pd.read_excel( f )
else:
doe = pd.read_table( f )
diagnostics = mainDoe(doe,size)
return diagnostics
def evaldes( steps, variants, npromoters, nplasmids, libsize, positional,
outfile=None, random=False ):
""" Generate and evaluate an optimal design of a pathway circuit following the template:
1. Vector: 1 to nplasmids
2. Promoter: 1 to npromoters
3. Gene: 1 to variants
4. Terminator + Promoter: None to npromoters, prob(None)= 0.5
5. Gene: 1 to variants
...
Parameters:
- steps: number of steps in the pathway
- variants: number of variants in each step (currently fixed)
- npromoters: number of promoters in each step
- nplasmids: number of plasmids in each step
- libsize: desired library size
- positional: gene rearrangement is allowed
- outfile: output the doe into outfile if given
- random: random DoE instead of optimal
"""
plasmids = plasmidList(nplasmids)
promoters = promoterList(npromoters)
tree = []
genes = {}
for i in np.arange(steps):
rid = "r%0d" % (i,)
tree.append(rid)
genes[rid] = []
for j in np.arange(variants):
gid = "g%0d_%0d" % (i,j)
genes[rid].append(gid)
doe = doeTemplate( tree, plasmids, promoters, genes, positional )
if outfile is not None:
doe.to_excel( outfile, index=False )
fact, partinfo = read_excel( outfile )
else:
fact, partinfo = read_excel( None, doedf=doe )
seed = np.random.randint(10000)
diagnostics = callDoE(fact, size=libsize, seed=seed)
diagnostics['steps'] = steps
diagnostics['variants'] = variants
diagnostics['npromoters'] = npromoters
diagnostics['nplasmids'] = nplasmids
return diagnostics
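# Example call (a sketch; numbers are arbitrary): evaluate a 3-step pathway with 2 gene
# variants, 2 promoters and 1 plasmid per step, for a 32-construct library:
#   diag = evaldes(steps=3, variants=2, npromoters=2, nplasmids=1, libsize=32, positional=False)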
def callDoE(fact, size, seed, starts=1, RMSE=10,
alpha=0.05, random=False ):
starts = 1
RMSE = 10
alpha = 0.05
try:
factors, fnames, diagnostics = makeDoeOptDes(fact, size=size,
seed=seed, starts=starts,
RMSE= RMSE, alpha=alpha,
random=random )
except:
raise
diagnostics['libsize'] = size
return diagnostics
def makeDoeOptDes(fact, size, seed=None, starts=1040, makeFullFactorial=False, RMSE=1, alpha=0.05, verbose=False, random=False):
""" Full DoE script:
- fact: a dictionary contained the desired design
"""
# To Do: full factorial
factors = []
fnames = []
npos = 0
nfact = 0
for pos in sorted(fact):
name = fact[pos].component+str(pos)
if len(fact[pos].levels) > 1:
nfact += 1
# Currently only working with categorical
# if fact[pos]['component'] != 'gene' and '-' not in fact[pos]['levels']:
# varType = 'Discrete Numeric'
# theLevels = [ x for x in range(1, len(fact[pos]['levels'])+1 ) ]
# factors.append( theLevels )
# fnames.append(name)
# else:
# varType = 'Categorical'
theLevels = [ '"L{}"'.format(x) for x in range(1, len(fact[pos].levels)+1 ) ]
factors.append(set(theLevels))
fnames.append(name)
if fact[pos].positional is not None:
npos += 1
if npos > 1:
# Total possible arrangements in orthogonal latin squares
# varType = 'Categorical'
theLevels = ['"L{}"'.format(x) for x in range(1, npos*(npos-1)+1)]
factors.append( set( theLevels ) )
fnames.append('pos')
nfact += 1
if seed is not None:
np.random.seed( seed )
else:
seed = np.random.randint(100000, size=1)
np.random.seed( seed )
initGrid(factors)
if random:
# If set, perform a random design instead of a D-optimal design
M = randExp( factors, n=int(size) )
J = Deff2(M, factors)
else:
if np.product( [len(x) for x in factors] ) < size:
# raise Exception('Library size is too large!')
# TO DO: make a full factorial
M = fullFactorial( factors )
J = Deff2(M, factors)
size = M.shape[0]
ix = np.arange(size)
np.random.shuffle( ix )
M = M[ix,:]
else:
M, J = CoordExch(factors, n=int(size), runs=2, verb=verbose, mode='coordexch', seed=seed)
if M is None:
raise Exception('No solution')
M1 = MapDesign2(factors, M)
X = mapFactors2( M, factors )
df = pd.DataFrame(M1, columns=fnames)
pows = CatPower(X , factors, RMSE=RMSE, alpha=alpha)
rpvs = RPV(X)
diagnostics = {'J': J, 'pow': pows, 'rpv': rpvs, 'X': X,
'M': M, 'factors': factors, 'fact': fact,
'M1': M1, 'df': df, 'names': fnames, 'seed': seed}
return factors, fnames, diagnostics
def Deff(X):
# D-efficiency
return (100.0/X.shape[0]) * ( np.linalg.det( np.dot( np.transpose( X ), X ) )**(1.0/X.shape[1]))
def Deff2(M, factors):
X = mapFactors2(M, factors)
return (100.0/X.shape[0]) * ( np.linalg.det( np.dot( np.transpose( X ), X ) )**(1.0/X.shape[1]))
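# Deff and Deff2 both implement the usual D-efficiency, 100 * det(X'X)^(1/p) / N,
# where N is the number of runs (rows of X) and p the number of model terms (columns).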
def Dopt(X):
# D-optimality
return np.linalg.det( np.dot( np.transpose( X ), X ) )
def Dopt2(M, factors):
# D-optimality
X = mapFactors2(M, factors)
return np.linalg.det( np.dot( np.transpose( X ), X ) )
def SE(X):
# Estimation efficiency
return np.diag( np.linalg.inv( np.dot( np.transpose( X ), X ) ) )
def RPV(X):
# Relative prediction variance
try:
XXi = np.linalg.inv( np.dot( np.transpose( X ), X ) )
except:
return [np.nan for i in np.arange(X.shape[0])]
return [np.dot( np.dot( np.transpose( X[i,:] ), XXi), X[i,:]) for i in np.arange(X.shape[0])]
def Contrib(X):
cn = []
for i in range(0, X.shape[0]):
cn.append( Dopt( np.vstack( [X[:i,:], X[(i+1):,:]] ) ) )
return cn
def VarAdd(X,xj):
# Variance of adding/removing one experiment
return np.dot( np.dot( np.transpose(xj) , np.linalg.inv( np.dot( np.transpose( X ), X) ) ), xj )
def randExp( factors, n ):
# Generate n random experiments
V = None
for levels in factors:
vnew = np.random.randint(0, len(levels), n)
if V is None:
V = vnew
else:
V = np.vstack( [V, vnew] )
if len(V.shape) == 1:
V = np.expand_dims(V, axis=0)
return np.transpose( V )
#%%
def grid(n, weighted=True):
""" Provide normalized vectors of n-1 dummy variables
Useful for computing the model matrix (X) to
use pseudo-orthogonal terms in the n-1 hypercube.
(Experimental)
In JMP, grid(3) is multiplied by sqrt(2), grid(4) by
sqrt(3), which brings back the weight of the number of
factors
"""
from sklearn.preprocessing import normalize
from sklearn.decomposition import PCA
base = np.eye(n)*2 - 1
pc = PCA(n-1, whiten=True, random_state=0)
bt = pc.fit_transform(base)
W = normalize(bt)
if weighted:
W = W*np.sqrt(n-1)
return W
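# Rough usage sketch: grid(3) gives a 3x2 matrix whose rows encode the 3 levels of a
# categorical factor as 2 pseudo-orthogonal coordinates (row norms sqrt(2) when weighted).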
# Precompute the hypercube grids
gridList = {}
def initGrid(factors):
global gridList
vmax = set( [len(x) for x in factors] )
for i in vmax:
try:
if i < 2:
continue
except:
continue
gridList[i] = grid(i)
#%%
def mapFactors( factors, M ):
# Map a numerical factor into [-1,1] range, create dummy variables for categorical factors
Mn = np.transpose( [np.ones( M.shape[0] )] )
for i in np.arange( len(factors) ):
v = factors[i]
if type(v) == list:
if len(set(v)) > 1:
# Normalize between [-1,+1]
Vn = (2*(M[:,i] - M[:,i].min())/(M[:,i].max()-M[:,i].min()) - 1)
Vn = np.transpose( [Vn] )
else:
Vn = np.transpose( [np.ones( M.shape[0] )] )
else:
if len(v) > 1:
Vn = -np.ones( (M.shape[0],len(v)) )
j = np.arange(M.shape[0])
Vn[j,M[j,i]] = 1
Vn = Vn[:,:-1]
else:
Vn = np.transpose( [np.ones( M.shape[0] )] )
if Mn is None:
Mn = Vn
else:
Mn = np.hstack( [Mn, Vn])
return Mn
def mapFactors2( M, factors ):
# Map a numerical factor into [-1,1] range,
# create orthogonal coordinates for dummy variables for categorical factors
global gridList
# Add column for intercept
Mn = np.transpose( [np.ones( M.shape[0] )] )
for i in np.arange( len(factors) ):
v = factors[i]
Vn = None
if type(v) == list:
if len(set(v)) > 1:
# Normalize between [-1,+1]
Vn = (2*(M[:,i] - M[:,i].min())/(M[:,i].max()-M[:,i].min()) - 1)
Vn = np.transpose( [Vn] )
else:
Vn = np.transpose( [np.ones( M.shape[0] )] )
else:
if len(v) > 1:
# Use grid
j = np.arange(M.shape[0])
Vn = gridList[len(v)][M[j,i],:]
else: # 19/02/13: Constant factor, already contained in the intercept
#Vn = np.transpose( [np.ones( M.shape[0] )] )
pass
if Vn is not None:
if Mn is None:
# Maybe not needed, we already have the intercept
Mn = Vn
else:
Mn = np.hstack( [Mn, Vn])
return Mn
def MapExp( E ):
""" Read a design, transform into X matrix """
# Define factors in the same way as for the library
factors = [ set(np.unique(E[:,i])) for i in np.arange(E.shape[1])]
initGrid(factors)
EE = np.transpose( np.array([ list(np.unique(E[:,i], return_inverse=True)[1]) for i in np.arange(E.shape[1])] ) )
M = mapFactors( factors, EE )
return M, factors, EE
def MapDesign(factors, X):
""" Map back from X to the factors """
M = []
for i in np.arange(X.shape[0]):
row = []
# skip intercept
j = 1
for fa in factors:
levels = sorted(fa)
# If none is set
level = levels[-1]
for l in levels[0:-1]:
if X[i,j] == 1:
level = l
j += 1
row.append(level)
M.append( row )
return np.array( M )
def MapDesign2(factors, M):
""" Map back from M to the factors """
N = []
for i in np.arange(M.shape[0]):
row = []
for j in np.arange(M.shape[1]):
levels = sorted(factors[j])
row.append( levels[ M[i,j] ])
N.append( row )
return np.array( N )
def JMPRead(jmpfile):
""" This is a JMP example: """
# Design Evaluation
# Design Diagnostics
# D Optimal Design
# D Efficiency 87.98414
# G Efficiency 64.62616
# A Efficiency 76.00696
# Average Variance of Prediction 1.229865
# Design Creation Time (seconds) 11
# Read design
if re.search(r'xlsx$', jmpfile):
E = pd.read_excel(jmpfile)
else:
E =
|
pd.read_csv(jmpfile)
|
pandas.read_csv
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from linear_regression import Linear_regression
def Call_myLRmodel(data):
# add ones column
data.insert(0, 'Ones', 1)
# set X (training data) and y (target variable)
cols = data.shape[1]
X = data.iloc[:,0:cols-1]
y = data.iloc[:,cols-1:cols]
# convert to matrices and initialize theta
X = np.array(X.values)
y = np.array(y.values)
theta = np.zeros([1,cols-1])
#
alpha = 0.01
iters = 1000
model = Linear_regression(theta, alpha)
# perform linear regression on the data set
g, cost = model.gradientDescent(X, y, iters)
# get the cost (error) of the model
all_cost = model.computeCost(X, y, g)
'''
# show the curve of Cost
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(np.arange(iters), cost, 'r')
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.set_title('Error vs. Training Epoch')
plt.show()
'''
return g, cost, all_cost
def Call_SklearnLR(data):
# Using sklearn
from sklearn import linear_model
cols = data.shape[1]
# the sklearn_parameters
X_sk = data.iloc[:,0:cols-1]
y_sk = data.iloc[:,cols-1:cols]
X_sk = np.array(X_sk.values)
y_sk = np.array(y_sk.values)
model_sk = linear_model.LinearRegression()
model_sk.fit(X_sk, y_sk)
return model_sk.coef_, model_sk.score(X_sk, y_sk)
if __name__ == "__main__":
path = 'ex1data2.txt'
data = pd.read_csv(path, header=None, names=['Size', 'Bedrooms', 'Price'])
print(data.head())
#Make sure features are on a similar scale,
#which makes gradient descent converge faster
data = (data - data.mean()) / data.std()
print(data.head())
Para_1, _, score_1 = Call_myLRmodel(data)
Para_2, score_2 = Call_SklearnLR(data)
dict = [{'Parameter':Para_1[:,1:], 'Score':score_1},
{'Parameter':Para_2[:,1:], 'Score':score_2}]
df =
|
pd.DataFrame(dict)
|
pandas.DataFrame
|
import xgboost as xgb
import pandas as pd
import numpy as np
import copy
from training import ModelsBaseClass
class XGBoostRegression(ModelsBaseClass.BaseModel):
"""Class containing XGBoost Regression Model"""
def __init__(self, target_column: str, seasonal_periods: int, tree_meth: str = 'auto', learning_rate: float = 0.3,
max_depth: int = 6, subsample: float = 1, colsample_by_tree: float = 1, n_estimators: int = 100,
gamma: float = 0, alpha: int = 0, reg_lambda: int = 1, one_step_ahead: bool = False):
"""
:param target_column: target_column for prediction
:param seasonal_periods: seasonal periodicity
:param tree_meth: tree_method to use
:param learning_rate: boosting learning rate
:param max_depth: maximum depth for base learners
:param subsample: subsample ratio of the training instances
:param colsample_by_tree: subsample ratio of columns for constructing each tree
:param n_estimators: number of trees
:param gamma: minimum loss reduction required to make a further partition on leaf node
:param alpha: l1 regularization term
:param reg_lambda: l2 regularization term
:param one_step_ahead: perform one step ahead prediction
"""
super().__init__(target_column=target_column, seasonal_periods=seasonal_periods, name='XGBoostRegression',
one_step_ahead=one_step_ahead)
self.model = xgb.XGBRegressor(tree_method=tree_meth, objective='reg:squarederror', learning_rate=learning_rate,
max_depth=max_depth, subsample=subsample, colsample_by_tree=colsample_by_tree,
random_state=42, n_estimators=n_estimators, gamma=gamma, alpha=alpha,
reg_lambda=reg_lambda, verbosity=0)
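# Usage sketch (assumes DataFrames `train`/`test` whose columns include the target column):
#   model = XGBoostRegression(target_column='y', seasonal_periods=12, one_step_ahead=False)
#   model.train(train)
#   preds = model.predict(test, train)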
def train(self, train: pd.DataFrame, cross_val_call: bool = False) -> dict:
"""
Train XGB model
:param train: train set
:param cross_val_call: called to perform cross validation
:return dictionary with cross validated scores (if specified)
"""
cross_val_score_dict = {}
if cross_val_call:
cross_val_score_dict_ts, self.model = self.get_cross_val_score(train=train)
cross_val_score_dict_shuf, self.model = self.get_cross_val_score(train=train, normal_cv=True)
cross_val_score_dict = {**cross_val_score_dict_ts, **cross_val_score_dict_shuf}
self.model.fit(X=train.drop([self.target_column], axis=1), y=train[self.target_column])
return cross_val_score_dict
def update(self, train: pd.DataFrame, model: xgb.XGBRegressor) -> xgb.XGBRegressor:
"""
Update existing model due to new samples
:param train: train set with new samples
:param model: model to update
:return: updated model
"""
return model.fit(X=train.drop([self.target_column], axis=1), y=train[self.target_column])
def insample(self, train: pd.DataFrame) -> pd.DataFrame:
"""
Deliver (back-transformed) insample predictions
:param train: train set
:return: DataFrame with insample predictions
"""
insample = pd.DataFrame(data=self.model.predict(data=train.drop([self.target_column], axis=1)),
index=train.index, columns=['Insample'])
return insample
def predict(self, test: pd.DataFrame, train: pd.DataFrame) -> pd.DataFrame:
"""
Deliver (back-transformed) out-of-sample predictions, one step ahead if specified
:param test: test set
:param train: train set
:return: DataFrame with predictions, upper and lower confidence level
"""
if self.one_step_ahead:
train_manip = train.copy()
predict_lst = []
# deep copy model as predict function should not change class model
model = copy.deepcopy(self.model)
for i in range(0, test.shape[0]):
fc = model.predict(data=test.drop([self.target_column], axis=1).iloc[[i]])
train_manip = train_manip.append(test.iloc[[i]])
model = self.update(train=train_manip, model=model)
predict_lst.append(fc)
predict = np.array(predict_lst).flatten()
else:
predict = self.model.predict(data=test.drop([self.target_column], axis=1))
predictions = pd.DataFrame({'Prediction': predict}, index=test.index)
return predictions
def plot_feature_importance(self, importance_type: str = 'weight'):
"""
Plot feature importance for XGB Regressor
:param importance_type: importance type to use
‘weight’: the number of times a feature is used to split the data across all trees.
‘gain’: the average gain across all splits the feature is used in.
‘cover’: the average coverage across all splits the feature is used in.
‘total_gain’: the total gain across all splits the feature is used in.
‘total_cover’: the total coverage across all splits the feature is used in.
"""
feature_important = self.model.get_booster().get_score(importance_type=importance_type)
keys = list(feature_important.keys())
values = list(feature_important.values())
data =
|
pd.DataFrame(data=values, index=keys, columns=["score"])
|
pandas.DataFrame
|
import re
import pandas
import cobra
from fractions import Fraction
def ReadExcel(excel_file, parse="cobra_string", Print=False):
""" parse = "cobra_string" | "cobra_position"
cobra_string
% INPUT
% fileName xls spreadsheet, with one 'Reaction List' and one 'Metabolite List' tab
%
% 'Reaction List' tab: Required headers (case sensitive):
% 'Abbreviation' HEX1
% 'Description' Hexokinase
% 'Reaction' 1 atp[c] + 1 glc-D[c] --> 1 adp[c] + 1 g6p[c] + 1 h[c]
% 'GPR' (3098.3) or (80201.1) or (2645.3) or ...
% 'Genes' 2645.1,2645.2,2645.3,... (optional)
% 'Proteins' Flj22761.1, Hk1.3, Gck.2,... (optional)
% 'Subsystem' Glycolysis
% 'Reversible' 0 (false) or 1 (true)
% 'Lower bound' 0
% 'Upper bound' 1000
% 'Objective' 0 (optional)
% 'Confidence Score' 0,1,2,3,4
% 'EC Number' 2.7.1.1,2.7.1.2
% 'Notes' 'Reaction also associated with EC 2.7.1.2' (optional)
% 'References' PMID:2043117,PMID:7150652,... (optional)
%
% 'Metabolite List' tab: Required headers (case sensitive): (needs to be complete list of metabolites, i.e., if a metabolite appears in multiple compartments it has to be represented in multiple rows. Abbreviations need to overlap with use in Reaction List
% 'Abbreviation' glc-D or glc-D[c]
% 'Description' D-glucose
% 'Neutral formula' C6H12O6
% 'Charged formula' C6H12O6
% 'Charge' 0
% 'Compartment' cytosol
% 'KEGG ID' C00031
% 'PubChem ID' 5793
% 'ChEBI ID' 4167
% 'InChI string' InChI=1/C6H12O6/c7-1-2-3(8)4(9)5(10)6(11)12-2/h2-11H,1H2/t2-,3-,4+,5-,6?/m1/s1
% 'SMILES' OC[C@H]1OC(O)[C@H](O)[C@@H](O)[C@@H]1O
% 'HMDB ID' HMDB00122
%
% OPTIONAL INPUT (may be required for input on unix machines)
% biomassRxnEquation .xls may have a 255 character limit on each cell,
% so pass the biomass reaction separately if it hits this maximum.
%
% OUTPUT
% model COBRA Toolbox model
cobra_position
% INPUT
% excel_file xls spreadsheet, with one 'reactions' and one 'metabolites' tab
%
% 'reactions' tab: Required headers:
% col 0 Abbreviation HEX1
% col 1 Description Hexokinase
% col 2 Reaction 1 atp[c] + 1 glc-D[c] --> 1 adp[c] + 1 g6p[c] + 1 h[c]
% col 3 GPR b0001
% col 4 Genes b0001 (optional: column can be empty)
% col 5 Proteins AlaS (optional: column can be empty)
% col 6 Subsystem Glycolysis
% col 7 Reversible 0
% col 8 Lower bound 0
% col 9 Upper bound 1000
% col 10 Objective 0 (optional: column can be empty)
% col 11 Confidence Score 0,1,2,3,4
% col 12 EC. Number 1.1.1.1
% col 13 Notes N/A (optional: column can be empty)
% col 14 References PMID: 1111111 (optional: column can be empty)
%
% 'metabolites' tab: Required headers: needs to be complete list of metabolites, i.e., if a metabolite appears in multiple compartments it has to be represented in multiple rows. Abbreviations needs to overlap with use in Reaction List
% col 0 Abbreviation
% col 1 Description
% col 2 Neutral formula
% col 3 Charge formula
% col 4 Charge
% col 5 Compartment
% col 6 KEGG ID
% col 7 PubChem ID
% col 8 ChEBI ID
% col 9 InChI string
% col 10 SMILES
% col 11 HMDB ID
%
%
% OUTPUT
% model cobrapy model """
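# Usage sketch (hypothetical file name; sheet layout as described in the docstring above):
#   model = ReadExcel('my_model.xlsx', parse='cobra_string', Print=False)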
excel = pandas.ExcelFile(excel_file)
for sheet in excel.sheet_names:
if sheet == "Reaction List" and parse == "cobra_string":
reactions = excel.parse(sheet,index_col=None)
elif 'reaction' in sheet.lower():
reactions = excel.parse(sheet,index_col=None)
if sheet == "Metabolite List" and parse == "cobra_string":
metabolites = excel.parse(sheet,index_col=None)
elif 'metabolite' in sheet.lower():
metabolites = excel.parse(sheet,index_col=None)
cobra_reaction_position = ['Abbreviation','Description','Reaction','GPR','Genes','Proteins','Subsystem','Reversible','Lower bound','Upper bound','Objective','Confidence Score','EC Number','Notes','References']
cobra_metabolite_position = ['Abbreviation','Description','Neutral formula','Charged formula','Charge','Compartment','KEGG ID','PubChem ID','ChEBI ID','InChI string','SMILES','HMDB ID']
if parse == "cobra_position":
if len(reactions.columns) > 15:
reactions = reactions.iloc[:,:15]
reactions.columns = cobra_reaction_position
else:
reactions.columns = cobra_reaction_position[:len(reactions.columns)]
if len(metabolites.columns) > 12:
metabolites = metabolites.iloc[:,:12]
metabolites.columns = cobra_metabolite_position
else:
metabolites.columns = cobra_metabolite_position[:len(metabolites.columns)]
model = cobra.Model()
metabolite_dic = {}
element_re = re.compile("([A-Z][a-z]?)([0-9.]+[0-9.]?|(?=[A-Z])?)")
for met in metabolites.index:
met_row = metabolites.loc[met] # pandas.Series of the metabolites
met_id = str(met_row['Abbreviation'])
#met_name = str(met_row[1]) if pandas.notnull(met_row[1]) else None
met_name = str(met_row['Description']) if ('Description' in met_row.index) and pandas.notnull(met_row['Description']) else None
if ('Charged formula' in met_row.index) and pandas.notnull(met_row['Charged formula']):
met_formula = str(met_row['Charged formula'])
elif ('Neutral formula' in met_row.index) and pandas.notnull(met_row['Neutral formula']):
if ('Charge' in met_row.index) and pandas.notnull(met_row['Charge']):
met_formula = ''
tmp_formula = str(met_row['Neutral formula'])
tmp_formula = tmp_formula.replace("*", "")
parsed = element_re.findall(tmp_formula)
for (element, count) in parsed:
if element != "H":
met_formula += element + str(count)
else:
if count == '':
count = 1
count = float(count)
if count.is_integer():
count = int(count)
charge = float(met_row['Charge'])
if charge.is_integer():
charge = int(charge)
count += charge
if count == 1:
met_formula += element
elif count != 0:
met_formula += element + str(count)
else:
met_formula = None
else:
met_formula = None
met_compartment = str(met_row['Compartment']) if 'Compartment' in met_row.index and pandas.notnull(met_row['Compartment']) else None
metabolite = cobra.Metabolite(met_id, formula=met_formula, name=met_name, compartment=met_compartment)
if ('Charge' in met_row.index) and
|
pandas.notnull(met_row['Charge'])
|
pandas.notnull
|
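# Editor's note: the record above ends mid-expression by design (it is a prompt/completion pair
# whose target API is pandas.notnull). The short, hypothetical sketch below is not part of the
# original model-reading code; it only illustrates how pandas.notnull distinguishes filled from
# empty spreadsheet cells when choosing between the 'Charged formula' and 'Neutral formula'
# columns of a metabolite sheet. The rows are invented.
import pandas

_metabolites = pandas.DataFrame(
    {"Abbreviation": ["glc-D[c]", "h2o[c]"],
     "Charged formula": ["C6H12O6", None],
     "Neutral formula": [None, "H2O"]})
for _, _row in _metabolites.iterrows():
    if pandas.notnull(_row["Charged formula"]):
        _formula = str(_row["Charged formula"])
    elif pandas.notnull(_row["Neutral formula"]):
        _formula = str(_row["Neutral formula"])
    else:
        _formula = None
    print(_row["Abbreviation"], _formula)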
# -*- encoding: utf-8 -*-
#
# Copyright © 2014-2015 eNovance
#
# Authors: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Time series data manipulation, better with pancetta."""
import functools
import operator
import re
import msgpack
import numpy
import pandas
import six
AGGREGATION_METHODS = set(('mean', 'sum', 'last', 'max', 'min',
'std', 'median', 'first', 'count'))
class NoDeloreanAvailable(Exception):
"""Error raised when trying to insert a value that is too old."""
def __init__(self, first_timestamp, bad_timestamp):
self.first_timestamp = first_timestamp
self.bad_timestamp = bad_timestamp
super(NoDeloreanAvailable, self).__init__(
"%s is before %s" % (bad_timestamp, first_timestamp))
class UnAggregableTimeseries(Exception):
"""Error raised when timeseries cannot be aggregated."""
def __init__(self, reason):
self.reason = reason
super(UnAggregableTimeseries, self).__init__(reason)
class TimeSerie(object):
def __init__(self, timestamps=None, values=None):
self.ts = pandas.Series(values, timestamps).sort_index()
def __eq__(self, other):
return (isinstance(other, TimeSerie)
and self.ts.all() == other.ts.all())
def __getitem__(self, key):
return self.ts[key]
def set_values(self, values):
t = pandas.Series(*reversed(list(zip(*values))))
self.ts = t.combine_first(self.ts).sort_index()
def __len__(self):
return len(self.ts)
@staticmethod
def _timestamps_and_values_from_dict(values):
v = tuple(
zip(*dict(
(pandas.Timestamp(k), v)
for k, v in six.iteritems(values)).items()))
if v:
return v
return (), ()
@classmethod
def from_dict(cls, d):
"""Build a time series from a dict.
The dict format must use datetimes as keys and numeric values as values.
:param d: The dict.
:returns: A TimeSerie object
"""
return cls(*cls._timestamps_and_values_from_dict(d['values']))
def to_dict(self):
return {
'values': dict((timestamp.value, float(v))
for timestamp, v
in six.iteritems(self.ts.dropna())),
}
@staticmethod
def _serialize_time_period(value):
if value:
return six.text_type(value.n) + value.rule_code
class BoundTimeSerie(TimeSerie):
def __init__(self, timestamps=None, values=None,
block_size=None, back_window=0):
"""A time serie that is limited in size.
Used to represent the full-resolution buffer of incoming raw
datapoints associated with a metric.
The maximum size of this time serie is expressed in a number of block
size, called the back window.
When the timeserie is truncated, a whole block is removed.
You cannot set a value using a timestamp that is prior to the last
timestamp minus this number of blocks. By default, a back window of 0
does not allow you to go back in time prior to the current block being
used.
"""
super(BoundTimeSerie, self).__init__(timestamps, values)
self.block_size = pandas.tseries.frequencies.to_offset(block_size)
self.back_window = back_window
self._truncate()
def __eq__(self, other):
return (isinstance(other, BoundTimeSerie)
and super(BoundTimeSerie, self).__eq__(other)
and self.block_size == other.block_size
and self.back_window == other.back_window)
def set_values(self, values, before_truncate_callback=None):
if self.block_size is not None and not self.ts.empty:
# Check that the smallest timestamp does not go too much back in
# time.
# TODO(jd) convert keys to timestamp to be sure we can subtract?
smallest_timestamp = min(map(operator.itemgetter(0), values))
first_block_timestamp = self._first_block_timestamp()
if smallest_timestamp < first_block_timestamp:
raise NoDeloreanAvailable(first_block_timestamp,
smallest_timestamp)
super(BoundTimeSerie, self).set_values(values)
if before_truncate_callback:
before_truncate_callback(self)
self._truncate()
@classmethod
def from_dict(cls, d):
"""Build a time series from a dict.
The dict format must use datetimes as keys and numeric values as values.
:param d: The dict.
:returns: A TimeSerie object
"""
timestamps, values = cls._timestamps_and_values_from_dict(d['values'])
return cls(timestamps, values,
block_size=d.get('block_size'),
back_window=d.get('back_window'))
def to_dict(self):
basic = super(BoundTimeSerie, self).to_dict()
basic.update({
'block_size': self._serialize_time_period(self.block_size),
'back_window': self.back_window,
})
return basic
def _first_block_timestamp(self):
ts = self.ts.resample(self.block_size)
return (ts.index[-1] - (self.block_size * self.back_window))
def _truncate(self):
"""Truncate the timeserie."""
if self.block_size is not None and not self.ts.empty:
# Change that to remove the amount of block needed to have
# the size <= max_size. A block is a number of "seconds" (a
# timespan)
self.ts = self.ts[self._first_block_timestamp():]
class AggregatedTimeSerie(TimeSerie):
_AGG_METHOD_PCT_RE = re.compile(r"([1-9][0-9]?)pct")
@staticmethod
def _percentile(a, q):
# TODO(jd) Find a way to compute all the percentile in one pass as
# numpy can do numpy.percentile(a, q=[75, 90, 95])
if len(a) > 0:
return numpy.percentile(a, q)
def __init__(self, timestamps=None, values=None,
max_size=None,
sampling=None, aggregation_method='mean'):
"""A time serie that is downsampled.
Used to represent the downsampled timeserie for a single
granularity/aggregation-function pair stored for a metric.
"""
super(AggregatedTimeSerie, self).__init__(timestamps, values)
self.aggregation_method = aggregation_method
m = self._AGG_METHOD_PCT_RE.match(aggregation_method)
if m:
self.aggregation_method_func = functools.partial(
self._percentile, q=float(m.group(1)))
else:
self.aggregation_method_func = aggregation_method
self.sampling = pandas.tseries.frequencies.to_offset(sampling)
self.max_size = max_size
def __eq__(self, other):
return (isinstance(other, AggregatedTimeSerie)
and super(AggregatedTimeSerie, self).__eq__(other)
and self.max_size == other.max_size
and self.sampling == other.sampling
and self.aggregation_method == other.aggregation_method)
def set_values(self, values):
super(AggregatedTimeSerie, self).set_values(values)
# See comments in update()
self._resample(min(values, key=operator.itemgetter(0))[0])
self._truncate()
@classmethod
def from_dict(cls, d):
"""Build a time series from a dict.
The dict format must use datetimes as keys and numeric values as values.
:param d: The dict.
:returns: A TimeSerie object
"""
timestamps, values = cls._timestamps_and_values_from_dict(d['values'])
return cls(timestamps, values,
max_size=d.get('max_size'),
sampling=d.get('sampling'),
aggregation_method=d.get('aggregation_method', 'mean'))
def to_dict(self):
d = super(AggregatedTimeSerie, self).to_dict()
d.update({
'aggregation_method': self.aggregation_method,
'max_size': self.max_size,
'sampling': self._serialize_time_period(self.sampling),
})
return d
def _truncate(self):
"""Truncate the timeserie."""
if self.max_size is not None:
# Remove empty points if any that could be added by aggregation
self.ts = self.ts.dropna()[-self.max_size:]
def _resample(self, after):
if self.sampling:
self.ts = self.ts[after:].resample(
self.sampling,
how=self.aggregation_method_func).combine_first(
self.ts[:after][:-1])
def update(self, ts):
index = ts.ts.index
first_timestamp = index[0]
last_timestamp = index[-1]
# Build a new time serie excluding all data points in the range of the
# timeserie passed as argument
new_ts = self.ts[:first_timestamp].combine_first(
self.ts[last_timestamp:])
# Build a new timeserie where we replaced the timestamp range covered
# by the timeserie passed as argument
self.ts = ts.ts.combine_first(new_ts)
# Resample starting from the first timestamp we received
# TODO(jd) So this only works correctly because we expect that we are
# not going to replace a range in the middle of our timeserie. So we re
# resample EVERYTHING FROM first timestamp. We should rather resample
# from first timestamp AND TO LAST TIMESTAMP!
self._resample(first_timestamp)
self._truncate()
class TimeSerieArchive(object):
def __init__(self, full_res_timeserie, agg_timeseries):
"""A raw data buffer and a collection of downsampled timeseries.
Used to represent the set of AggregatedTimeSeries for the range of
granularities supported for a metric (for a particular aggregation
function).
In addition, a single BoundTimeSerie acts as the buffer for full-
resolution datapoints feeding into the eager aggregation.
"""
self.full_res_timeserie = full_res_timeserie
self.agg_timeseries = sorted(agg_timeseries,
key=operator.attrgetter("sampling"))
@classmethod
def from_definitions(cls, definitions, aggregation_method='mean',
back_window=0):
"""Create a new collection of archived time series.
:param definition: A list of tuple (sampling, max_size)
:param aggregation_method: Aggregation function to use.
:param back_window: Number of block to use as back window.
"""
definitions = sorted(definitions, key=operator.itemgetter(0))
# The block size is the coarse grained archive definition
block_size = definitions[-1][0]
# Limit the main timeserie to a timespan mapping
return cls(
BoundTimeSerie(
block_size=
|
pandas.tseries.offsets.Nano(block_size * 10e8)
|
pandas.tseries.offsets.Nano
|
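# Editor's note: the completion above converts a block size expressed in seconds into a
# pandas.tseries.offsets.Nano offset (1 s == 1e9 ns). The standalone sketch below is illustrative
# only and separate from the time-series module above; it shows that conversion next to the
# to_offset()/resample() calls the classes rely on, using an invented 60-second block size.
import pandas

_block_size_seconds = 60
_as_nanos = pandas.tseries.offsets.Nano(int(_block_size_seconds * 1e9))
_as_offset = pandas.tseries.frequencies.to_offset("%ds" % _block_size_seconds)
_serie = pandas.Series(
    [1.0, 2.0, 3.0, 4.0],
    index=pandas.date_range("2015-01-01", periods=4, freq="30s"))
print(_as_nanos)
print(_as_offset)
print(_serie.resample("60s").mean())  # modern spelling of resample(..., how='mean')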
import binascii
import datetime
from functools import lru_cache
import os
import pandas as pd
import sqlalchemy
import sqlalchemy.engine.url
import sys
from telemetrydisc.util import LOCAL_DATA_PATH, LOCAL_DB_PATH, LOG_COLUMNS
@lru_cache()
def get_engine(db: str):
if db == "local":
return sqlalchemy.create_engine(f"sqlite:///{LOCAL_DB_PATH}")
elif db == "remote":
server = 'telemetry-disc-data.postgres.database.azure.com'
username = 'tsmanner@telemetry-disc-data'
password = '<PASSWORD>'
url = sqlalchemy.engine.url.URL("postgresql", username, password, server, 5432, "postgres")
return sqlalchemy.create_engine(url, connect_args={"sslmode": "require"})
raise ValueError(f"unrecognized database '{db}'")
def get_logs_table():
return pd.read_sql(
"SELECT * FROM logs",
get_engine("local").connect(),
index_col="log_crc",
parse_dates=["log_date"]
)
def get_raw_data(log_crc: int):
return pd.read_sql(
f"SELECT * FROM raw_data WHERE log_crc={log_crc}",
get_engine("local").connect(),
index_col="timeMS"
# index_col=["timeMS", "log_crc"]
)
def init_db():
# If the local database doesn't exist yet, create it
if not os.path.exists(LOCAL_DB_PATH):
if not os.path.exists(LOCAL_DATA_PATH):
print(f"Creating local data directory '{LOCAL_DATA_PATH}'.")
os.mkdir(LOCAL_DATA_PATH)
print(f"Creating local database '{LOCAL_DB_PATH}'.")
open(LOCAL_DB_PATH, "x").close()
remote_engine = get_engine("remote")
remote_con = remote_engine.connect()
local_engine = get_engine("local")
local_con = local_engine.connect()
for con in [remote_con, local_con]:
con.execute("CREATE TABLE IF NOT EXISTS logs(log_crc INTEGER PRIMARY KEY, log_name TEXT, log_date TIMESTAMP)")
con.execute(
'CREATE TABLE IF NOT EXISTS raw_data('
'log_crc BIGINT, '
'"timeMS" BIGINT, '
'"accelX" FLOAT, '
'"accelY" FLOAT, '
'"accelZ" FLOAT, '
'"gyroX" FLOAT, '
'"gyroY" FLOAT, '
'"gyroZ" FLOAT, '
'"magX" FLOAT, '
'"magY" FLOAT, '
'"magZ" FLOAT, '
'PRIMARY KEY("log_crc", "timeMS")'
')'
)
def reset_db():
engine = get_engine("remote")
con = engine.connect()
meta = sqlalchemy.MetaData()
meta.reflect(bind=engine)
[con.execute(f"DROP TABLE {table}") for table in meta.tables]
def calculate_crc(filename):
data = pd.read_csv(filename, names=LOG_COLUMNS, index_col="timeMS")
crc_data = pd.DataFrame(data.index, index=data.index)
for column in LOG_COLUMNS[1:]:
crc_data[column] = data[column]
crc = 0
for i in data.index:
crc = binascii.crc32(str(list(data.loc[i])).encode(), crc)
return crc
def load_new_log(filename: str):
local_engine = get_engine("local")
local_con = local_engine.connect()
log_crc = calculate_crc(filename)
crc_table = pd.read_sql("SELECT * FROM logs", local_con, index_col="log_crc", parse_dates=["log_date"])
if log_crc not in crc_table.index:
print(f"Processing '{filename}({log_crc})'...", end="")
crc_table.loc[log_crc] = pd.Series(
{
"log_name": os.path.basename(filename),
"log_date": datetime.datetime.now()
},
)
crc_table.to_sql("logs", local_con, if_exists="replace")
data = pd.read_csv(filename, names=LOG_COLUMNS)
data["log_crc"] = log_crc
data.set_index(["log_crc", "timeMS"], inplace=True)
sys.stdout.flush()
data.to_sql("raw_data", local_con, if_exists="append")
print("done.")
def sync_log_data():
remote_engine = get_engine("remote")
remote_con = remote_engine.connect()
local_engine = get_engine("local")
local_con = local_engine.connect()
meta = sqlalchemy.MetaData()
meta.reflect(bind=remote_engine)
remote_crc_table =
|
pd.read_sql("SELECT * FROM logs", remote_con, index_col="log_crc")
|
pandas.read_sql
|
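# Editor's note: a self-contained, illustrative version of the pandas.read_sql call the truncated
# line above was about to make. It runs against an in-memory SQLite connection with an invented
# row instead of the project's local/remote SQLAlchemy engines, so it needs nothing beyond the
# standard library and pandas.
import sqlite3
import pandas as pd

_con = sqlite3.connect(":memory:")
_con.execute("CREATE TABLE logs(log_crc INTEGER PRIMARY KEY, log_name TEXT, log_date TIMESTAMP)")
_con.execute("INSERT INTO logs VALUES (1234, 'example.csv', '2018-01-01 00:00:00')")
_logs = pd.read_sql("SELECT * FROM logs", _con, index_col="log_crc")
print(_logs)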
import pandas as pd
class GazetteerAdmLoc:
adm1_gztr = 'gazetteer/prov_gazetteer.csv'
adm2_gztr = 'gazetteer/kota_kab_gazetteer.csv'
adm3_gztr = 'gazetteer/kecamatan_gazetteer.csv'
def __init__(self):
df_adm1 =
|
pd.read_csv(self.adm1_gztr)
|
pandas.read_csv
|
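# Editor's note: the class above reads province/regency/district gazetteer CSVs from disk. The
# tiny sketch below is hypothetical (column names and rows are invented) and uses io.StringIO in
# place of the real files so the same pandas.read_csv call can run anywhere.
import io
import pandas as pd

_csv_text = "adm1_code,adm1_name\n11,ACEH\n12,SUMATERA UTARA\n"
_df_adm1 = pd.read_csv(io.StringIO(_csv_text))
print(_df_adm1.head())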
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
# csv.DictReader uses first line in file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
sys.exit('file: %s, %s' % (filename, e))
print(dr)
self.sci_name = dr.ix[:,'Scientific Name']
self.com_name = dr.ix[:,'Common Name']
self.taxa = dr.ix[:,'Taxa']
self.order = dr.ix[:,'Order']
self.usfws_id = dr.ix[:,'USFWS Species ID (ENTITY_ID)']
self.body_wgt= dr.ix[:,'BW (g)']
self.diet_item = dr.ix[:,'Food item']
self.h2o_cont = dr.ix[:,'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
self.num_apps_max = pd.Series([], dtype="int", name="num_apps_max")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = pd.Series([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = pd.Series([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = pd.Series([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.mineau_sca_fact_wgt = pd.Series([], dtype="float", name="mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = pd.Series([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = pd.Series([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = pd.Series([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = pd.Series([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = pd.Series([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = pd.Series([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = pd.Series([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = pd.Series([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = pd.Series([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = pd.Series([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = pd.Series([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec =
|
pd.Series([], dtype="float", name="cbt_mamm_sensory_noec")
|
pandas.Series
|
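# Editor's note: the inputs class above declares dozens of empty, typed, named pandas.Series that
# are filled in later from an input DataFrame. The two lines below are a minimal, illustrative
# instance of that pattern; the values are invented.
import pandas as pd

_app_rate_min = pd.Series([], dtype="float", name="app_rate_min")            # declared empty
_app_rate_min = pd.Series([1.25, 2.5], dtype="float", name="app_rate_min")   # later populated
print(_app_rate_min.name, _app_rate_min.dtype, list(_app_rate_min))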
"""
Investigate how battery depletion rate depends upon
air temperature (and day of year).
"""
import numpy as np
import pandas as pd
import math
import re
import os
import sys
import base64
import time
import utm
import datetime
from html.parser import HTMLParser
from django.core.mail import send_mail
from django.conf import settings
# from helper_functions import color_negative_red, col_shade, set_up_bokeh_ts_figure
# from helper_functions import load_data, find_newest_message
import statsmodels.api as sm
import scipy
from bokeh.io import curdoc
from bokeh.layouts import row, column, widgetbox
from bokeh.models import ColumnDataSource, Range1d, LinearAxis, Spacer, Band, CDSView
from bokeh.models.filters import Filter, GroupFilter
from bokeh.models import LassoSelectTool, BoxSelectTool, Legend, LegendItem
from bokeh.models import Label
from bokeh.models.widgets import PreText, Select, Slider, Div, Paragraph, Button
from bokeh.plotting import figure, output_file, show
from bokeh.palettes import Spectral11, Spectral6, Viridis11
from bokeh.resources import CDN
from bokeh.embed import file_html
try:
from functools import lru_cache
except ImportError:
# Python 2's stdlib does not have lru_cache, so let's just
# create a dummy decorator to avoid crashing
print("WARNING: Cache for this example is available on Python 3 only.")
def lru_cache():
def dec(f):
def _(*args, **kws):
return f(*args, **kws)
return _
return dec
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(BASE_DIR, 'data/')
IMG_DIR = os.path.join(BASE_DIR, "images/")
REPORT_DIR = os.path.join(BASE_DIR, "reports/")
FILENAME = 'Neon_Test_Site.csv'
@lru_cache()
def load_data():
fname = os.path.join(DATA_DIR, FILENAME)
df =
|
pd.read_csv(fname, header=0)
|
pandas.read_csv
|
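# Editor's note: a minimal, hypothetical illustration of the lru_cache()-around-read_csv pattern
# used by load_data() above; the CSV content is invented and kept in memory so the sketch is
# self-contained, and the second call returns the cached DataFrame without re-reading anything.
import io
from functools import lru_cache
import pandas as pd

_CSV_TEXT = ("timestamp,battery_v,air_temp_c\n"
             "2018-01-01 00:00,3.95,-4.2\n"
             "2018-01-01 01:00,3.94,-4.8\n")

@lru_cache()
def _load_data():
    return pd.read_csv(io.StringIO(_CSV_TEXT), header=0, parse_dates=["timestamp"])

print(_load_data().dtypes)
print(_load_data() is _load_data())  # True: the cached result is reused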
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
# series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
# ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
# ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
# fffffffuuuuuuuuuuuu
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
|
assert_series_equal(left <= right, expected)
|
pandas.util.testing.assert_series_equal
|
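Illustrative aside (not part of the test file above): a minimal, self-contained sketch of the NaN comparison semantics these tests exercise, using the older pandas.util.testing import path that the api field names; newer pandas exposes the same helper as pandas.testing.assert_series_equal.
# Elementwise comparison with NaN yields False for == and True for !=.
import numpy as np
import pandas as pd
from pandas.util.testing import assert_series_equal  # older path; see pandas.testing in newer releases
left = pd.Series(['a', np.nan, 'c'])
right = pd.Series(['a', np.nan, 'd'])
assert_series_equal(left == right, pd.Series([True, False, False]))
assert_series_equal(left != right, pd.Series([False, True, True]))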
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import io
import re
import pandas as pd
import tensorflow as tf
from PIL import Image
from object_detection.utils import dataset_util
from collections import namedtuple
import glob
import xml.etree.ElementTree as ET
def xml_to_csv(self):
fh = open(self.txt_path, "r")
xml_list = []
for line in fh:
file = os.path.join(line.strip()+".xml")
print(file)
# for xml_file in glob.glob(self.xmlpath + '\\' + file):
xml_file = os.path.join(self.xmlpath, file)
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
value = (root.find('filename').text,
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
xml_list.append(value)
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
self.xml_df = pd.DataFrame(xml_list, columns=column_name)
fh.close()
def create_label_dict(self):
number = []
label = []
with open(self.pbtxt_path, encoding='utf-8') as f:
txt = f.read()
lbyl = txt.splitlines()
for i in range(len(lbyl)):
data = lbyl[i].strip()
if "id:" in data:
num = re.findall(r'\d+', data)
number.append(int(num[0]))
elif "name:" in data:
name = data[data.find("'") + 1:-1]
label.append(name)
self.dictionary = dict(zip(label, number))
# TO-DO replace this with label map
def class_text_to_int(row_label, dictionary):
if row_label in dictionary:
return dictionary[row_label]
else:
return None
def split(df, group):
data = namedtuple('data', ['filename', 'object'])
gb = df.groupby(group)
return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]
def create_tf_example(group, path, dictionary):
with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename))+'.jpg', 'rb') as fid:
#with tf.compat.v1.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
#with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
filename = group.filename.encode('utf8')
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for index, row in group.object.iterrows():
xmins.append(row['xmin'] / width)
xmaxs.append(row['xmax'] / width)
ymins.append(row['ymin'] / height)
ymaxs.append(row['ymax'] / height)
classes_text.append(row['class'].encode('utf8'))
classes.append(class_text_to_int(row['class'], dictionary))
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(filename),
'image/source_id': dataset_util.bytes_feature(filename),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
def generate_tf_record(self):
for csv_files in os.listdir(self.output_directory):
if csv_files.endswith(".csv"):
print(csv_files)
csv_input = os.path.join(self.output_directory, csv_files)
file = re.split(r'_', csv_files)[0]
rcd_file = file + ".record"
output_path = os.path.join(self.output_directory, rcd_file)
writer = tf.python_io.TFRecordWriter(output_path)
#writer = tf.compat.v1.python_io.TFRecordWriter(output_path)
examples =
|
pd.read_csv(csv_input)
|
pandas.read_csv
|
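A minimal sketch of the read_csv plus groupby/namedtuple split pattern used in generate_tf_record above; the CSV text and file names here are toy values, not the original data.
import io
from collections import namedtuple
import pandas as pd
csv_text = """filename,width,height,class,xmin,ymin,xmax,ymax
img1,640,480,cat,10,20,100,200
img1,640,480,dog,50,60,150,260
img2,800,600,cat,5,5,80,90
"""
examples = pd.read_csv(io.StringIO(csv_text))
data = namedtuple('data', ['filename', 'object'])
gb = examples.groupby('filename')
# one namedtuple per image, carrying all of its bounding-box rows
grouped = [data(name, gb.get_group(name)) for name in gb.groups]
print([(g.filename, len(g.object)) for g in grouped])  # [('img1', 2), ('img2', 1)]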
import time
import pandas as pd
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import numba
from numba import jit
import numpy as np
import matplotlib.pyplot
from utility import ols_lstsq, ols_lstsq_raw
df =
|
pd.read_pickle('generated_ols_data.pickle')
|
pandas.read_pickle
|
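A small round-trip sketch for pd.read_pickle; the file name below is hypothetical, not the generated_ols_data.pickle used above.
import numpy as np
import pandas as pd
df_demo = pd.DataFrame({'x': np.arange(5), 'y': np.arange(5) * 2.0})
df_demo.to_pickle('demo_ols_data.pickle')   # write a pickled frame to disk
restored = pd.read_pickle('demo_ols_data.pickle')
assert restored.equals(df_demo)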
"""TrackML scoring metric"""
__authors__ = ['<NAME>', '<NAME>', '<NAME>',
'<NAME>']
import numpy
import pandas
def _analyze_tracks(truth, submission):
"""Compute the majority particle, hit counts, and weight for each track.
Parameters
----------
truth : pandas.DataFrame
Truth information. Must have hit_id, particle_id, and weight columns.
submission : pandas.DataFrame
Proposed hit/track association. Must have hit_id and track_id columns.
Returns
-------
pandas.DataFrame
Contains track_id, nhits, major_particle_id, major_particle_nhits,
major_nhits, and major_weight columns.
"""
# true number of hits for each particle_id
particles_nhits = truth['particle_id'].value_counts(sort=False)
total_weight = truth['weight'].sum()
# combined event with minimal reconstructed and truth information
event = pandas.merge(truth[['hit_id', 'particle_id', 'weight']],
submission[['hit_id', 'track_id']],
on=['hit_id'], how='left', validate='one_to_one')
event.drop('hit_id', axis=1, inplace=True)
event.sort_values(by=['track_id', 'particle_id'], inplace=True)
# ASSUMPTIONs: 0 <= track_id, 0 <= particle_id
tracks = []
# running sum for the reconstructed track we are currently in
rec_track_id = -1
rec_nhits = 0
# running sum for the particle we are currently in (in this track_id)
cur_particle_id = -1
cur_nhits = 0
cur_weight = 0
# majority particle with most hits up to now (in this track_id)
maj_particle_id = -1
maj_nhits = 0
maj_weight = 0
for hit in event.itertuples(index=False):
# we reached the next track so we need to finish the current one
if (rec_track_id != -1) and (rec_track_id != hit.track_id):
# could be that the current particle is the majority one
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# store values for this track
tracks.append((rec_track_id, rec_nhits, maj_particle_id,
particles_nhits[maj_particle_id], maj_nhits,
maj_weight / total_weight))
# setup running values for next track (or first)
if rec_track_id != hit.track_id:
rec_track_id = hit.track_id
rec_nhits = 1
cur_particle_id = hit.particle_id
cur_nhits = 1
cur_weight = hit.weight
maj_particle_id = -1
maj_nhits = 0
maj_weight = 0
continue
# hit is part of the current reconstructed track
rec_nhits += 1
# reached new particle within the same reconstructed track
if cur_particle_id != hit.particle_id:
# check if last particle has more hits than the majority one
# if yes, set the last particle as the new majority particle
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# reset running values for the current particle
cur_particle_id = hit.particle_id
cur_nhits = 1
cur_weight = hit.weight
# hit belongs to the same particle within the same reconstructed track
else:
cur_nhits += 1
cur_weight += hit.weight
# last track is not handled inside the loop
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# store values for the last track
tracks.append((rec_track_id, rec_nhits, maj_particle_id,
particles_nhits[maj_particle_id], maj_nhits, maj_weight / total_weight))
cols = ['track_id', 'nhits',
'major_particle_id', 'major_particle_nhits',
'major_nhits', 'major_weight']
return
|
pandas.DataFrame.from_records(tracks, columns=cols)
|
pandas.DataFrame.from_records
|
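A sketch of the per-track summary frame that _analyze_tracks returns, built with DataFrame.from_records from tuples; the values are toy numbers, not real event data.
import pandas
cols = ['track_id', 'nhits',
        'major_particle_id', 'major_particle_nhits',
        'major_nhits', 'major_weight']
# one tuple per reconstructed track, as accumulated in the loop above
toy_tracks = [(0, 4, 11, 5, 3, 0.02), (1, 6, 12, 6, 6, 0.05)]
summary = pandas.DataFrame.from_records(toy_tracks, columns=cols)
print(summary.dtypes)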
import numpy as np
import pandas as pd
from sklearn import preprocessing
class LinearRegression(object):
def __init__(self,filePath):
self.data = pd.read_csv(filePath,header=None)
self.dataArray = np.array([])
self.W = np.array([])
self.labelEncoder = preprocessing.LabelEncoder()
def preprocess(self,seed=None):
self.data[4] = self.labelEncoder.fit_transform(self.data[4])
self.dataArray = self.data.to_numpy()
if seed != None:
np.random.seed(seed)
np.random.shuffle(self.dataArray)
def getWeights(self):
return self.W
def setWeights(self,W):
self.W = W
def getPreprocessedData(self):
return self.dataArray
def predict(self,A,W):
predictedOutput = np.dot(A,W)
po = np.where(predictedOutput <= 0.45,0,predictedOutput)
po = np.where((po>0.45)&(po<=1.45),1,po)
po = np.where(po>1.45,2,po)
return po
def calculateBeta(self,A,Y):
AtA_inverse = np.linalg.inv(np.dot(A.T,A))
AtY = np.dot(A.T,Y)
W = np.dot(AtA_inverse,AtY)
return W
def train(self,fold=3):
partition = np.array_split(self.dataArray,fold)
leastError = 100.0
for k in range(fold):
xTrain = np.array([])
xTest = np.array([])
for f in range(fold):
if k==f:
xTest = partition[f]
else:
if xTrain.size==0:
xTrain = partition[f]
else:
xTrain = np.concatenate((xTrain,partition[f]))
aTrain = xTrain[:,:4]
aTest = xTest[:,:4]
yTrain = xTrain[:,4]
yTest = xTest[:,4]
yTrain = np.expand_dims(yTrain,axis=1)
yTest = np.expand_dims(yTest,axis=1)
W = self.calculateBeta(aTrain,yTrain)
po = self.predict(aTest,W)
e = self.calculatePredictionError(po,yTest)
if e<leastError:
leastError = e
self.W = W
def calculatePredictionError(self,predicted,target):
res = np.equal(predicted,target)
return 100 * ((np.size(res) - np.count_nonzero(res))/np.size(res))
def writeOutputToFile(self,i,o):
out =
|
pd.DataFrame(i)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
from datetime import datetime, timedelta
def get_data():
# Load json data
with open('../data/json_file.json') as data_file:
patients = json.load(data_file)
print("JSON file loaded")
# Features computation
print("Features computation launched...")
visits = []
for patient in patients.values():
for i in range(1, len(patient['visits']) + 1):
visits.append(patient['visits'][str(i)])
n_visits = len(visits)
print("n_visits = %s" % n_visits)
# Features DataFrame with encounter_nums index
encounter_nums = [int(visit.get('encounter_num')) for visit in visits]
X = pd.DataFrame(index=encounter_nums)
# Time vector & censoring indicator
print("Adding labels...", end="")
next_visit = [visit.get('next_visit') for visit in visits]
T = np.array([1e10 if str(t) == 'none' else t for t in next_visit]).astype(
int)
end_dates = pd.to_datetime([visit.get('end_date') for visit in visits])
start_dates = pd.to_datetime([visit.get('start_date') for visit in visits])
C = pd.to_datetime('2016-01-15 00:00:00') - end_dates
days, seconds = C.days, C.seconds
C = days * 24 + seconds // 3600 # in hours (discrete)
delta = (T <= C).astype(int)
Y = T
Y[delta == 0] = C[delta == 0]
labels = pd.DataFrame({'Y': Y, 'delta': delta}, index=encounter_nums)
X = pd.concat([X, labels], axis=1)
print("Done!")
# Basic features
print("Adding basic features...", end="")
# Add also patient_num & encounter_num for future random choice
patient_num, encounter_num = [], []
sex, baseline_HB, genotype_SS, age, transfu_count = [], [], [], [], []
LS_ALONE, LS_INACTIVE, MH_ACS, MH_AVN, MH_DIALISIS = [], [], [], [], []
MH_HEART_FAILURE, MH_ISCHEMIC_STROKE, MH_LEG_ULCER = [], [], []
MH_NEPHROPATHY, MH_PHTN, MH_PRIAPISM, MH_RETINOPATHY = [], [], [], []
OPIOID_TO_DISCHARGE, ORAL_OPIOID, USED_MORPHINE = [], [], []
USED_OXYCODONE, duration, previous_visit, rea = [], [], [], []
for patient in patients.values():
for _ in range(1, len(patient['visits']) + 1):
patient_num.append(patient['patient_num'])
sex.append(1 if int(patient['sex']) == 1 else 0)
baseline_HB.append(patient['baseline_HB'])
genotype_SS.append(patient['genotype_SS'])
for visit in visits:
encounter_num.append(visit.get('encounter_num'))
age.append(visit.get('age'))
rea.append(visit.get('rea'))
LS_ALONE.append(visit.get('LS_ALONE'))
LS_INACTIVE.append(visit.get('LS_INACTIVE'))
MH_ACS.append(visit.get('MH_ACS'))
MH_AVN.append(visit.get('MH_AVN'))
MH_DIALISIS.append(visit.get('MH_DIALISIS'))
MH_HEART_FAILURE.append(visit.get('MH_HEART_FAILURE'))
MH_ISCHEMIC_STROKE.append(visit.get('MH_ISCHEMIC_STROKE'))
MH_LEG_ULCER.append(visit.get('MH_LEG_ULCER'))
MH_NEPHROPATHY.append(visit.get('MH_NEPHROPATHY'))
MH_PHTN.append(visit.get('MH_PHTN'))
MH_PRIAPISM.append(visit.get('MH_PRIAPISM'))
MH_RETINOPATHY.append(visit.get('MH_RETINOPATHY'))
ORAL_OPIOID.append(visit.get('ORAL_OPIOID'))
USED_MORPHINE.append(visit.get('USED_MORPHINE'))
USED_OXYCODONE.append(visit.get('USED_OXYCODONE'))
duration.append(visit.get('duration'))
transfu_count.append(visit.get('transfu_count'))
MH_ACS = [1 if int(x) == 2 else x for x in MH_ACS]
MH_AVN = [1 if int(x) == 2 else x for x in MH_AVN]
MH_DIALISIS = [1 if int(x) == 2 else x for x in MH_DIALISIS]
MH_HEART_FAILURE = [1 if int(x) == 2 else x for x in MH_HEART_FAILURE]
MH_ISCHEMIC_STROKE = [1 if int(x) == 2 else x for x in MH_ISCHEMIC_STROKE]
MH_LEG_ULCER = [1 if int(x) == 2 else x for x in MH_LEG_ULCER]
MH_NEPHROPATHY = [1 if int(x) == 2 else x for x in MH_NEPHROPATHY]
MH_PHTN = [1 if int(x) == 2 else x for x in MH_PHTN]
MH_PRIAPISM = [1 if int(x) == 2 else x for x in MH_PRIAPISM]
MH_RETINOPATHY = [1 if int(x) == 2 else x for x in MH_RETINOPATHY]
X_basic = pd.DataFrame(
{'patient_num': patient_num, 'encounter_num': encounter_num, 'sex': sex,
'start_dates': start_dates, 'end_dates': end_dates,
'genotype_SS': genotype_SS, 'age': age, 'rea': rea,
'LS_INACTIVE': LS_INACTIVE, 'MH_ACS': MH_ACS, 'MH_AVN': MH_AVN,
'MH_DIALISIS': MH_DIALISIS, 'MH_HEART_FAILURE': MH_HEART_FAILURE,
'MH_ISCHEMIC_STROKE': MH_ISCHEMIC_STROKE,
'MH_LEG_ULCER': MH_LEG_ULCER, 'LS_ALONE': LS_ALONE,
'MH_NEPHROPATHY': MH_NEPHROPATHY, 'MH_PHTN': MH_PHTN,
'MH_PRIAPISM': MH_PRIAPISM, 'MH_RETINOPATHY': MH_RETINOPATHY,
'ORAL_OPIOID': ORAL_OPIOID, 'baseline_HB': baseline_HB,
'USED_MORPHINE': USED_MORPHINE, 'USED_OXYCODONE': USED_OXYCODONE,
'duration': duration, 'transfu_count': transfu_count},
index=encounter_nums)
X_basic = X_basic.apply(pd.to_numeric, errors='ignore')  # convert_objects() was removed from pandas; per-column coercion is the closest equivalent
X = pd.concat([X, X_basic], axis=1)
print("Done!")
# Bio data
print("Adding bio features...", end="")
bio_data, bio_names = pd.DataFrame(), []
for visit in visits:
encounter_num = int(visit.get('encounter_num'))
tmp =
|
pd.DataFrame()
|
pandas.DataFrame
|
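A sketch of the index-aligned pd.concat(axis=1) pattern that get_data relies on when joining labels and feature blocks on the encounter_num index; values below are toy data.
import pandas as pd
idx = [101, 102, 103]  # stand-in encounter_num values
labels = pd.DataFrame({'Y': [24, 72, 48], 'delta': [1, 0, 1]}, index=idx)
basics = pd.DataFrame({'age': [34, 51, 29], 'sex': [1, 0, 1]}, index=idx)
X = pd.concat([labels, basics], axis=1)  # columns joined, rows aligned on the shared index
print(X.columns.tolist())  # ['Y', 'delta', 'age', 'sex']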
# @package mktDataAnalysis
# mktDataAnalysis class in charge of creating the needed indicators using the market data
# @author <NAME>
import sys
sys.path.insert(0, r'')
import json
import os
from tradingBot.src.utils.exceptions import BadKwargs, SymbolNotSupported
import datetime
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
pd.options.mode.chained_assignment = None
class mktDataAnalysis():
## mktDataAnalysis
# @class mktDataAnalysis
paths = {"mainPath": "tradingBot/dataBase/{}/{}", "subPaths": [
{"id": "indicators", "subPath": "/indicators"},
{"id": "intervals", "subPath": "/intervals"}
]}
def __init__(self, coin=None, pair=None, coinBotObj=None):
self.coinBotObj = coinBotObj
self.dBIntervals = coinBotObj.tmfrmVar
mainPath = self.paths['mainPath'].format(coin, pair)
self.indicPath = mainPath + self.paths['subPaths'][0]["subPath"]
self.DBPath = mainPath + self.paths['subPaths'][1]["subPath"]
self.rptIndic = {}
self.indicIntervals = []
self.getIndInterval = []
self.coin = coin
self.pair = pair
#This section will be deleted in future
if not os.path.exists(self.indicPath):
os.makedirs(self.indicPath)
#TODO ACCESS THE ACTUALIZED DB FROM CB
#for nameDB in self.dBIntervals:
# setattr(self, nameDB, getattr(self.coinBotObj, nameDB))
#TODO IF WE ELIMINATE ALL INDICATORS THEN WHY WE OPEN THEM HERE.
#self.openInd()
#self.delAllIndicator()
def newIndicator(self, indicator=None, period=None, interval=None):
# @fn newIndicator
# @brief Adds a new indicator
# @param indicator The name of the indicator. Send string (e.g., "WMA")
# @param period Period at which the indicator will be working. Send int (e.g., 100)
# @param interval Interval of the analysis. Send tuple with int and string. (e.g., (1, "hour"))
# @exception False If indicator is already created.
if not isinstance(period, int):
return False
interval = self._getIntvl(timeframe=interval)
id = str(period) + indicator + interval
for indicFiles in self.getIndInterval:
indicInterval = getattr(self, indicFiles["indicator_int"])
for line in indicInterval['indicators']:
if line["id"] == id:
self.rptIndic[id] = self.rptIndic[id] + 1 if id in self.rptIndic else 1
return False
"""
if indicator == "RSI" and indicator == \
line['indicator'] and interval == line['interval']:
return False
"""
if not self.checkIntDB(interval=interval):
return False
newInd = {
"indicator": indicator,
"interval": interval,
"period": period,
"id": id,
"start": 0,
"end": 0,
"data": []
}
newInd['data'] = self.actlIndData(indicator=indicator, period=period, interval=interval,\
start=None, end=None, int_unix=None)
if interval not in self.indicIntervals:
self.indicIntervals.append(interval)
if not newInd['data']:
return False
newInd['start'] = newInd['data'][0]['timestamp']
newInd['end'] = newInd['data'][-1]['timestamp']
indic = getattr(self, "indic_" + interval)
indic["indicators"].append(newInd)
setattr(self, "indic_" + interval, indic)
def delIndicator(self, id=None):
# @fn delIndicator
# @brief Deletes one indicator
# @param id ID of the indicator. Send string (e.g., "80SMA1h")
for indicFiles in self.getIndInterval:
newInd = {"indicators": []}
indicInterval = getattr(self, indicFiles["indicator_int"])
for line in indicInterval['indicators']:
if line["id"] == id:
if id in self.rptIndic:
self.rptIndic[id] = self.rptIndic[id] - 1
if self.rptIndic[id] >= 0:
newInd["indicators"].append(line)
else:
self.rptIndic[id] = -1
if not line["id"] == id:
newInd["indicators"].append(line)
setattr(self, indicFiles["indicator_int"], newInd)
def delAllIndicator(self):
# @fn delAllIndicator
# @brief Deletes all indicators that we currently have.
for indicFiles in self.getIndInterval:
newInd = {"indicators": []}
setattr(self, indicFiles["indicator_int"], newInd)
def actlIndicators(self):
# @fn actlIndicators
# @brief Actualize all indicators from all intervals
for indicFiles in self.getIndInterval:
newInd = {"indicators": []}
indicInterval = getattr(self, indicFiles["indicator_int"])
for line in indicInterval['indicators']:
info = {
"indicator": line['indicator'],
"interval": line['interval'],
"period": line['period'],
"id": line['id'],
"start": line['data'][0]['timestamp'],
"end": line['data'][-1]['timestamp'],
"data": line['data']
}
int_unix = info['data'][1]['timestamp'] - info['data'][0]['timestamp']
newData = self.actlIndData(indicator=info['indicator'], period=info['period'],\
interval=info['interval'], start=info['start'], end=info['end'], int_unix=int_unix)
if newData[0]['timestamp'] == info['end']:
info['data'][-1] = newData[0]
else:
del info['data'][0:len(newData)]
info['data'] += newData
info['start'] = info['data'][0]['timestamp']
info['end'] = info['data'][-1]['timestamp']
newInd["indicators"].append(info)
setattr(self, indicFiles["indicator_int"], newInd)
def actlIndData(self, indicator=None, period=None, interval=None, start=None, end=None, int_unix=None):
# @fn actlIndicators
# @brief Send the indicator to be actualized to its respective function.
# @param indicator The name of the indicator. Send string (e.g., "WMA")
# @param period Period at which the indicator will be working. Send int (e.g., 100)
# @param interval Interval of the analysis. Send tuple with int and string. (e.g., (1, "hour"))
# @param start Start of indicator in unix format. Send int (e.g., 1616032800000)
# @param end End of indicator in unix format. Send int (e.g., 1618146000000)
# @param int_unix Interval of the indicator expressed in unix format. Send int (e.g., 3600000)
# @return data Data actualized in array format with json's as values.
if "EMA" == indicator:
data = self.indEMA(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "RSI" == indicator:
data = self.indRSI(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "SMA" == indicator:
data = self.indSMA(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "WMA" == indicator:
data = self.indWMA(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "ATR" == indicator:
data = self.indATR(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
elif "BB" == indicator:
data = self.indBB(period=period, interval=interval, start=start, end=end, int_unix=int_unix)
else:
return False
return data
def viewIndicators(self):
# @fn viewIndicators
# @brief View all indicators
# @return data All indicators in command line
indica = {"indicators": []}
for indicFiles in self.getIndInterval:
indicInterval = getattr(self, indicFiles["indicator_int"])
for line in indicInterval['indicators']:
newInd = {
"indicator": line['indicator'],
"interval": line['interval'],
"period": line['period'],
"id": line['id'],
}
indica["indicators"].append(newInd)
data = pd.DataFrame.from_dict(indica['indicators'], orient='columns')
data = data.sort_values(by=['interval', 'indicator', 'period'])
data = data.reindex(columns=['interval', 'indicator', 'period', 'id'])
print(data.to_string(index=False))
def indRSI(self, period=None, interval=None, start=None, end=None, int_unix=None):
# @fn indRSI
# @brief Actualize each indicator depending on all variables.
# @param period Period at which the indicator will be working. Send int (e.g., 100)
# @param interval Interval of the analysis. Send tuple with int and string. (e.g., (1, "hour"))
# @param start Start of indicator in unix format. Send int (e.g., 1616032800000)
# @param end End of indicator in unix format. Send int (e.g., 1618146000000)
# @param int_unix Interval of the indicator expressed in unix format. Send int (e.g., 3600000)
# @return actData Data actualized in array format with json's as values.
def calcData(data=None, kLines=None):
# @fn calcData
# @brief
# @param data
# @param kLines
# @exception
# @return actData
delta = data['close'].diff(1)
delta.dropna(inplace=True)
positive = delta.copy()
negative = delta.copy()
positive[positive < 0] = 0
negative[negative > 0] = 0
average_gain = positive.rolling(window=period).mean()
average_loss = abs(negative.rolling(window=period).mean())
relative_strength = average_gain / average_loss
rsi = 100.0 - (100.0 / (1.0 + relative_strength))
actData = pd.DataFrame()
actData['timestamp'] = data['timestamp']
actData['value'] = rsi
actData = json.loads(actData.to_json(orient="records"))
return actData[kLines:]
data = getattr(self.coinBotObj, interval)
data = pd.DataFrame.from_dict(data, orient='columns')
data["close"] = pd.to_numeric(data["close"])
startDB = data.iloc[0]['timestamp']
endDB = data.iloc[-1]['timestamp']
if int_unix == None or end > data.iloc[-1]['timestamp']:
actData = calcData(data=data, kLines=0)
elif end == endDB:
actData = calcData(data=data[-(period + 1):], kLines=-1)
else:
opData, kLines = self.checkLen(period=period, end=end, endDB=endDB, int_unix=int_unix)
actData = calcData(data=data[opData:], kLines=kLines)
return actData
def indEMA(self, period=None, interval=None, start=None, end=None, int_unix=None):
# @fn indEMA
# @brief Actualize each indicator depending on all variables.
# @param period Period at which the indicator will be working. Send int (e.g., 100)
# @param interval Interval of the analysis. Send tuple with int and string. (e.g., (1, "hour"))
# @param start Start of indicator in unix format. Send int (e.g., 1616032800000)
# @param end End of indicator in unix format. Send int (e.g., 1618146000000)
# @param int_unix Interval of the indicator expressed in unix format. Send int (e.g., 3600000)
# @return actData Data actualized in array format with json's as values.
def calcData(data=None, kLines=None):
# @fn calcData
# @brief
# @param data
# @param kLines
# @exception
# @return actData
ema = data['close'].ewm(span=period, adjust=False).mean()
actData =
|
pd.DataFrame()
|
pandas.DataFrame
|
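A sketch of the indicator math behind indEMA and indRSI above: an exponential moving average via ewm(span=period) and an RSI from rolling gain/loss means (an equivalent clip-based formulation of the positive/negative split used above); prices and period are toy values.
import pandas as pd
close = pd.Series([10, 11, 12, 11, 13, 14, 13, 15, 16, 15], dtype=float)
period = 3
ema = close.ewm(span=period, adjust=False).mean()          # exponential moving average
delta = close.diff(1).dropna()
gain = delta.clip(lower=0).rolling(window=period).mean()   # average gain
loss = (-delta.clip(upper=0)).rolling(window=period).mean()  # average loss (positive)
rsi = 100.0 - 100.0 / (1.0 + gain / loss)
print(ema.iloc[-1], rsi.iloc[-1])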
import numpy as np
import pandas as pd
from scipy.signal import lfilter
p_ebni_min_val = pd.read_csv('../data/p_ebni_min_val.csv', header=None, names=['input'], dtype=np.float64)
len_p_ebni_min = p_ebni_min_val.shape[0]
K = np.float64(64 * 60)
qsim = pd.DataFrame(0, index=np.arange(len_p_ebni_min), columns=['output'], dtype=np.float64)
S = np.float64(0)
for t in range(1, len_p_ebni_min):
S = S + p_ebni_min_val.iloc[t, 0]
qsim.iloc[t, 0] = S / K
S = S - qsim.iloc[t, 0]
dummy = lfilter(np.ones(60), 1, qsim['output'], axis=0)
output_03 =
|
pd.DataFrame(dummy[59::60], columns=['output'], dtype=np.float64)
|
pandas.DataFrame
|
import os
import re
import sys
import math
import json
import bokeh
import geopandas
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from bokeh.io.doc import curdoc
from bokeh.layouts import layout
from bokeh.plotting import figure
from bokeh.models.glyphs import Text
from bokeh.application import Application
from bokeh.models.callbacks import CustomJS
from bokeh.plotting import show as plt_show
from bokeh.palettes import brewer,OrRd,YlGn
from bokeh.models.widgets import Button,Select
from bokeh.tile_providers import Vendors,get_provider
from bokeh.io import output_notebook,show,output_file
from bokeh.application.handlers import FunctionHandler
from bokeh.layouts import widgetbox,row,column,gridplot
from bokeh.models import ColumnDataSource,Slider,HoverTool,Select,Div,Range1d,WMTSTileSource,BoxZoomTool,TapTool,Panel,Tabs
from bokeh.models import GeoJSONDataSource,LinearColorMapper,ColorBar,NumeralTickFormatter,LinearAxis,Grid,Label,Band,Legend,LegendItem
verbose=False
enable_GeoJSON_saving=False
DATA_UPDATE_DATE='20-October-2021'
FORECASTS_UPDATE_DATE='19-October-2021'
def apply_corrections(input_df):
for state in list(input_df['state'].values):
input_df.loc[input_df['state']==state,'state']=re.sub('[^A-Za-z ]+', '',str(state))
input_df.loc[input_df['state']=='Karanataka','state']='Karnataka'
input_df.loc[input_df['state']=='Himanchal Pradesh','state']='Himachal Pradesh'
input_df.loc[input_df['state']=='Telengana','state']='Telangana'
input_df.loc[input_df['state']=='Dadra and Nagar Haveli','state']='Dadra and Nagar Haveli and Daman and Diu'
input_df.loc[input_df['state']=='Dadar Nagar Haveli','state']='Dadra and Nagar Haveli and Daman and Diu'
input_df.loc[input_df['state']=='Dadra Nagar Haveli','state']='Dadra and Nagar Haveli and Daman and Diu'
input_df.loc[input_df['state']=='Daman & Diu','state']='Dadra and Nagar Haveli and Daman and Diu'
input_df.loc[input_df['state']=='Daman and Diu','state']='Dadra and Nagar Haveli and Daman and Diu'
return input_df
def os_style_formatter(input_str):
try:
os_env=os.environ['OS']
except:
os_env='unknown'
return str(input_str).replace('/', "\\") if os_env=='Windows_NT' else str(input_str)
try:
India_statewise=geopandas.read_file('https://raw.githubusercontent.com/MoadComputer/covid19-visualization/main/data/GeoJSON_assets/India_statewise_minified.geojson')
India_stats=pd.read_csv('https://raw.githubusercontent.com/MoadComputer/covid19-visualization/main/data/Coronavirus_stats/India/Population_stats_India_statewise.csv')
covid19_data=pd.read_csv('https://raw.githubusercontent.com/MoadComputer/covid19-visualization/main/data/Coronavirus_stats/India/COVID19_India_statewise.csv')
preds_df=pd.read_csv('https://raw.githubusercontent.com/MoadComputer/covid19-visualization/main/data/Coronavirus_stats/India/experimental/output_preds.csv')
except:
India_GeoJSON_repoFile=os_style_formatter(
'./GitHub/MoadComputer/covid19-visualization/data/GeoJSON_assets/India_statewise_minified.geojson')
covid19_statewise_repoFile=os_style_formatter(
'./GitHub/MoadComputer/covid19-visualization/data/Coronavirus_stats/India/COVID19_India_statewise.csv')
India_statewise_statsFile=os_style_formatter(
'./GitHub/MoadComputer/covid19-visualization/data/Coronavirus_stats/India/Population_stats_India_statewise.csv')
saved_predsFile=os_style_formatter(
'./GitHub/MoadComputer/covid19-visualization/data/Coronavirus_stats/India/experimental/output_preds.csv')
if os.path.exists(India_GeoJSON_repoFile):
India_statewise=geopandas.read_file(India_GeoJSON_repoFile)
print('Reading India GeoJSON file from saved repo ...')
else:
sys.exit('Failed to read GeoJSON file for India ...')
if os.path.exists(covid19_statewise_repoFile):
covid19_data=pd.read_csv(covid19_statewise_repoFile)
print('Reading India COVID19 file from saved repo ...')
else:
sys.exit('Failed to read India COVID19 file ...')
if os.path.exists(India_statewise_statsFile):
India_stats=pd.read_csv(India_statewise_statsFile)
print('Reading India stats file from saved repo ...')
else:
sys.exit('Failed to read India stats file ...')
if os.path.exists(saved_predsFile):
preds_df=pd.read_csv(saved_predsFile)
else:
print('Advanced mode disabled ...')
advanced_mode=False
India_statewise=apply_corrections(India_statewise)
if enable_GeoJSON_saving:
India_statewise.to_file("India_statewise_minified.geojson", driver='GeoJSON')
India_statewise=India_statewise.to_crs("EPSG:3395")
India_stats=apply_corrections(India_stats)
if len(covid19_data.columns) ==6:
del covid19_data['active_cases']
covid19_data=apply_corrections(covid19_data)
covid19_data=
|
pd.merge(covid19_data, India_stats, on='state', how='left')
|
pandas.merge
|
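A sketch of the left merge used above to attach population stats to the COVID-19 counts on the cleaned state column; rows below are toy values.
import pandas as pd
covid = pd.DataFrame({'state': ['Kerala', 'Goa'], 'total_cases': [100, 40]})
stats = pd.DataFrame({'state': ['Kerala', 'Goa'], 'population': [35_000_000, 1_500_000]})
# keep every row of the left frame, pull matching population figures from the right
merged = pd.merge(covid, stats, on='state', how='left')
print(merged)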
import pandas as pd
import numpy as np
from scipy import optimize
from scipy.integrate import odeint
class SIR_Model():
'''This class implements the SIR model of epidemiology.
Args:
-------
df: pd.DataFrame of large dataset
country: select country
population: total population of selected country
percentage: percentage of the total population which is susceptible
'''
def __init__(self, df, country, population, percentage=5):
self.df = df
self.country = country
self.population = population
self.percentage = percentage
self._get_SIR_initials()
def _calculate_susceptible(self):
'''Calculation of the total susceptible population based on the selected percentage'''
self.N0 = (self.percentage/100)*self.population # max susceptible population, 5% of the population by default
def _get_index(self, percentage):
'''Index of the first day on which infections exceed the given percentage of the susceptible population N0
'''
self._calculate_susceptible()
self.idx_I0 = np.where(self.df[self.country] > self.N0*(percentage/100))[0][0]
def _initial_infected(self, percentage=0.05):
'''Initially infected population based on percentage.
Args:
----
percentage: user specified percentage
Initially infected = Susceptible population * percentage (user-specified)
'''
self._get_index(percentage)
self.ydata = np.array(self.df[self.country][self.idx_I0:])
def _set_time(self):
'''Set time period based on initially infected index
'''
self._initial_infected()
self.t = np.arange(len(self.ydata))
def _get_SIR_initials(self, R0=0):
'''Set up initial values for SIR model.
The recovered count R0 is initially set to zero.
'''
self._set_time()
self.I0 = self.ydata[0]
self.S0 = self.N0-self.I0
self.R0 = R0
self.SIR = np.array([self.S0, self.I0, self.R0])
def calculate_SIR(self, SIR, t, beta, gamma):
''' Simple SIR model
S: susceptible population
I: infected people
R: recovered people
beta: infection rate
gamma: recovery rate
t: time-step --> required for solving differential equation
The overall condition is that the sum of the changes (differences) is 0
dS+dI+dR=0
S+I+R= N (constant size of population)
'''
S,I,R = SIR
dS_dt = -beta*S*I/self.N0
dI_dt = beta*S*I/self.N0 - gamma*I
dR_dt = gamma*I
return dS_dt, dI_dt, dR_dt
def fit_odeint(self, x, beta, gamma):
''' Helper function for the integration
'''
self._get_SIR_initials()
return odeint(self.calculate_SIR, (self.S0, self.I0, self.R0), self.t, args=(beta, gamma))[:,1]
def fitted_curve(self, printout=True):
'''Fit the curve using optimize.curve_fit from the scipy library.
'''
self.popt, self.pcov = optimize.curve_fit(self.fit_odeint, self.t, self.ydata)
self.perr = np.sqrt(np.diag(self.pcov))
if printout:
print('standard deviation errors : ',str(self.perr), ' start infect:',self.ydata[0])
print("Optimal parameters: beta =", self.popt[0], " and gamma = ", self.popt[1])
self.fitted = self.fit_odeint(self.t, *self.popt)
# get the final fitted curve
return self.fitted
def get_optimum_beta_gamma(df, country, susceptable_perc=5, period='default'):
# get world population
# path handling for the dashboard (the relative path differs when run from there)
try:
df_population = pd.read_csv('../data/processed/world_population.csv',sep=';', index_col=0)
population = df_population.T[country].values[0]
except:
df_population = pd.read_csv('data/processed/world_population.csv',sep=';', index_col=0)
population = df_population.T[country].values[0]
if period != 'default':
# set periods
periods = []
periods.append([39,70])
for i in np.arange(70,len(df)-1,period)[:-1]:
periods.append([i, i+period])
periods.append([np.arange(70,len(df)-1,period)[-1],len(df)-1])
names = ['Period '+ str(n) for n in range(len(periods))]
time_period = [str(df_confirmed.date[p[0]])[:10]+' to '+str(df_confirmed.date[p[1]])[:10] for p in periods]
else:
# rather than using fixed-length periods, we use the following hand-picked periods for a better approximation
periods = [[39,70], [70,80], [80,100], [100,130], [130,180], [180,len(df)-1]]
time_period = ['March 2020 ',
'1-10th April 2020 ',
'10-30th April 2020 ',
'May 2020 ',
'June-July 2020 ',
'From August 2020 ']
names = ['Virus spreaded ',
'People awared ',
'People take precautions ',
'Start recovering ',
'Constant spread ',
'Second wave ']
# fit curve
fit_line = np.array([])
dyn_beta = []
dyn_gamma = []
dyn_R0 = []
summary = []
for n, element in enumerate(periods):
try:
OBJ_SIR = SIR_Model(df[element[0]:element[1]], country= country, population = population, percentage=susceptable_perc)
fit_line = np.concatenate([fit_line, OBJ_SIR.fitted_curve(printout=False)])
dyn_beta.append(OBJ_SIR.popt[0])
dyn_gamma.append(OBJ_SIR.popt[1])
dyn_R0.append(OBJ_SIR.popt[0]/OBJ_SIR.popt[1])
except:
periods = periods[n+1:]
dyn_beta.append(np.nan)
dyn_gamma.append(np.nan)
dyn_R0.append(np.nan)
summary.append({'Time period':time_period[n],
'Actions': names[n],
'Beta': abs(round(dyn_beta[n],3)),
'Gamma': abs(np.round(dyn_gamma[n],3)),
'R0': abs(np.round(dyn_R0[n],3))})
# get starting point
idx = SIR_Model(df, country= country, population = population).idx_I0
return fit_line, idx,
|
pd.DataFrame(summary)
|
pandas.DataFrame
|
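A sketch of the per-period summary frame returned by get_optimum_beta_gamma: a list of dicts, one per fitted period, passed straight to pd.DataFrame; the beta/gamma/R0 numbers below are placeholders, not fitted values.
import pandas as pd
summary = [
    {'Time period': 'March 2020 ', 'Actions': 'Virus spreaded ', 'Beta': 0.32, 'Gamma': 0.09, 'R0': 3.56},
    {'Time period': '1-10th April 2020 ', 'Actions': 'People awared ', 'Beta': 0.21, 'Gamma': 0.10, 'R0': 2.10},
]
df_summary = pd.DataFrame(summary)  # dict keys become columns, one row per period
print(df_summary[['Time period', 'Beta', 'Gamma', 'R0']])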
import os
import json
import glob
import SimpleITK as sitk
import pandas as pd
from dcm_read import dicom_metainfo,dicom2array
train_path=r"..\..\dataset\lumbar_train51\train"
json_path=r"..\..\dataset\lumbar_train51\lumbar_train51_annotation.json"
# train_path=r"C:\Users\Administrator\Desktop\lumbar\dataset\lumbar_train51\train"
# json_path=r"C:\Users\Administrator\Desktop\lumbar\dataset\lumbar_train51\lumbar_train51_annotation.json"
def get_info(train_path,json_path):
annotation_info = pd.DataFrame(columns=('studyUid','seriesUid','instanceUid','annotation'))
json_df = pd.read_json(json_path)
for idx in json_df.index:
studyUid = json_df.loc[idx,"studyUid"]
seriesUid = json_df.loc[idx,"data"][0]['seriesUid']
instanceUid = json_df.loc[idx,"data"][0]['instanceUid']
annotation = json_df.loc[idx,"data"][0]['annotation']
row = pd.Series({'studyUid':studyUid,'seriesUid':seriesUid,'instanceUid':instanceUid,'annotation':annotation})
annotation_info = annotation_info.append(row,ignore_index=True)
dcm_paths = glob.glob(os.path.join(train_path,"**","**.dcm"))
tag_list = ['0020|000d','0020|000e','0008|0018']
dcm_info =
|
pd.DataFrame(columns=('dcmPath','studyUid','seriesUid','instanceUid'))
|
pandas.DataFrame
|
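A sketch of the row-accumulation pattern in get_info. DataFrame.append, used above, was removed in pandas 2.x; collecting dicts and building the frame once is the equivalent modern idiom (annotation values below are toy placeholders).
import pandas as pd
rows = [
    {'studyUid': 's1', 'seriesUid': 'se1', 'instanceUid': 'i1', 'annotation': {}},
    {'studyUid': 's2', 'seriesUid': 'se2', 'instanceUid': 'i2', 'annotation': {}},
]
# build the annotation table in one call instead of repeated append()
annotation_info = pd.DataFrame(rows, columns=('studyUid', 'seriesUid', 'instanceUid', 'annotation'))
print(len(annotation_info))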
"""Main class and helper functions.
"""
import os
from enum import Enum
from collections import OrderedDict
from functools import reduce
from pathlib import Path
from typing import Any, Union, Optional
from typing import Iterable, Sized, Sequence, Mapping, MutableMapping
from typing import Tuple, List, Dict, KeysView
from copy import deepcopy
import numpy as np
from numpy import ma
import pandas as pd
from numpy.lib.recfunctions import rec_drop_fields
from pandas.core.index import RangeIndex
from pandas.api.types import is_string_dtype, is_categorical
from scipy import sparse
from scipy.sparse import issparse
from scipy.sparse.sputils import IndexMixin
from natsort import natsorted
# try importing zarr
try:
from zarr.core import Array as ZarrArray
except ImportError:
class ZarrArray:
def __repr__(self):
return 'mock zarr.core.Array'
# try importing zappy
try:
from zappy.base import ZappyArray
except ImportError:
class ZappyArray:
def __repr__(self):
return 'mock zappy.base.ZappyArray'
from . import h5py
from .layers import AnnDataLayers
from . import utils
from .utils import Index, get_n_items_idx
from .logging import anndata_logger as logger
from .compat import PathLike
class StorageType(Enum):
Array = np.ndarray
Masked = ma.MaskedArray
Sparse = sparse.spmatrix
ZarrArry = ZarrArray
ZappyArry = ZappyArray
@classmethod
def classes(cls):
print(ZarrArray)
return tuple(c.value for c in cls.__members__.values())
class BoundRecArr(np.recarray):
"""A :class:`numpy.recarray` to which fields can be added using ``.['key']``.
To enable this, it is bound to an instance of AnnData.
"""
_attr_choices = ['obsm', 'varm']
def __new__(cls, input_array: np.ndarray, parent: Any, attr: str):
"""
Parameters
----------
input_array
A (structured) numpy array.
parent
Any object to which the BoundRecArr shall be bound to.
attr
The name of the attribute as which it appears in parent.
"""
arr = np.asarray(input_array).view(cls)
arr._parent = parent
arr._attr = attr
return arr
def __array_finalize__(self, obj: Any):
if obj is None: return
self._parent = getattr(obj, '_parent', None)
self._attr = getattr(obj, '_attr', None)
def __reduce__(self) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any]]:
pickled_state = super().__reduce__()
new_state = pickled_state[2] + (self.__dict__, )
return pickled_state[0], pickled_state[1], new_state
def __setstate__(self, state: Sequence[Mapping[str, Any]]):
for k, v in state[-1].items():
self.__setattr__(k, v)
super().__setstate__(state[0:-1])
def copy(self, order='C') -> 'BoundRecArr':
new = super().copy()
new._parent = self._parent
return new
def flipped(self) -> 'BoundRecArr':
new_attr = (self._attr_choices[1] if self._attr == self._attr_choices[0]
else self._attr_choices[0])
return BoundRecArr(self, self._parent, new_attr)
def keys(self) -> Tuple[str, ...]:
return self.dtype.names
def __setitem__(self, key: str, arr: np.ndarray):
if not isinstance(arr, np.ndarray):
raise ValueError(
'Can only assign numpy ndarrays to .{}[{!r}], not objects of class {}'
.format(self._attr, key, type(arr))
)
if arr.ndim == 1:
raise ValueError('Use adata.obs or adata.var for 1-dimensional arrays.')
if self.shape[0] != arr.shape[0]:
raise ValueError(
'Can only assign an array of same length ({}), not of length {}.'
.format(self.shape[0], arr.shape[0])
)
# the following always allocates a new array
# even if the key already exists and dimensions match
# TODO: one could check for this case
# dtype
merged_dtype = []
found_key = False
for descr in self.dtype.descr:
if descr[0] == key:
merged_dtype.append((key, arr.dtype, arr.shape[1]))
found_key = True
else:
merged_dtype.append(descr)
if not found_key:
merged_dtype.append((key, arr.dtype, arr.shape[1]))
# create new array
new = np.empty(len(self), dtype=merged_dtype)
# fill the array
for name in new.dtype.names:
if name == key:
new[name] = arr
else:
new[name] = self[name]
# make it a BoundRecArr
# TODO: why can we not do this step before filling the array?
new = BoundRecArr(new, self._parent, self._attr)
setattr(self._parent, self._attr, new)
def __delitem__(self, key: str):
"""Delete field with name."""
if key not in self.dtype.names:
raise ValueError(
'Currently, can only delete single names from {}.'
.format(self.dtype.names)
)
new_array = rec_drop_fields(self, key)
new = BoundRecArr(new_array, self._parent, self._attr)
setattr(self._parent, self._attr, new)
def to_df(self) -> pd.DataFrame:
"""Convert to pandas dataframe."""
df = pd.DataFrame(index=RangeIndex(0, self.shape[0], name=None))
for key in self.keys():
value = self[key]
for icolumn, column in enumerate(value.T):
df['{}{}'.format(key, icolumn+1)] = column
return df
# for backwards compat
def _find_corresponding_multicol_key(key, keys_multicol):
"""Find the corresponding multicolumn key."""
for mk in keys_multicol:
if key.startswith(mk) and 'of' in key:
return mk
return None
# for backwards compat
def _gen_keys_from_multicol_key(key_multicol, n_keys):
"""Generates single-column keys from multicolumn key."""
keys = [('{}{:03}of{:03}')
.format(key_multicol, i+1, n_keys) for i in range(n_keys)]
return keys
def df_to_records_fixed_width(df, var_len_str=True):
uns = {} # unstructured dictionary for storing categories
names = ['index']
if is_string_dtype(df.index):
if var_len_str:
index = df.index.values.astype(h5py.special_dtype(vlen=str))
else:
max_len_index = 0 if 0 in df.shape else df.index.map(len).max()
index = df.index.values.astype('S{}'.format(max_len_index))
else:
index = df.index.values
arrays = [index]
for k in df.columns:
names.append(k)
if is_string_dtype(df[k]) and not is_categorical(df[k]):
if var_len_str:
arrays.append(df[k].values.astype(h5py.special_dtype(vlen=str)))
else:
lengths = df[k].map(len)
if is_categorical(lengths): lengths = lengths.cat.as_ordered()
arrays.append(df[k].values.astype('S{}'.format(lengths.max())))
elif is_categorical(df[k]):
uns[k + '_categories'] = df[k].cat.categories
arrays.append(df[k].cat.codes)
else:
arrays.append(df[k].values)
formats = [v.dtype for v in arrays]
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}), uns
def _check_2d_shape(X):
"""Check shape of array or sparse matrix.
Assure that X is always 2D: Unlike numpy we always deal with 2D arrays.
"""
if X.dtype.names is None and len(X.shape) != 2:
raise ValueError('X needs to be 2-dimensional, not '
'{}-dimensional.'.format(len(X.shape)))
def _normalize_index(index, names):
if not isinstance(names, RangeIndex):
assert names.dtype != float and names.dtype != int, \
'Don’t call _normalize_index with non-categorical/string names'
# the following is insanely slow for sequences, we replaced it using pandas below
def name_idx(i):
if isinstance(i, str):
# `where` returns an 1-tuple (1D array) of found indices
i_found = np.where(names == i)[0]
if len(i_found) == 0: # returns array of length 0 if nothing is found
raise IndexError(
'Key "{}" is not valid observation/variable name/index.'
.format(i))
i = i_found[0]
return i
if isinstance(index, slice):
start = name_idx(index.start)
stop = name_idx(index.stop)
# string slices can only be inclusive, so +1 in that case
if isinstance(index.stop, str):
stop = None if stop is None else stop + 1
step = index.step
return slice(start, stop, step)
elif isinstance(index, (int, str)):
return name_idx(index)
elif isinstance(index, (Sequence, np.ndarray, pd.Index)):
# here, we replaced the implementation based on name_idx with this
# incredibly faster one
positions = pd.Series(index=names, data=range(len(names)))
positions = positions[index]
if positions.isnull().values.any():
raise KeyError(
'Indices "{}" contain invalid observation/variables names/indices.'
.format(index))
return positions.values
else:
raise IndexError('Unknown index {!r} of type {}'
.format(index, type(index)))
def _gen_dataframe(anno, length, index_names):
if isinstance(anno, pd.DataFrame):
return anno
if anno is None or len(anno) == 0:
_anno = pd.DataFrame(index=RangeIndex(0, length, name=None).astype(str))
else:
for index_name in index_names:
if index_name in anno:
_anno = pd.DataFrame(
anno, index=anno[index_name],
columns=[k for k in anno.keys() if k != index_name])
break
else:
_anno = pd.DataFrame(anno, index=
|
RangeIndex(0, length, name=None)
|
pandas.core.index.RangeIndex
|
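A sketch of the default annotation frame built above when no annotation is passed: a string index derived from a RangeIndex sized to the data; the length here is arbitrary.
import pandas as pd
from pandas import RangeIndex  # pandas.core.index.RangeIndex in the older import above
length = 4
_anno = pd.DataFrame(index=RangeIndex(0, length, name=None).astype(str))
print(_anno.index.tolist())  # ['0', '1', '2', '3']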
import pandas as pd
s1 = pd.Series([10, 20, 30], name="Total")
s2 =
|
pd.Series(["Jonathan", "Maikao", "Ronald"], name="Clientes")
|
pandas.Series
|
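A small illustrative continuation (not from the original snippet): the named Series combine column-wise, with their names becoming the column labels.
import pandas as pd
s1 = pd.Series([10, 20, 30], name="Total")
s2 = pd.Series(["Jonathan", "Maikao", "Ronald"], name="Clientes")
clientes = pd.concat([s2, s1], axis=1)  # Series names become column labels
print(clientes.columns.tolist())  # ['Clientes', 'Total']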
"""List of functions used for this project."""
import ast
import haversine as hs
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, StandardScaler
def rename_columns(df: pd.DataFrame) -> pd.DataFrame:
"""Rename variables from the 2015 CSV file.
Args:
df (pd.DataFrame): The DataFrame from 2015 CSV file
Returns:
pd.DataFrame: A DataFrame
"""
return df.rename(
columns={
"Comment": "Comments",
"GHGEmissions(MetricTonsCO2e)": "TotalGHGEmissions",
"GHGEmissionsIntensity(kgCO2e/ft2)": "GHGEmissionsIntensity",
},
)
def drop_columns(df: pd.DataFrame) -> pd.DataFrame:
"""Drop columns that doesnt exists in 2016 data.
Args:
df (pd.DataFrame): The DataFrame
Returns:
pd.DataFrame: A DataFrame
"""
return df.drop(
columns=[
"SPD Beats",
"Seattle Police Department Micro Community Policing Plan Areas",
"2010 Census Tracts",
"OtherFuelUse(kBtu)",
"City Council Districts",
"Location",
"Zip Codes",
],
)
def concat_data(df: pd.DataFrame, df2: pd.DataFrame) -> pd.DataFrame:
"""Concat 2015 and 2016 Data.
Args:
df (pd.DataFrame): The 2015 DataFrame
df2 (pd.DataFrame): The 2016 DataFrame
Returns:
pd.DataFrame: A DataFrame
"""
return pd.concat([df, df2], axis="rows")
def complete_location_data(df: pd.DataFrame) -> pd.DataFrame:
"""Extract Data from Location field in 2015 Data.
Args:
df (pd.DataFrame): The DataFrame
Returns:
pd.DataFrame: A DataFrame
"""
return df.apply(extract_data_from_location, axis="columns")
def extract_data_from_location(row: pd.Series) -> pd.Series:
"""Extract from Location variable more information.
Args:
row (pd.Series): The DataFrame row
Returns:
pd.Series: A DataFrame row
"""
building = row.copy()
parsed_location = ast.literal_eval(building.Location)
building["Latitude"] = parsed_location["latitude"]
building["Longitude"] = parsed_location["longitude"]
parsed_human_address = ast.literal_eval(parsed_location["human_address"])
building["Address"] = parsed_human_address["address"]
building["City"] = parsed_human_address["city"]
building["State"] = parsed_human_address["state"]
building["ZipCode"] = parsed_human_address["zip"]
return building
def clean_variables_names(df: pd.DataFrame) -> pd.DataFrame:
"""Rename and lowercase every variable.
Also rename some of them, like the targets.
Args:
df (pd.DataFrame): The DataFrame
Returns:
pd.DataFrame: A DataFrame
"""
regexp = r"(?<!^)(?=[A-Z])"
df.columns = df.columns.str.strip().str.replace(regexp, "_", regex=True).str.lower()
return df.rename(
columns={
"o_s_e_building_i_d": "building_id",
"numberof_buildings": "number_of_buildings",
"numberof_floors": "number_of_floors",
"property_g_f_a_total": "property_gfa_total",
"property_g_f_a_parking": "property_gfa_parking",
"property_g_f_a_building(s)": "property_gfa_building",
"largest_property_use_type_g_f_a": "largest_property_use_type_gfa",
"second_largest_property_use_type_g_f_a": "second_largest_property_use_type_gfa",
"third_largest_property_use_type_g_f_a": "third_largest_property_use_type_gfa",
"years_e_n_e_r_g_y_s_t_a_r_certified": "years_energy_star_certified",
"e_n_e_r_g_y_s_t_a_r_score": "energystar_score",
"site_e_u_i(k_btu/sf)": "site_eui",
"site_e_u_i_w_n(k_btu/sf)": "site_euiwn",
"source_e_u_i(k_btu/sf)": "source_eui",
"source_e_u_i_w_n(k_btu/sf)": "source_euiwn",
"site_energy_use(k_btu)": "site_energy_use_target",
"site_energy_use_w_n(k_btu)": "site_energy_use_wn",
"steam_use(k_btu)": "steam_use",
"electricity(k_wh)": "electricity_kwh",
"electricity(k_btu)": "electricity",
"natural_gas(therms)": "natural_gas_therms",
"natural_gas(k_btu)": "natural_gas",
"total_g_h_g_emissions": "emissions_target",
"g_h_g_emissions_intensity": "emissions_intensity",
},
)
def clean_data(df: pd.DataFrame) -> pd.DataFrame:
"""Clean the Data (huge function that needs to be splited)
Args:
df (pd.DataFrame): the DataFrame
Returns:
pd.DataFrame: the DataFrame
"""
df["neighborhood"] = df["neighborhood"].str.lower()
df = df[~df.site_energy_use_target.isna()]
df = df[~df.emissions_target.isna()]
df = df[df.compliance_status == "Compliant"]
# treat latitude and longitude as floats
df["latitude"] = df["latitude"].astype("float")
df["longitude"] = df["longitude"].astype("float")
SEATTLE_COORDS = [47.606, -122.332]
seattle_coords = tuple(SEATTLE_COORDS)
df["coords"] = list(zip(df["latitude"], df["longitude"]))
df["distance_to_center"] = df.coords.apply(
lambda x: distance_from(x, seattle_coords)
)
return df
def keep_non_residential(df: pd.DataFrame) -> pd.DataFrame:
"""Keep only the non residential buildings.
Args:
df (pd.DataFrame): The DataFrame
Returns:
pd.DataFrame: A DataFrame
"""
return df[
~df["BuildingType"].isin(
["Multifamily LR (1-4)", "Multifamily MR (5-9)", "Multifamily HR (10+)"]
)
]
def remove_duplicates(df: pd.DataFrame) -> pd.DataFrame:
"""When concatenated, 2015 and 2016 Data duplicates exists.
For numerical variables we use the mean, else we keep 2016 Data.
Args:
df (pd.DataFrame): The DataFrame
Returns:
pd.DataFrame: A DataFrame
"""
MEAN_YEAR = 2015.5
numerical_variables = list(df.select_dtypes("number"))
unique_buildings = df[numerical_variables].groupby("building_id").mean()
unique_buildings["is_agregation"] = unique_buildings["data_year"] == MEAN_YEAR
deduplicated_buildings = df.sort_values("data_year").drop_duplicates(
subset=["building_id"], keep="last"
)
numerical_variables.remove("building_id")
deduplicated_buildings = deduplicated_buildings.drop(
numerical_variables, axis="columns"
)
return deduplicated_buildings.merge(unique_buildings, on="building_id", how="left")
def remove_null_values(df: pd.DataFrame) -> pd.DataFrame:
"""Remove variables with no or a very few values.
Args:
df (pd.DataFrame): The DataFrame
Returns:
pd.DataFrame: A DataFrame
"""
return df.drop(
[
"comments",
"outlier",
"years_energy_star_certified",
"third_largest_property_use_type_gfa",
"third_largest_property_use_type",
],
axis="columns",
)
def remove_useless_variables(df: pd.DataFrame) -> pd.DataFrame:
"""Remove variables with no business value for the subject.
Args:
df (pd.DataFrame): The DataFrame
Returns:
pd.DataFrame: A DataFrame
"""
return df.drop(
["city", "state", "tax_parcel_identification_number"], axis="columns"
)
# https://github.com/JamesIgoe/GoogleFitAnalysis/blob/master/Analysis.ipynb
def corr_filter(x: pd.DataFrame, bound: float) -> pd.DataFrame:
"""List only variable with correlation higher than the selected bound.
Args:
x (pd.DataFrame): the DataFrame
bound (float): the value of correlation
Returns:
pd.DataFrame: A DataFrame
"""
x_corr = x.corr()
x_filtered = x_corr[((x_corr >= bound) | (x_corr <= -bound)) & (x_corr != 1.000)]
return x_filtered
def corr_filter_flattened(x: pd.DataFrame, bound: float) -> pd.DataFrame:
"""Flatten the DataFrame form corrFilter function to remove NaN values.
Args:
x (pd.DataFrame): the DataFrame
bound (float): the bound as previously described
Returns:
pd.DataFrame: the DataFrame
"""
x_filtered = corr_filter(x, bound)
x_flattened = x_filtered.unstack().sort_values().drop_duplicates()
return x_flattened
def filter_for_labels(df: pd.DataFrame, label: str) -> pd.DataFrame:
"""Get the list of variables that needs to be removed regarding a specific target.
Args:
df (pd.DataFrame): the DataFrame of correlations, see corr_filter_flattened()
label (str): the name of the variable
Returns:
pd.DataFrame: the DataFrame
"""
df = df.sort_index()
try:
side_left = df[
label,
]
except Exception:
side_left = pd.DataFrame()
try:
side_right = df[:, label]
except Exception:
side_right = pd.DataFrame()
if side_left.empty and side_right.empty:
return
|
pd.DataFrame()
|
pandas.DataFrame
|
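A sketch of the corr_filter / corr_filter_flattened idea on toy data: keep only pairs whose absolute correlation exceeds a bound, then flatten and deduplicate.
import numpy as np
import pandas as pd
rng = np.random.default_rng(0)
a = rng.normal(size=100)
x = pd.DataFrame({'a': a, 'b': a * 2 + rng.normal(scale=0.1, size=100), 'c': rng.normal(size=100)})
x_corr = x.corr()
bound = 0.8
# mask out weak correlations and the diagonal, then flatten to unique pairs
x_filtered = x_corr[((x_corr >= bound) | (x_corr <= -bound)) & (x_corr != 1.000)]
x_flattened = x_filtered.unstack().sort_values().drop_duplicates()
print(x_flattened.dropna())  # only the strongly correlated a-b pair survives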
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
from itertools import product
import unittest
import pandas as pd
import numpy as np
import pyspark
from databricks import koalas as ks
from databricks.koalas.config import set_option, reset_option
from databricks.koalas.frame import DataFrame
from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils
from databricks.koalas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
class OpsOnDiffFramesEnabledTest(ReusedSQLTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("compute.ops_on_diff_frames", True)
@classmethod
def tearDownClass(cls):
reset_option("compute.ops_on_diff_frames")
super().tearDownClass()
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
)
@property
def pdf2(self):
return pd.DataFrame(
{"a": [9, 8, 7, 6, 5, 4, 3, 2, 1], "b": [0, 0, 0, 4, 5, 6, 1, 2, 3]},
index=list(range(9)),
)
@property
def pdf3(self):
return pd.DataFrame(
{"b": [1, 1, 1, 1, 1, 1, 1, 1, 1], "c": [1, 1, 1, 1, 1, 1, 1, 1, 1]},
index=list(range(9)),
)
@property
def pdf4(self):
return pd.DataFrame(
{"e": [2, 2, 2, 2, 2, 2, 2, 2, 2], "f": [2, 2, 2, 2, 2, 2, 2, 2, 2]},
index=list(range(9)),
)
@property
def pdf5(self):
return pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"b": [4, 5, 6, 3, 2, 1, 0, 0, 0],
"c": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
).set_index(["a", "b"])
@property
def pdf6(self):
return pd.DataFrame(
{
"a": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"b": [0, 0, 0, 4, 5, 6, 1, 2, 3],
"c": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"e": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=list(range(9)),
).set_index(["a", "b"])
@property
def pser1(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx)
@property
def pser2(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
return pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx)
@property
def pser3(self):
midx = pd.MultiIndex(
[["koalas", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [1, 1, 2, 0, 0, 2, 2, 2, 1]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
@property
def kdf1(self):
return ks.from_pandas(self.pdf1)
@property
def kdf2(self):
return ks.from_pandas(self.pdf2)
@property
def kdf3(self):
return ks.from_pandas(self.pdf3)
@property
def kdf4(self):
return ks.from_pandas(self.pdf4)
@property
def kdf5(self):
return ks.from_pandas(self.pdf5)
@property
def kdf6(self):
return ks.from_pandas(self.pdf6)
@property
def kser1(self):
return ks.from_pandas(self.pser1)
@property
def kser2(self):
return ks.from_pandas(self.pser2)
@property
def kser3(self):
return ks.from_pandas(self.pser3)
def test_ranges(self):
self.assert_eq(
(ks.range(10) + ks.range(10)).sort_index(),
(
ks.DataFrame({"id": list(range(10))}) + ks.DataFrame({"id": list(range(10))})
).sort_index(),
)
def test_no_matched_index(self):
with self.assertRaisesRegex(ValueError, "Index names must be exactly matched"):
ks.DataFrame({"a": [1, 2, 3]}).set_index("a") + ks.DataFrame(
{"b": [1, 2, 3]}
).set_index("b")
def test_arithmetic(self):
self._test_arithmetic_frame(self.pdf1, self.pdf2, check_extension=False)
self._test_arithmetic_series(self.pser1, self.pser2, check_extension=False)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_arithmetic_extension_dtypes(self):
self._test_arithmetic_frame(
self.pdf1.astype("Int64"), self.pdf2.astype("Int64"), check_extension=True
)
self._test_arithmetic_series(
self.pser1.astype(int).astype("Int64"),
self.pser2.astype(int).astype("Int64"),
check_extension=True,
)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_arithmetic_extension_float_dtypes(self):
self._test_arithmetic_frame(
self.pdf1.astype("Float64"), self.pdf2.astype("Float64"), check_extension=True
)
self._test_arithmetic_series(
self.pser1.astype("Float64"), self.pser2.astype("Float64"), check_extension=True
)
def _test_arithmetic_frame(self, pdf1, pdf2, *, check_extension):
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
def assert_eq(actual, expected):
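# Presumably a workaround: on pandas 1.1.x-1.2.1, exact equality checks involving
# extension dtypes are unreliable, so check_exact is relaxed there and the dtypes
# are verified separately below.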
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
if isinstance(actual, DataFrame):
for dtype in actual.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# Series
assert_eq((kdf1.a - kdf2.b).sort_index(), (pdf1.a - pdf2.b).sort_index())
assert_eq((kdf1.a * kdf2.a).sort_index(), (pdf1.a * pdf2.a).sort_index())
if check_extension and not extension_float_dtypes_available:
self.assert_eq(
(kdf1["a"] / kdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index()
)
else:
assert_eq((kdf1["a"] / kdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index())
# DataFrame
assert_eq((kdf1 + kdf2).sort_index(), (pdf1 + pdf2).sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
kdf2.columns = columns
pdf1.columns = columns
pdf2.columns = columns
# Series
assert_eq(
(kdf1[("x", "a")] - kdf2[("x", "b")]).sort_index(),
(pdf1[("x", "a")] - pdf2[("x", "b")]).sort_index(),
)
assert_eq(
(kdf1[("x", "a")] - kdf2["x"]["b"]).sort_index(),
(pdf1[("x", "a")] - pdf2["x"]["b"]).sort_index(),
)
assert_eq(
(kdf1["x"]["a"] - kdf2[("x", "b")]).sort_index(),
(pdf1["x"]["a"] - pdf2[("x", "b")]).sort_index(),
)
# DataFrame
assert_eq((kdf1 + kdf2).sort_index(), (pdf1 + pdf2).sort_index())
def _test_arithmetic_series(self, pser1, pser2, *, check_extension):
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# MultiIndex Series
assert_eq((kser1 + kser2).sort_index(), (pser1 + pser2).sort_index())
assert_eq((kser1 - kser2).sort_index(), (pser1 - pser2).sort_index())
assert_eq((kser1 * kser2).sort_index(), (pser1 * pser2).sort_index())
if check_extension and not extension_float_dtypes_available:
self.assert_eq((kser1 / kser2).sort_index(), (pser1 / pser2).sort_index())
else:
assert_eq((kser1 / kser2).sort_index(), (pser1 / pser2).sort_index())
def test_arithmetic_chain(self):
self._test_arithmetic_chain_frame(self.pdf1, self.pdf2, self.pdf3, check_extension=False)
self._test_arithmetic_chain_series(
self.pser1, self.pser2, self.pser3, check_extension=False
)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_arithmetic_chain_extension_dtypes(self):
self._test_arithmetic_chain_frame(
self.pdf1.astype("Int64"),
self.pdf2.astype("Int64"),
self.pdf3.astype("Int64"),
check_extension=True,
)
self._test_arithmetic_chain_series(
self.pser1.astype(int).astype("Int64"),
self.pser2.astype(int).astype("Int64"),
self.pser3.astype(int).astype("Int64"),
check_extension=True,
)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_arithmetic_chain_extension_float_dtypes(self):
self._test_arithmetic_chain_frame(
self.pdf1.astype("Float64"),
self.pdf2.astype("Float64"),
self.pdf3.astype("Float64"),
check_extension=True,
)
self._test_arithmetic_chain_series(
self.pser1.astype("Float64"),
self.pser2.astype("Float64"),
self.pser3.astype("Float64"),
check_extension=True,
)
def _test_arithmetic_chain_frame(self, pdf1, pdf2, pdf3, *, check_extension):
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
kdf3 = ks.from_pandas(pdf3)
common_columns = set(kdf1.columns).intersection(kdf2.columns).intersection(kdf3.columns)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
if isinstance(actual, DataFrame):
for column, dtype in zip(actual.columns, actual.dtypes):
if column in common_columns:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assertFalse(isinstance(dtype, extension_dtypes))
else:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# Series
assert_eq((kdf1.a - kdf2.b - kdf3.c).sort_index(), (pdf1.a - pdf2.b - pdf3.c).sort_index())
assert_eq(
(kdf1.a * (kdf2.a * kdf3.c)).sort_index(), (pdf1.a * (pdf2.a * pdf3.c)).sort_index()
)
if check_extension and not extension_float_dtypes_available:
self.assert_eq(
(kdf1["a"] / kdf2["a"] / kdf3["c"]).sort_index(),
(pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
)
else:
assert_eq(
(kdf1["a"] / kdf2["a"] / kdf3["c"]).sort_index(),
(pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
)
# DataFrame
if check_extension and (
LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
):
self.assert_eq(
(kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
)
else:
assert_eq((kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
kdf2.columns = columns
pdf1.columns = columns
pdf2.columns = columns
columns = pd.MultiIndex.from_tuples([("x", "b"), ("y", "c")])
kdf3.columns = columns
pdf3.columns = columns
common_columns = set(kdf1.columns).intersection(kdf2.columns).intersection(kdf3.columns)
# Series
assert_eq(
(kdf1[("x", "a")] - kdf2[("x", "b")] - kdf3[("y", "c")]).sort_index(),
(pdf1[("x", "a")] - pdf2[("x", "b")] - pdf3[("y", "c")]).sort_index(),
)
assert_eq(
(kdf1[("x", "a")] * (kdf2[("x", "b")] * kdf3[("y", "c")])).sort_index(),
(pdf1[("x", "a")] * (pdf2[("x", "b")] * pdf3[("y", "c")])).sort_index(),
)
# DataFrame
if check_extension and (
LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
):
self.assert_eq(
(kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
)
else:
assert_eq((kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
def _test_arithmetic_chain_series(self, pser1, pser2, pser3, *, check_extension):
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
kser3 = ks.from_pandas(pser3)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# MultiIndex Series
assert_eq((kser1 + kser2 - kser3).sort_index(), (pser1 + pser2 - pser3).sort_index())
assert_eq((kser1 * kser2 * kser3).sort_index(), (pser1 * pser2 * pser3).sort_index())
if check_extension and not extension_float_dtypes_available:
if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
self.assert_eq(
(kser1 - kser2 / kser3).sort_index(), (pser1 - pser2 / pser3).sort_index()
)
else:
expected = pd.Series(
[249.0, np.nan, 0.0, 0.88, np.nan, np.nan, np.nan, np.nan, np.nan, -np.inf]
+ [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex(
[
["cow", "falcon", "koala", "koalas", "lama"],
["length", "power", "speed", "weight"],
],
[
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4],
[0, 1, 2, 2, 3, 0, 0, 1, 2, 3, 0, 0, 3, 3, 0, 2, 3],
],
),
)
self.assert_eq((kser1 - kser2 / kser3).sort_index(), expected)
else:
assert_eq((kser1 - kser2 / kser3).sort_index(), (pser1 - pser2 / pser3).sort_index())
assert_eq((kser1 + kser2 * kser3).sort_index(), (pser1 + pser2 * pser3).sort_index())
def test_mod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
kser = ks.from_pandas(pser)
kser_other = ks.from_pandas(pser_other)
self.assert_eq(kser.mod(kser_other).sort_index(), pser.mod(pser_other))
self.assert_eq(kser.mod(kser_other).sort_index(), pser.mod(pser_other))
self.assert_eq(kser.mod(kser_other).sort_index(), pser.mod(pser_other))
def test_rmod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
kser = ks.from_pandas(pser)
kser_other = ks.from_pandas(pser_other)
self.assert_eq(kser.rmod(kser_other).sort_index(), pser.rmod(pser_other))
self.assert_eq(kser.rmod(kser_other).sort_index(), pser.rmod(pser_other))
self.assert_eq(kser.rmod(kser_other).sort_index(), pser.rmod(pser_other))
def test_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[0, 30, 10, 20, 50],
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1[pdf2.A > -3].sort_index(), kdf1[kdf2.A > -3].sort_index())
self.assert_eq(pdf1.A[pdf2.A > -3].sort_index(), kdf1.A[kdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1)[pdf2.A > -3].sort_index(), (kdf1.A + 1)[kdf2.A > -3].sort_index()
)
def test_loc_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[20, 10, 30, 0, 50],
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.loc[pdf2.A > -3].sort_index(), kdf1.loc[kdf2.A > -3].sort_index())
self.assert_eq(pdf1.A.loc[pdf2.A > -3].sort_index(), kdf1.A.loc[kdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1).loc[pdf2.A > -3].sort_index(), (kdf1.A + 1).loc[kdf2.A > -3].sort_index()
)
def test_bitwise(self):
pser1 = pd.Series([True, False, True, False, np.nan, np.nan, True, False, np.nan])
pser2 = pd.Series([True, False, False, True, True, False, np.nan, np.nan, np.nan])
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(pser1 | pser2, (kser1 | kser2).sort_index())
self.assert_eq(pser1 & pser2, (kser1 & kser2).sort_index())
pser1 = pd.Series([True, False, np.nan], index=list("ABC"))
pser2 = pd.Series([False, True, np.nan], index=list("DEF"))
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(pser1 | pser2, (kser1 | kser2).sort_index())
self.assert_eq(pser1 & pser2, (kser1 & kser2).sort_index())
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_bitwise_extension_dtype(self):
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=False)
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
pser1 = pd.Series(
[True, False, True, False, np.nan, np.nan, True, False, np.nan], dtype="boolean"
)
pser2 = pd.Series(
[True, False, False, True, True, False, np.nan, np.nan, np.nan], dtype="boolean"
)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
assert_eq((kser1 | kser2).sort_index(), pser1 | pser2)
assert_eq((kser1 & kser2).sort_index(), pser1 & pser2)
pser1 = pd.Series([True, False, np.nan], index=list("ABC"), dtype="boolean")
pser2 = pd.Series([False, True, np.nan], index=list("DEF"), dtype="boolean")
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
# a pandas bug?
# assert_eq((kser1 | kser2).sort_index(), pser1 | pser2)
# assert_eq((kser1 & kser2).sort_index(), pser1 & pser2)
assert_eq(
(kser1 | kser2).sort_index(),
pd.Series([True, None, None, None, True, None], index=list("ABCDEF"), dtype="boolean"),
)
assert_eq(
(kser1 & kser2).sort_index(),
pd.Series(
[None, False, None, False, None, None], index=list("ABCDEF"), dtype="boolean"
),
)
def test_concat_column_axis(self):
pdf1 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
pdf1.columns.names = ["AB"]
pdf2 = pd.DataFrame({"C": [1, 2, 3], "D": [4, 5, 6]}, index=[1, 3, 5])
pdf2.columns.names = ["CD"]
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
kdf3 = kdf1.copy()
kdf4 = kdf2.copy()
pdf3 = pdf1.copy()
pdf4 = pdf2.copy()
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")], names=["X", "AB"])
pdf3.columns = columns
kdf3.columns = columns
columns = pd.MultiIndex.from_tuples([("X", "C"), ("X", "D")], names=["Y", "CD"])
pdf4.columns = columns
kdf4.columns = columns
pdf5 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
pdf6 = pd.DataFrame({"C": [1, 2, 3]}, index=[1, 3, 5])
kdf5 = ks.from_pandas(pdf5)
kdf6 = ks.from_pandas(pdf6)
ignore_indexes = [True, False]
joins = ["inner", "outer"]
objs = [
([kdf1.A, kdf2.C], [pdf1.A, pdf2.C]),
# TODO: ([kdf1, kdf2.C], [pdf1, pdf2.C]),
([kdf1.A, kdf2], [pdf1.A, pdf2]),
([kdf1.A, kdf2.C], [pdf1.A, pdf2.C]),
([kdf3[("X", "A")], kdf4[("X", "C")]], [pdf3[("X", "A")], pdf4[("X", "C")]]),
([kdf3, kdf4[("X", "C")]], [pdf3, pdf4[("X", "C")]]),
([kdf3[("X", "A")], kdf4], [pdf3[("X", "A")], pdf4]),
([kdf3, kdf4], [pdf3, pdf4]),
([kdf5, kdf6], [pdf5, pdf6]),
([kdf6, kdf5], [pdf6, pdf5]),
]
for ignore_index, join in product(ignore_indexes, joins):
for i, (kdfs, pdfs) in enumerate(objs):
with self.subTest(ignore_index=ignore_index, join=join, pdfs=pdfs, pair=i):
actual = ks.concat(kdfs, axis=1, ignore_index=ignore_index, join=join)
expected = pd.concat(pdfs, axis=1, ignore_index=ignore_index, join=join)
self.assert_eq(
repr(actual.sort_values(list(actual.columns)).reset_index(drop=True)),
repr(expected.sort_values(list(expected.columns)).reset_index(drop=True)),
)
def test_combine_first(self):
pser1 = pd.Series({"falcon": 330.0, "eagle": 160.0})
pser2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0})
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
with self.assertRaisesRegex(
ValueError, "`combine_first` only allows `Series` for parameter `other`"
):
kser1.combine_first(50)
kser1.name = ("X", "A")
kser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# MultiIndex
midx1 = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
midx2 = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser1 = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx1)
pser2 = pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx2)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# Series come from same DataFrame
pdf = pd.DataFrame(
{
"A": {"falcon": 330.0, "eagle": 160.0},
"B": {"falcon": 345.0, "eagle": 200.0, "duck": 30.0},
}
)
pser1 = pdf.A
pser2 = pdf.B
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
kser1.name = ("X", "A")
kser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
kdf.insert(1, "y", kser)
pdf.insert(1, "y", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
#
# DataFrame with Index different from inserting Series'
#
pdf = pd.DataFrame([1, 2, 3], index=[10, 20, 30])
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
kdf.insert(1, "y", kser)
pdf.insert(1, "y", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
#
# DataFrame with Multi-index columns
#
pdf = pd.DataFrame({("x", "a"): [1, 2, 3]})
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
kdf = ks.from_pandas(pdf)
kdf.insert(0, "a", kser)
pdf.insert(0, "a", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf.insert(0, ("b", "c", ""), kser)
pdf.insert(0, ("b", "c", ""), pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_compare(self):
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
pser1 = pd.Series(["b", "c", np.nan, "g", np.nan])
pser2 = pd.Series(["a", "c", np.nan, np.nan, "h"])
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
pser1.compare(pser2).sort_index(), kser1.compare(kser2).sort_index(),
)
# `keep_shape=True`
self.assert_eq(
pser1.compare(pser2, keep_shape=True).sort_index(),
kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
self.assert_eq(
pser1.compare(pser2, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
self.assert_eq(
pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# MultiIndex
pser1.index = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
)
pser2.index = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
pser1.compare(pser2).sort_index(), kser1.compare(kser2).sort_index(),
)
# `keep_shape=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_shape=True).sort_index(),
kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
else:
kser1 = ks.Series(["b", "c", np.nan, "g", np.nan])
kser2 = ks.Series(["a", "c", np.nan, np.nan, "h"])
expected = ks.DataFrame(
[["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
)
self.assert_eq(expected, kser1.compare(kser2).sort_index())
# `keep_shape=True`
expected = ks.DataFrame(
[["b", "a"], [None, None], [None, None], ["g", None], [None, "h"]],
index=[0, 1, 2, 3, 4],
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
)
self.assert_eq(
expected, kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["c", "c"], [None, None], ["g", None], [None, "h"]],
index=[0, 1, 2, 3, 4],
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# MultiIndex
kser1 = ks.Series(
["b", "c", np.nan, "g", np.nan],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
kser2 = ks.Series(
["a", "c", np.nan, np.nan, "h"],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
expected = ks.DataFrame(
[["b", "a"], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
columns=["self", "other"],
)
self.assert_eq(expected, kser1.compare(kser2).sort_index())
# `keep_shape=True`
expected = ks.DataFrame(
[["b", "a"], [None, None], [None, None], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["c", "c"], [None, None], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# Different Index
with self.assertRaisesRegex(
ValueError, "Can only compare identically-labeled Series objects"
):
kser1 = ks.Series([1, 2, 3, 4, 5], index=pd.Index([1, 2, 3, 4, 5]),)
kser2 = ks.Series([2, 2, 3, 4, 1], index=pd.Index([5, 4, 3, 2, 1]),)
kser1.compare(kser2)
# Different MultiIndex
with self.assertRaisesRegex(
ValueError, "Can only compare identically-labeled Series objects"
):
kser1 = ks.Series(
[1, 2, 3, 4, 5],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
kser2 = ks.Series(
[2, 2, 3, 4, 1],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "a"), ("x", "k"), ("q", "l")]
),
)
kser1.compare(kser2)
def test_different_columns(self):
kdf1 = self.kdf1
kdf4 = self.kdf4
pdf1 = self.pdf1
pdf4 = self.pdf4
self.assert_eq((kdf1 + kdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
pdf1.columns = columns
columns = pd.MultiIndex.from_tuples([("z", "e"), ("z", "f")])
kdf4.columns = columns
pdf4.columns = columns
self.assert_eq((kdf1 + kdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
def test_assignment_series(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf["a"] = self.kdf2.a
pdf["a"] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf["a"] = self.kdf2.b
pdf["a"] = self.pdf2.b
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf["c"] = self.kdf2.a
pdf["c"] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
# Multi-index columns
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf.columns = columns
pdf.columns = columns
kdf[("y", "c")] = self.kdf2.a
pdf[("y", "c")] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
pdf = pd.DataFrame({"a": [1, 2, 3], "Koalas": [0, 1, 2]}).set_index("Koalas", drop=False)
kdf = ks.from_pandas(pdf)
kdf.index.name = None
kdf["NEW"] = ks.Series([100, 200, 300])
pdf.index.name = None
pdf["NEW"] = pd.Series([100, 200, 300])
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_frame(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf[["a", "b"]] = self.kdf1
pdf[["a", "b"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
# 'c' does not exist in `kdf`.
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf[["b", "c"]] = self.kdf1
pdf[["b", "c"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
# 'c' and 'd' do not exist in `kdf`.
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf[["c", "d"]] = self.kdf1
pdf[["c", "d"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf.columns = columns
pdf.columns = columns
kdf[[("y", "c"), ("z", "d")]] = self.kdf1
pdf[[("y", "c"), ("z", "d")]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf1 = ks.from_pandas(self.pdf1)
pdf1 = self.pdf1
kdf1.columns = columns
pdf1.columns = columns
kdf[["c", "d"]] = kdf1
pdf[["c", "d"]] = pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_series_chain(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf["a"] = self.kdf1.a
pdf["a"] = self.pdf1.a
kdf["a"] = self.kdf2.b
pdf["a"] = self.pdf2.b
kdf["d"] = self.kdf3.c
pdf["d"] = self.pdf3.c
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_frame_chain(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf[["a", "b"]] = self.kdf1
pdf[["a", "b"]] = self.pdf1
kdf[["e", "f"]] = self.kdf3
pdf[["e", "f"]] = self.pdf3
kdf[["b", "c"]] = self.kdf2
pdf[["b", "c"]] = self.pdf2
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_multi_index_arithmetic(self):
kdf5 = self.kdf5
kdf6 = self.kdf6
pdf5 = self.pdf5
pdf6 = self.pdf6
# Series
self.assert_eq((kdf5.c - kdf6.e).sort_index(), (pdf5.c - pdf6.e).sort_index())
self.assert_eq((kdf5["c"] / kdf6["e"]).sort_index(), (pdf5["c"] / pdf6["e"]).sort_index())
# DataFrame
self.assert_eq((kdf5 + kdf6).sort_index(), (pdf5 + pdf6).sort_index(), almost=True)
def test_multi_index_assignment_series(self):
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["x"] = self.kdf6.e
pdf["x"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["e"] = self.kdf6.e
pdf["e"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["c"] = self.kdf6.e
pdf["c"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_multi_index_assignment_frame(self):
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf[["c"]] = self.kdf5
pdf[["c"]] = self.pdf5
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf[["x"]] = self.kdf5
pdf[["x"]] = self.pdf5
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf6)
pdf = self.pdf6
kdf[["x", "y"]] = self.kdf6
pdf[["x", "y"]] = self.pdf6
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_frame_loc_setitem(self):
pdf_orig = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
kdf_orig = ks.DataFrame(pdf_orig)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
another_kdf = ks.DataFrame(pdf_orig)
kdf.loc[["viper", "sidewinder"], ["shield"]] = -another_kdf.max_speed
pdf.loc[["viper", "sidewinder"], ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
kdf.loc[another_kdf.max_speed < 5, ["shield"]] = -kdf.max_speed
pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
kdf.loc[another_kdf.max_speed < 5, ["shield"]] = -another_kdf.max_speed
pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
def test_frame_iloc_setitem(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
kdf = ks.DataFrame(pdf)
another_kdf = ks.DataFrame(pdf)
kdf.iloc[[0, 1, 2], 1] = -another_kdf.max_speed
pdf.iloc[[0, 1, 2], 1] = -pdf.max_speed
self.assert_eq(kdf, pdf)
# TODO: match the behavior with pandas 1.2 and uncomment the test below
# with self.assertRaisesRegex(
# ValueError,
# "shape mismatch: value array of shape (3,) could not be broadcast to indexing "
# "result of shape (2,1)",
# ):
# kdf.iloc[[1, 2], [1]] = -another_kdf.max_speed
kdf.iloc[[0, 1, 2], 1] = 10 * another_kdf.max_speed
pdf.iloc[[0, 1, 2], 1] = 10 * pdf.max_speed
self.assert_eq(kdf, pdf)
# TODO: match the behavior with pandas 1.2 and uncomment the test below
# with self.assertRaisesRegex(
# ValueError,
# "shape mismatch: value array of shape (3,) could not be broadcast to indexing "
# "result of shape (1,)",
# ):
# kdf.iloc[[0], 1] = 10 * another_kdf.max_speed
def test_series_loc_setitem(self):
pdf =
|
pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import datetime
import sys
import time
import xgboost as xgb
from add_feture import *
FEATURE_EXTRACTION_SLOT = 10
LabelDay = datetime.datetime(2014,12,18,0,0,0)
Data = pd.read_csv("../../../../data/fresh_comp_offline/drop1112_sub_item.csv")
Data['daystime'] = Data['days'].map(lambda x: time.strptime(x, "%Y-%m-%d")).map(lambda x: datetime.datetime(*x[:6]))
def get_train(train_user,end_time):
# Take the records from the day before the label day as the records to be labeled
data_train = train_user[(train_user['daystime'] == (end_time-datetime.timedelta(days=1)))]#&((train_user.behavior_type==3)|(train_user.behavior_type==2))
# Drop duplicate samples from the training records
data_train = data_train.drop_duplicates(['user_id', 'item_id'])
data_train_ui = data_train['user_id'].astype(str) + '_' + data_train['item_id'].astype(str)  # string key avoids float-precision collisions from dividing the two ids
# print(len(data_train))
# Label using the actual purchases made on the label day
data_label = train_user[train_user['daystime'] == end_time]
data_label_buy = data_label[data_label['behavior_type'] == 4]
data_label_buy_ui = data_label_buy['user_id'].astype(str) + '_' + data_label_buy['item_id'].astype(str)
# Label the previous day's interaction records accordingly
data_train_labeled = data_train_ui.isin(data_label_buy_ui)
label_map = {True: 1, False: 0}
data_train_labeled = data_train_labeled.map(label_map)
data_train['label'] = data_train_labeled
return data_train[['user_id', 'item_id','item_category', 'label']]
def get_label_testset(train_user,LabelDay):
# The test set is all interaction data from the previous day
data_test = train_user[(train_user['daystime'] == LabelDay)]#&((train_user.behavior_type==3)|(train_user.behavior_type==2))
data_test = data_test.drop_duplicates(['user_id', 'item_id'])
return data_test[['user_id', 'item_id','item_category']]
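# Hedged usage sketch (not part of the original script): with `Data` and `LabelDay`
# as defined above, a labeled training frame and the candidate test frame could be
# built as follows; the variable names are illustrative only.
# train_labeled = get_train(Data, LabelDay)            # day LabelDay-1 labeled by purchases on LabelDay
# test_candidates = get_label_testset(Data, LabelDay)  # all interactions on LabelDay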
def item_category_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
item_count = pd.crosstab(data.item_category,data.behavior_type)
item_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
item_count_before5 = pd.crosstab(beforefiveday.item_category,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
item_count_before5 = pd.crosstab(beforefiveday.item_category,beforefiveday.behavior_type)
item_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
item_count_before_3 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
item_count_before_3 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
item_count_before_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
item_count_before_2 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
item_count_before_2 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
beforeonedayitem_count = pd.crosstab(beforeoneday.item_category,beforeoneday.behavior_type)
countAverage = item_count/FEATURE_EXTRACTION_SLOT
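# Conversion-rate style features: each behaviour count is divided by the purchase count
# (behavior_type 4). In the Tianchi fresh dataset behavior_type is presumably
# 1=click, 2=collect, 3=add-to-cart, 4=buy, so the column names below are rough labels
# for the 1/4, 2/4 and 3/4 ratios.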
buyRate = pd.DataFrame()
buyRate['click'] = item_count[1]/item_count[4]
buyRate['skim'] = item_count[2]/item_count[4]
buyRate['collect'] = item_count[3]/item_count[4]
buyRate.index = item_count.index
buyRate_2 = pd.DataFrame()
buyRate_2['click'] = item_count_before5[1]/item_count_before5[4]
buyRate_2['skim'] = item_count_before5[2]/item_count_before5[4]
buyRate_2['collect'] = item_count_before5[3]/item_count_before5[4]
buyRate_2.index = item_count_before5.index
buyRate_3 = pd.DataFrame()
buyRate_3['click'] = item_count_before_3[1]/item_count_before_3[4]
buyRate_3['skim'] = item_count_before_3[2]/item_count_before_3[4]
buyRate_3['collect'] = item_count_before_3[3]/item_count_before_3[4]
buyRate_3.index = item_count_before_3.index
buyRate = buyRate.replace([np.inf, -np.inf], 0)
buyRate_2 = buyRate_2.replace([np.inf, -np.inf], 0)
buyRate_3 = buyRate_3.replace([np.inf, -np.inf], 0)
item_category_feture = pd.merge(item_count,beforeonedayitem_count,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,countAverage,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,buyRate,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before5,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before_3,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before_2,how='left',right_index=True,left_index=True)
# item_category_feture = pd.merge(item_category_feture,buyRate_2,how='left',right_index=True,left_index=True)
# item_category_feture = pd.merge(item_category_feture,buyRate_3,how='left',right_index=True,left_index=True)
item_category_feture.fillna(0,inplace=True)
return item_category_feture
def item_id_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
item_count = pd.crosstab(data.item_id,data.behavior_type)
item_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
item_count_before5 = pd.crosstab(beforefiveday.item_id,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
item_count_before5 = pd.crosstab(beforefiveday.item_id,beforefiveday.behavior_type)
item_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
item_count_before_3 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
item_count_before_3 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
item_count_before_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
item_count_before_2 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
item_count_before_2 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
item_count_unq = data.groupby(by = ['item_id','behavior_type']).agg({"user_id":lambda x:x.nunique()});item_count_unq = item_count_unq.unstack()
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
beforeonedayitem_count = pd.crosstab(beforeoneday.item_id,beforeoneday.behavior_type)
countAverage = item_count/FEATURE_EXTRACTION_SLOT
buyRate = pd.DataFrame()
buyRate['click'] = item_count[1]/item_count[4]
buyRate['skim'] = item_count[2]/item_count[4]
buyRate['collect'] = item_count[3]/item_count[4]
buyRate.index = item_count.index
buyRate_2 = pd.DataFrame()
buyRate_2['click'] = item_count_before5[1]/item_count_before5[4]
buyRate_2['skim'] = item_count_before5[2]/item_count_before5[4]
buyRate_2['collect'] = item_count_before5[3]/item_count_before5[4]
buyRate_2.index = item_count_before5.index
buyRate_3 = pd.DataFrame()
buyRate_3['click'] = item_count_before_3[1]/item_count_before_3[4]
buyRate_3['skim'] = item_count_before_3[2]/item_count_before_3[4]
buyRate_3['collect'] = item_count_before_3[3]/item_count_before_3[4]
buyRate_3.index = item_count_before_3.index
buyRate = buyRate.replace([np.inf, -np.inf], 0)
buyRate_2 = buyRate_2.replace([np.inf, -np.inf], 0)
buyRate_3 = buyRate_3.replace([np.inf, -np.inf], 0)
item_id_feture = pd.merge(item_count,beforeonedayitem_count,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,countAverage,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,buyRate,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_unq,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_before5,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_before_3,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_before_2,how='left',right_index=True,left_index=True)
# item_id_feture = pd.merge(item_id_feture,buyRate_2,how='left',right_index=True,left_index=True)
# item_id_feture = pd.merge(item_id_feture,buyRate_3,how='left',right_index=True,left_index=True)
item_id_feture.fillna(0,inplace=True)
return item_id_feture
def user_id_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
user_count = pd.crosstab(data.user_id,data.behavior_type)
user_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
user_count_before5 = pd.crosstab(beforefiveday.user_id,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
user_count_before5 = pd.crosstab(beforefiveday.user_id,beforefiveday.behavior_type)
user_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
user_count_before_3 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
user_count_before_3 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
user_count_before_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
user_count_before_2 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
user_count_before_2 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
beforeonedayuser_count = pd.crosstab(beforeoneday.user_id,beforeoneday.behavior_type)
countAverage = user_count/FEATURE_EXTRACTION_SLOT
buyRate = pd.DataFrame()
buyRate['click'] = user_count[1]/user_count[4]
buyRate['skim'] = user_count[2]/user_count[4]
buyRate['collect'] = user_count[3]/user_count[4]
buyRate.index = user_count.index
buyRate_2 = pd.DataFrame()
buyRate_2['click'] = user_count_before5[1]/user_count_before5[4]
buyRate_2['skim'] = user_count_before5[2]/user_count_before5[4]
buyRate_2['collect'] = user_count_before5[3]/user_count_before5[4]
buyRate_2.index = user_count_before5.index
buyRate_3 = pd.DataFrame()
buyRate_3['click'] = user_count_before_3[1]/user_count_before_3[4]
buyRate_3['skim'] = user_count_before_3[2]/user_count_before_3[4]
buyRate_3['collect'] = user_count_before_3[3]/user_count_before_3[4]
buyRate_3.index = user_count_before_3.index
buyRate = buyRate.replace([np.inf, -np.inf], 0)
buyRate_2 = buyRate_2.replace([np.inf, -np.inf], 0)
buyRate_3 = buyRate_3.replace([np.inf, -np.inf], 0)
long_online = pd.pivot_table(beforeoneday,index=['user_id'],values=['hours'],aggfunc=[np.min,np.max,np.ptp])
user_id_feture = pd.merge(user_count,beforeonedayuser_count,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,countAverage,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,buyRate,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,user_count_before5,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,user_count_before_3,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,user_count_before_2,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,long_online,how='left',right_index=True,left_index=True)
# user_id_feture = pd.merge(user_id_feture,buyRate_2,how='left',right_index=True,left_index=True)
# user_id_feture = pd.merge(user_id_feture,buyRate_3,how='left',right_index=True,left_index=True)
user_id_feture.fillna(0,inplace=True)
return user_id_feture
def user_item_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
user_item_count = pd.crosstab([data.user_id,data.item_id],data.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
user_item_count_5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
user_item_count_5 = pd.crosstab([beforefiveday.user_id,beforefiveday.item_id],beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
user_item_count_5 = pd.crosstab([beforefiveday.user_id,beforefiveday.item_id],beforefiveday.behavior_type)
user_item_count_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
user_item_count_3 = pd.crosstab([beforethreeday.user_id,beforethreeday.item_id],beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
user_item_count_3 = pd.crosstab([beforethreeday.user_id,beforethreeday.item_id],beforethreeday.behavior_type)
user_item_count_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
user_item_count_2 = pd.crosstab([beforethreeday.user_id,beforethreeday.item_id],beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
user_item_count_2 = pd.crosstab([beforethreeday.user_id,beforethreeday.item_id],beforethreeday.behavior_type)
beforeonedayuser_item_count = pd.crosstab([beforeoneday.user_id,beforeoneday.item_id],beforeoneday.behavior_type)
# _live = user_item_long_touch(data)
max_touchtime = pd.pivot_table(beforeoneday,index=['user_id','item_id'],values=['hours'],aggfunc=[np.min,np.max])
max_touchtype = pd.pivot_table(beforeoneday,index=['user_id','item_id'],values=['behavior_type'],aggfunc=np.max)
user_item_feture = pd.merge(user_item_count,beforeonedayuser_item_count,how='left',right_index=True,left_index=True)
user_item_feture = pd.merge(user_item_feture,max_touchtime,how='left',right_index=True,left_index=True)
user_item_feture = pd.merge(user_item_feture,max_touchtype,how='left',right_index=True,left_index=True)
# user_item_feture = pd.merge(user_item_feture,_live,how='left',right_index=True,left_index=True)
user_item_feture = pd.merge(user_item_feture,user_item_count_5,how='left',right_index=True,left_index=True)
user_item_feture = pd.merge(user_item_feture,user_item_count_3,how='left',right_index=True,left_index=True)
user_item_feture = pd.merge(user_item_feture,user_item_count_2,how='left',right_index=True,left_index=True)
user_item_feture.fillna(0,inplace=True)
return user_item_feture
def user_cate_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
user_item_count = pd.crosstab([data.user_id,data.item_category],data.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
user_cate_count_5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=(end_time-datetime.timedelta(days=5+2))]
user_cate_count_5 =
|
pd.crosstab([beforefiveday.user_id,beforefiveday.item_category],beforefiveday.behavior_type)
|
pandas.crosstab
|
"""
The ``risk_models`` module provides functions for estimating the covariance matrix given
historical returns. Because of the complexity of estimating covariance matrices
(and the importance of efficient computations), this module mostly provides a convenient
wrapper around the underrated `sklearn.covariance` module.
The format of the data input is the same as that in :ref:`expected-returns`.
**Currently implemented:**
- sample covariance
- semicovariance
- exponentially weighted covariance
- minimum covariance determinant
- shrunk covariance matrices:
- manual shrinkage
- Ledoit Wolf shrinkage
- Oracle Approximating shrinkage
- covariance to correlation matrix
"""
import warnings
import numpy as np
import pandas as pd
import sklearn.covariance
from .expected_returns import returns_from_prices
def sample_cov(prices, frequency=252):
"""
Calculate the annualised sample covariance matrix of (daily) asset returns.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:return: annualised sample covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
daily_returns = returns_from_prices(prices)
return daily_returns.cov() * frequency
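# Hedged usage sketch (illustrative, not part of the library): given a price DataFrame
# `prices_df` with one column per ticker and one row per trading day,
#   S = sample_cov(prices_df, frequency=252)
# returns the annualised sample covariance matrix indexed and labelled by ticker.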
def semicovariance(prices, benchmark=0.000079, frequency=252):
"""
Estimate the semicovariance matrix, i.e. the covariance given that
the returns are less than the benchmark.
.. semicov = E([min(r_i - B, 0)] . [min(r_j - B, 0)])
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param benchmark: the benchmark return, defaults to the daily risk-free rate, i.e.
:math:`1.02^{(1/252)} -1`.
:type benchmark: float
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year). Ensure that you use the appropriate
benchmark, e.g. if ``frequency=12`` use the monthly risk-free rate.
:type frequency: int, optional
:return: semicovariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
daily_returns = returns_from_prices(prices)
drops = np.fmin(daily_returns - benchmark, 0)
return drops.cov() * frequency
def _pair_exp_cov(X, Y, span=180):
"""
Calculate the exponential covariance between two timeseries of returns.
:param X: first time series of returns
:type X: pd.Series
:param Y: second time series of returns
:type Y: pd.Series
:param span: the span of the exponential weighting function, defaults to 180
:type span: int, optional
:return: the exponential covariance between X and Y
:rtype: float
"""
covariation = (X - X.mean()) * (Y - Y.mean())
# Exponentially weight the covariation and take the mean
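# With pandas' span parameterisation the decay factor is alpha = 2 / (span + 1),
# so the default span=180 corresponds to alpha of roughly 0.011 per observation.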
if span < 10:
warnings.warn("it is recommended to use a higher span, e.g 30 days")
return covariation.ewm(span=span).mean()[-1]
def exp_cov(prices, span=180, frequency=252):
"""
Estimate the exponentially-weighted covariance matrix, which gives
greater weight to more recent data.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param span: the span of the exponential weighting function, defaults to 180
:type span: int, optional
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:return: annualised estimate of exponential covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
assets = prices.columns
daily_returns = returns_from_prices(prices)
N = len(assets)
# Loop over matrix, filling entries with the pairwise exp cov
S = np.zeros((N, N))
for i in range(N):
for j in range(i, N):
S[i, j] = S[j, i] = _pair_exp_cov(
daily_returns.iloc[:, i], daily_returns.iloc[:, j], span
)
return pd.DataFrame(S * frequency, columns=assets, index=assets)
def min_cov_determinant(prices, frequency=252, random_state=None):
"""
Calculate the minimum covariance determinant, an estimator of the covariance matrix
that is more robust to noise.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:param random_state: random seed to make results reproducible, defaults to None
:type random_state: int, optional
:return: annualised estimate of covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
assets = prices.columns
X = prices.pct_change().dropna(how="all")
X = np.nan_to_num(X.values)
raw_cov_array = sklearn.covariance.fast_mcd(X, random_state=random_state)[1]
return pd.DataFrame(raw_cov_array, index=assets, columns=assets) * frequency
def cov_to_corr(cov_matrix):
"""
Convert a covariance matrix to a correlation matrix.
:param cov_matrix: covariance matrix
:type cov_matrix: pd.DataFrame
:return: correlation matrix
:rtype: pd.DataFrame
"""
if not isinstance(cov_matrix, pd.DataFrame):
warnings.warn("cov_matrix is not a dataframe", RuntimeWarning)
cov_matrix =
|
pd.DataFrame(cov_matrix)
|
pandas.DataFrame
|
import copy
import re
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from data.dataloader import JHULoader
from pytz import timezone
from utils.fitting.loss import Loss_Calculator
from utils.generic.config import read_config
"""
Helper functions for processing different reichlab submissions and reichlab ground truth,
comparing reichlab models with the ground truth, and processing, formatting and comparing
our (Wadhwani AI) submission with the ground truth as well.
"""
def get_mapping(which='location_name_to_code', reichlab_path='../../../covid19-forecast-hub', read_from_github=False):
if read_from_github:
reichlab_path = 'https://raw.githubusercontent.com/reichlab/covid19-forecast-hub/master'
df = pd.read_csv(f'{reichlab_path}/data-locations/locations.csv')
df.dropna(how='any', axis=0, inplace=True)
if which == 'location_name_to_code':
mapping_dict = dict(zip(df['location_name'], df['location']))
elif which == 'location_name_to_abbv':
mapping_dict = dict(zip(df['location_name'], df['abbreviation']))
else:
mapping_dict = {}
return mapping_dict
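# Hedged usage sketch (illustrative only): translate human-readable location names into
# the reichlab location codes, assuming locations.csv contains the usual columns:
#   name_to_code = get_mapping('location_name_to_code')
#   ca_code = name_to_code.get('California')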
def get_list_of_models(date_of_submission, comp, reichlab_path='../../../covid19-forecast-hub', read_from_github=False,
location_id_filter=78, num_submissions_filter=50):
"""Given an input of submission date, comp, gets list of all models that submitted.
Args:
date_of_submission (str): The ensemble creation date (always a Monday), used to select a particular week
comp (str): Which compartment (Can be 'inc_case', 'cum_case', 'inc_death', or 'cum_death')
reichlab_path (str, optional): Path to reichlab repo (if cloned on machine).
Defaults to '../../../covid19-forecast-hub'.
read_from_github (bool, optional): If true, reads files directly from github
instead of cloned repo. Defaults to False.
location_id_filter (int, optional): Only considers locations with location code <= this input.
Defaults to 78. All states and territories have codes <= 78; locations with codes > 78 are counties.
num_submissions_filter (int, optional): Only selects models with more submissions than this.
Defaults to 50.
Returns:
list: list of eligible models
"""
if comp == 'cum_case':
comp = 'inc_case'
if read_from_github:
reichlab_path = 'https://raw.githubusercontent.com/reichlab/covid19-forecast-hub/master'
try:
df = pd.read_csv(f'{reichlab_path}/ensemble-metadata/' + \
f'{date_of_submission}-{comp}-model-eligibility.csv')
except:
date_convert = datetime.strptime(date_of_submission, '%Y-%m-%d')
date_of_filename = (date_convert - timedelta(days=1)).date()
df = pd.read_csv(f'{reichlab_path}/ensemble-metadata/' +
f'{date_of_filename}-{comp}-model-eligibility.csv')
df['location'] = df['location'].apply(lambda x : int(x) if x != 'US' else 0)
all_models = list(df['model'])
df_all_states = df[df['location'] <= location_id_filter]
df_eligible = df_all_states[df_all_states['overall_eligibility'] == 'eligible']
df_counts = df_eligible.groupby('model').count()
# Filter all models with > num_submissions_filter submissions
df_counts = df_counts[df_counts['overall_eligibility'] > num_submissions_filter]
eligible_models = list(df_counts.index)
# Add Wadhwani_AI-BayesOpt in case it isn't already part of the list
if ('Wadhwani_AI-BayesOpt' in all_models) & ('Wadhwani_AI-BayesOpt' not in eligible_models):
eligible_models.append('Wadhwani_AI-BayesOpt')
print(eligible_models)
return eligible_models
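# Hedged usage sketch (illustrative only): list models eligible for the incident-death
# ensemble built on a given Monday, reading directly from the GitHub repo:
#   models = get_list_of_models('2020-12-14', 'inc_death', read_from_github=True)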
def process_single_submission(model, date_of_submission, comp, df_true, reichlab_path='../../../covid19-forecast-hub',
read_from_github=False, location_id_filter=78, num_weeks_filter=4):
"""Processes the CSV file of a single submission (one model, one instance of time)
Args:
model (str): The model name to process CSV of
date_of_submission (str): The ensemble creation date (always a Monday), used to select a particular week
comp (str): Which compartment (Can be 'inc_case', 'cum_case', 'inc_death', or 'cum_death')
df_true (pd.DataFrame): The ground truth dataframe (Used for processing cum_cases submissions)
reichlab_path (str, optional): Path to reichlab repo (if cloned on machine).
Defaults to '../../../covid19-forecast-hub'.
read_from_github (bool, optional): If true, reads files directly from github
instead of cloned repo. Defaults to False.
location_id_filter (int, optional): All location ids <= this will be kept. Defaults to 78.
num_weeks_filter (int, optional): Only forecasts up to num_weeks_filter weeks ahead
will be kept. Defaults to 4.
Returns:
pd.DataFrame: processed model submission dataframe
"""
if read_from_github:
reichlab_path = 'https://raw.githubusercontent.com/reichlab/covid19-forecast-hub/master'
try:
df = pd.read_csv(f'{reichlab_path}/data-processed/' + \
f'{model}/{date_of_submission}-{model}.csv')
except:
date_convert = datetime.strptime(date_of_submission, '%Y-%m-%d')
date_of_filename = date_convert - timedelta(days=1)
try:
df = pd.read_csv(f'{reichlab_path}/data-processed/' + \
f'{model}/{date_of_filename.strftime("%Y-%m-%d")}-{model}.csv')
except:
date_of_filename = date_of_filename - timedelta(days=1)
try:
df = pd.read_csv(f'{reichlab_path}/data-processed/' + \
f'{model}/{date_of_filename.strftime("%Y-%m-%d")}-{model}.csv')
except:
return None
# Converting all locations to integers
df['location'] = df['location'].apply(lambda x : int(x) if x != 'US' else 0)
# Keeping only states and territories forecasts
df = df[df['location'] <= location_id_filter]
df['model'] = model
# Only keeping the wk forecasts
df = df[df['target'].apply(lambda x : 'wk' in x)]
# Only the forecasts corresponding the comp user are interested in
if comp == 'cum_case':
df = df[df['target'].apply(lambda x : 'inc_case'.replace('_', ' ') in x)]
else:
df = df[df['target'].apply(lambda x : comp.replace('_', ' ') in x)]
    # Pruning the forecasts which are more than num_weeks_filter weeks ahead
df = df[df['target'].apply(lambda x : int(re.findall(r'\d+', x)[0])) <= num_weeks_filter]
df['target_end_date'] = pd.to_datetime(df['target_end_date'])
df['forecast_date'] = pd.to_datetime(df['forecast_date'])
if comp == 'cum_case':
grouped = df.groupby(['location', 'type', 'quantile'], dropna=False)
df_cumsum = pd.DataFrame(columns=df.columns)
for _, group in grouped:
group['value'] = group['value'].cumsum()
            df_cumsum = pd.concat([df_cumsum, group], ignore_index=True)
import json
import pathlib
from fastai.text import *
import numpy as np
import pandas as pd
import sklearn.model_selection
BOS = 'xbos' # beginning-of-sentence tag
FLD = 'xfld' # data field tag
PATH = pathlib.Path("lm-data/wiki_extr/id")
LM_PATH=Path('lm-data/id/lm/')
LM_PATH.mkdir(parents=True, exist_ok=True)
LANG_FILENAMES = [str(f) for f in PATH.rglob("*/*")]
print(len(LANG_FILENAMES))
print(LANG_FILENAMES[0:5])
LANG_TEXT = []
for i in LANG_FILENAMES:
for line in open(i, encoding="utf-8"):
LANG_TEXT.append(json.loads(line))
LANG_TEXT = pd.DataFrame(LANG_TEXT)
LANG_TEXT.to_csv("{}/Wiki_Indonesia_Corpus.csv".format(LM_PATH), index=False)
LANG_TEXT = pd.read_csv("{}/Wiki_Indonesia_Corpus.csv".format(LM_PATH))
(LANG_TEXT.assign(labels = 0).pipe(lambda x: x[['labels', 'text']])
.to_csv("{}/Wiki_Indonesia_Corpus2.csv".format(LM_PATH), header=None, index=False))
# Some statistics of Indonesia Wikipedia
### Getting rid of the title name in the text field
def split_title_from_text(text):
words = text.split("\n\n")
if len(words) >= 2:
return ''.join(words[1:])
else:
return ''.join(words)
LANG_TEXT['text'] = LANG_TEXT['text'].apply(lambda x: split_title_from_text(x))
### Number of documents
print(LANG_TEXT['text'][:5])
print(LANG_TEXT.shape)
### Number of words in all the documents
print(LANG_TEXT['text'].apply(lambda x: len(x.split(" "))).sum())
# ### Number of unique tokens across documents
print(len(set(''.join(LANG_TEXT['text'].values).split(" "))))
def get_texts(df, n_lbls=1):
labels = df.iloc[:,range(n_lbls)].values.astype(np.int64)
texts = '\n{} {} 1 '.format(BOS, FLD) + df[n_lbls].astype(str)
for i in range(n_lbls+1, len(df.columns)): texts += ' {} {} '.format(FLD, i-n_lbls) + df[i].astype(str)
#texts = texts.apply(fixup).values.astype(str)
tok = Tokenizer().proc_all_mp(partition_by_cores(texts)) # splits the list into sublists for processing by each core
# Lower and upper case is inside the tokenizer
return tok, list(labels)
def get_all(df, n_lbls):
tok, labels = [], []
for i, r in enumerate(df):
print(i)
#pdb.set_trace()
tok_, labels_ = get_texts(r, n_lbls)
tok += tok_;
labels += labels_
return tok, labels
LANG_TEXT = pd.read_csv("{}/Wiki_Indonesia_Corpus2.csv".format(LM_PATH), header=None)#, chunksize=5000)
print(LANG_TEXT.head())
print(LANG_TEXT.shape)
trn_texts,val_texts = sklearn.model_selection.train_test_split(
LANG_TEXT, test_size=0.1) # split the data into train and validation sets
np.random.seed(42)
trn_idx = np.random.permutation(len(trn_texts)) # generate a random ordering
val_idx = np.random.permutation(len(val_texts))
df_trn = trn_texts.iloc[trn_idx,:] # sort things randomly
df_val = val_texts.iloc[val_idx,:] # sort things randomly
df_trn.columns = ['labels', 'text']
df_val.columns = ['labels', 'text']
df_trn.to_csv(LM_PATH/'train.csv', header=False, index=False)
df_val.to_csv(LM_PATH/'test.csv', header=False, index=False) # saving the data in our new format to disk
chunksize = 10000
df_trn = pd.read_csv(LM_PATH/'train.csv', header=None, chunksize=chunksize)
# -*- coding: utf-8 -*-
""" CLI for prediction
source activate condaenv
which python
python cli_predict.py --file_input data/address_matching_data.csv --folder_model models/model_01/ --folder_output data/ > data/log_test.txt 2>&1
> data/log_test.txt 2>&1 redirects both stdout and stderr to the log file
"""
import argparse
import logging
import os
import sys
from time import sleep
import numpy as np
import pandas as pd
####################################################################################################
import util
import util_feature
import util_model
############### Variable definition ################################################################
CWD_FOLDER = os.getcwd()
####################################################################################################
logging.basicConfig()
logger = logging.getLogger(__name__)
def log(*argv):
logger.info(",".join([str(x) for x in argv]))
####################################################################################################
def load_arguments():
parser = argparse.ArgumentParser(description="Prediction CLI")
parser.add_argument("--verbose", default=0, help="verbose")
parser.add_argument("--log_file", type=str, default="log.txt", help="log file")
parser.add_argument("--file_input", type=str, default="", help="Input file")
parser.add_argument("--folder_output", default="nodaemon", help="Folder output")
parser.add_argument("--folder_model", default="model/", help="Model")
args = parser.parse_args()
return args
def data_load(file_input):
    df = pd.read_csv(file_input)
import matplotlib.pyplot as plt
import matplotlib.cm
import pandas as pd
import numpy as np
import itertools
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize
from matplotlib.colorbar import ColorbarBase
from matplotlib import gridspec
import matplotlib.patheffects as PathEffects
PRACTICE_COLOR = '#22447A'
RECRUIT_COLOR = '#2EA9B0'
DEACTIVATED_COUNTY_COLOR = '#FBDFDB'
ACTIVATED_COUNTY_COLOR = '#EA5D4E'
def darken_color(color, amount=0.7):
"""
    Darkens the given color by multiplying its lightness by the given amount
    (values < 1 darken the color).
    Input can be matplotlib color string, hex string, or RGB tuple.
    Examples:
    >> darken_color('g', 0.3)
    >> darken_color('#F034A3', 0.6)
    >> darken_color((.3,.55,.1), 0.5)
"""
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], amount * c[1], c[2])
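# For example, darken_color(PRACTICE_COLOR, 0.7) returns an RGB tuple whose lightness
# is 70% of the original color's lightness.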
def print_map(df_genvasc_practices, df_potential_practices):
ax_map = plt.subplot(gs[2])
# Map bounding box
# westlimit=-1.5044; southlimit=52.1023; eastlimit=-0.3151; northlimit=52.8302
llcrnrlon = -1.6
llcrnrlat = 51.95
urcrnrlon = 0.6
urcrnrlat = 53.5
map = Basemap(
resolution='c',
projection='merc',
llcrnrlon=llcrnrlon,
llcrnrlat=llcrnrlat,
urcrnrlon=urcrnrlon,
urcrnrlat=urcrnrlat
)
map.readshapefile('data/English Ceremonial Counties', 'counties', drawbounds=False)
activated_counties = [
'Leicestershire',
'Northamptonshire',
'Rutland',
]
deactivated_counties = [
'Cambridgeshire',
'Lincolnshire',
]
patches = [Polygon(np.array(shape), True) for info, shape in zip(map.counties_info, map.counties) if info['NAME'] in activated_counties]
pc = PatchCollection(patches, zorder=2, facecolors=[ACTIVATED_COUNTY_COLOR], edgecolor='#FFFFFF', linewidths=1.)
ax_map.add_collection(pc)
patches = [Polygon(np.array(shape), True) for info, shape in zip(map.counties_info, map.counties) if info['NAME'] in deactivated_counties]
pc = PatchCollection(patches, zorder=2, facecolors=[DEACTIVATED_COUNTY_COLOR], edgecolor='#FFFFFF', linewidths=1.)
ax_map.add_collection(pc)
df_towns = pd.read_csv('data/towns_geo.csv')
df_towns = df_towns[df_towns.name.isin([
'leicester',
'loughborough',
'northampton',
'kettering',
'cambridge',
'peterborough',
'lincoln',
'grantham',
'boston',
'oakham',
])]
for index, row in df_towns.iterrows():
x, y = map(row['x'], row['y'])
txt = ax_map.text(
x,
y,
row['name'].title().replace(' ', '\n'),
fontsize=26,
horizontalalignment='left',
verticalalignment='top',
color='#222222',
weight='bold'
)
txt.set_path_effects([PathEffects.withStroke(linewidth=6, foreground='#FFFFFF')])
for index, row in pd.concat([df_genvasc_practices, df_potential_practices]).iterrows():
x, y = map(row['x'], row['y'])
map.plot(x, y, marker='.', color=PRACTICE_COLOR, markersize=15)
def print_bar(title, first, second, color, max, step):
plt.title(title, fontsize=26, y=1.02)
plt.bar((title), first, align='center', color=color)
plt.bar((title), second, bottom=first, align='center', color=darken_color(color, 2))
plt.yticks(np.arange(0, step * round(max/step), step=step), fontsize=20)
plt.xticks([])
def recruits():
ax_recruits = plt.subplot(gs[1])
df_recruitment = pd.read_csv('data/genvasc_recruitment.csv')
df_recruitment = df_recruitment.set_index('year')
recruit_count = df_recruitment.loc[year]['cum_recruited']
potential_recruits = 0
if include_potential:
potential_linc_cam_in_2_years = 8000
actual_llr_northants_in_last_year = 8000
potential_recruits += potential_linc_cam_in_2_years + (actual_llr_northants_in_last_year * 2)
print_bar('Recruits', recruit_count, potential_recruits, RECRUIT_COLOR, 75_000, 10_000)
def practices(df_genvasc_practices, df_potential_practices):
ax_practices = plt.subplot(gs[0])
practice_count = len(df_genvasc_practices.index)
potential_practice_count = (0.7 * len(df_potential_practices.index))
print_bar('Practices', practice_count, potential_practice_count, PRACTICE_COLOR, 600, 100)
year = 2018
include_potential = False
output_filename = 'genvasc_2018'
plt.rcParams["font.family"] = "lato"
fig = plt.figure(figsize=(16, 12))
gs = gridspec.GridSpec(
1,
3,
width_ratios=[1, 1,10]
)
df_genvasc_practices = pd.read_csv('data/genvasc_practices_geo.csv')
from airflow.models import Variable
import pandas as pd
import sqlalchemy as db
import configparser
import logging
# variables
SOURCE_MYSQL_HOST = Variable.get('SOURCE_MYSQL_HOST')
SOURCE_MYSQL_PORT = Variable.get('SOURCE_MYSQL_PORT')
SOURCE_MYSQL_USER = Variable.get('SOURCE_MYSQL_USER')
SOURCE_MYSQL_PASSWORD = Variable.get('SOURCE_MYSQL_PASSWORD')
SOURCE_MYSQL_ROOT_PASSWORD = Variable.get('SOURCE_MYSQL_ROOT_PASSWORD')
SOURCE_MYSQL_DATABASE = Variable.get('SOURCE_MYSQL_DATABASE')
DW_MYSQL_HOST = Variable.get('DW_MYSQL_HOST')
DW_MYSQL_PORT = Variable.get('DW_MYSQL_PORT')
DW_MYSQL_USER = Variable.get('DW_MYSQL_USER')
DW_MYSQL_PASSWORD = Variable.get('DW_MYSQL_PASSWORD')
DW_MYSQL_ROOT_PASSWORD = Variable.get('DW_MYSQL_ROOT_PASSWORD')
DW_MYSQL_DATABASE = Variable.get('DW_MYSQL_DATABASE')
# Database connection URI
db_conn_url = "mysql+pymysql://{}:{}@{}:{}/{}".format(SOURCE_MYSQL_USER,
SOURCE_MYSQL_PASSWORD,
SOURCE_MYSQL_HOST,
SOURCE_MYSQL_PORT,
SOURCE_MYSQL_DATABASE)
db_engine = db.create_engine(db_conn_url)
# Data warehouse connection URI
dw_conn_url = "mysql+pymysql://{}:{}@{}:{}/{}".format(DW_MYSQL_USER,
DW_MYSQL_PASSWORD,
DW_MYSQL_HOST,
DW_MYSQL_PORT,
DW_MYSQL_DATABASE)
dw_engine = db.create_engine(dw_conn_url)
def get_dimStore_last_id(db_engine):
"""Function to get last store_key from dimemsion table `dimStore`"""
query = "SELECT max(store_key) AS last_id FROM dimStore"
tdf = pd.read_sql(query, db_engine)
return tdf.iloc[0]['last_id']
def extract_table_store(last_id, db_engine):
"""Function to extract table `store`"""
if last_id == None:
last_id = -1
query = "SELECT * FROM store WHERE store_id > {} LIMIT 100000".format(
last_id)
logging.info("query={}".format(query))
return pd.read_sql(query, db_engine)
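# Illustrative only (not part of the original DAG wiring): the two helpers above are
# typically combined for incremental extraction, e.g.
#   last_id = get_dimStore_last_id(dw_engine)
#   store_df = extract_table_store(last_id, db_engine)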
def lookup_table_address(store_df, db_engine):
"""Function to lookup table `address`"""
unique_ids = list(store_df.address_id.unique())
unique_ids = list(filter(None, unique_ids))
query = "SELECT * FROM address WHERE address_id IN ({})".format(
','.join(map(str, unique_ids)))
return pd.read_sql(query, db_engine)
def lookup_table_city(address_df, db_engine):
"""Function to lookup table `city`"""
unique_ids = list(address_df.city_id.unique())
unique_ids = list(filter(None, unique_ids))
query = "SELECT * FROM city WHERE city_id IN ({})".format(
','.join(map(str, unique_ids)))
return pd.read_sql(query, db_engine)
def lookup_table_country(address_df, db_engine):
"""Function to lookup table `country`"""
unique_ids = list(address_df.country_id.unique())
unique_ids = list(filter(None, unique_ids))
query = "SELECT * FROM country WHERE country_id IN ({})".format(
','.join(map(str, unique_ids)))
    return pd.read_sql(query, db_engine)
from __future__ import print_function, division
import random
import sys
from matplotlib import rcParams
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import h5py
from keras.models import load_model
from keras.models import Sequential
from keras.layers import Dense, Conv1D, LSTM, Bidirectional, Dropout
from keras.utils import plot_model
from nilmtk.utils import find_nearest
from nilmtk.feature_detectors import cluster
from nilmtk.legacy.disaggregate import Disaggregator
from nilmtk.datastore import HDFDataStore
class RNNDisaggregator(Disaggregator):
'''Attempt to create a RNN Disaggregator
Attributes
----------
model : keras Sequential model
mmax : the maximum value of the aggregate data
MIN_CHUNK_LENGTH : int
the minimum length of an acceptable chunk
'''
def __init__(self):
'''Initialize disaggregator
'''
self.MODEL_NAME = "LSTM"
self.mmax = None
self.MIN_CHUNK_LENGTH = 100
self.model = self._create_model()
def train(self, mains, meter, epochs=1, batch_size=128, **load_kwargs):
'''Train
Parameters
----------
mains : a nilmtk.ElecMeter object for the aggregate data
meter : a nilmtk.ElecMeter object for the meter data
epochs : number of epochs to train
batch_size : size of batch used for training
**load_kwargs : keyword arguments passed to `meter.power_series()`
'''
main_power_series = mains.power_series(**load_kwargs)
meter_power_series = meter.power_series(**load_kwargs)
# Train chunks
run = True
mainchunk = next(main_power_series)
meterchunk = next(meter_power_series)
if self.mmax == None:
self.mmax = mainchunk.max()
while(run):
mainchunk = self._normalize(mainchunk, self.mmax)
meterchunk = self._normalize(meterchunk, self.mmax)
self.train_on_chunk(mainchunk, meterchunk, epochs, batch_size)
try:
mainchunk = next(main_power_series)
meterchunk = next(meter_power_series)
except:
run = False
def train_on_chunk(self, mainchunk, meterchunk, epochs, batch_size):
'''Train using only one chunk
Parameters
----------
mainchunk : chunk of site meter
meterchunk : chunk of appliance
epochs : number of epochs for training
batch_size : size of batch used for training
'''
# Replace NaNs with 0s
mainchunk.fillna(0, inplace=True)
meterchunk.fillna(0, inplace=True)
ix = mainchunk.index.intersection(meterchunk.index)
mainchunk = np.array(mainchunk[ix])
meterchunk = np.array(meterchunk[ix])
mainchunk = np.reshape(mainchunk, (mainchunk.shape[0],1,1))
self.model.fit(mainchunk, meterchunk, epochs=epochs, batch_size=batch_size, shuffle=True)
def train_across_buildings(self, mainlist, meterlist, epochs=1, batch_size=128, **load_kwargs):
'''Train using data from multiple buildings
Parameters
----------
mainlist : a list of nilmtk.ElecMeter objects for the aggregate data of each building
meterlist : a list of nilmtk.ElecMeter objects for the meter data of each building
batch_size : size of batch used for training
epochs : number of epochs to train
**load_kwargs : keyword arguments passed to `meter.power_series()`
'''
assert len(mainlist) == len(meterlist), "Number of main and meter channels should be equal"
num_meters = len(mainlist)
mainps = [None] * num_meters
meterps = [None] * num_meters
mainchunks = [None] * num_meters
meterchunks = [None] * num_meters
# Get generators of timeseries
for i,m in enumerate(mainlist):
mainps[i] = m.power_series(**load_kwargs)
for i,m in enumerate(meterlist):
meterps[i] = m.power_series(**load_kwargs)
# Get a chunk of data
for i in range(num_meters):
mainchunks[i] = next(mainps[i])
meterchunks[i] = next(meterps[i])
if self.mmax == None:
self.mmax = max([m.max() for m in mainchunks])
run = True
while(run):
# Normalize and train
mainchunks = [self._normalize(m, self.mmax) for m in mainchunks]
meterchunks = [self._normalize(m, self.mmax) for m in meterchunks]
self.train_across_buildings_chunk(mainchunks, meterchunks, epochs, batch_size)
# If more chunks, repeat
try:
for i in range(num_meters):
mainchunks[i] = next(mainps[i])
meterchunks[i] = next(meterps[i])
except:
run = False
def train_across_buildings_chunk(self, mainchunks, meterchunks, epochs, batch_size):
'''Train using only one chunk of data. This chunk consists of data from
all buildings.
Parameters
----------
mainchunk : chunk of site meter
meterchunk : chunk of appliance
epochs : number of epochs for training
batch_size : size of batch used for training
'''
num_meters = len(mainchunks)
batch_size = int(batch_size/num_meters)
num_of_batches = [None] * num_meters
# Find common parts of timeseries
for i in range(num_meters):
mainchunks[i].fillna(0, inplace=True)
meterchunks[i].fillna(0, inplace=True)
ix = mainchunks[i].index.intersection(meterchunks[i].index)
m1 = mainchunks[i]
m2 = meterchunks[i]
mainchunks[i] = m1[ix]
meterchunks[i] = m2[ix]
num_of_batches[i] = int(len(ix)/batch_size) - 1
for e in range(epochs): # Iterate for every epoch
print(e)
batch_indexes = list(range(min(num_of_batches)))
random.shuffle(batch_indexes)
for bi, b in enumerate(batch_indexes): # Iterate for every batch
print("Batch {} of {}".format(bi,num_of_batches), end="\r")
sys.stdout.flush()
X_batch = np.empty((batch_size*num_meters, 1, 1))
Y_batch = np.empty((batch_size*num_meters, 1))
# Create a batch out of data from all buildings
for i in range(num_meters):
mainpart = mainchunks[i]
meterpart = meterchunks[i]
mainpart = mainpart[b*batch_size:(b+1)*batch_size]
meterpart = meterpart[b*batch_size:(b+1)*batch_size]
X = np.reshape(mainpart, (batch_size, 1, 1))
Y = np.reshape(meterpart, (batch_size, 1))
X_batch[i*batch_size:(i+1)*batch_size] = np.array(X)
Y_batch[i*batch_size:(i+1)*batch_size] = np.array(Y)
# Shuffle data
p = np.random.permutation(len(X_batch))
X_batch, Y_batch = X_batch[p], Y_batch[p]
# Train model
self.model.train_on_batch(X_batch, Y_batch)
print("\n")
def disaggregate(self, mains, output_datastore, meter_metadata, **load_kwargs):
'''Disaggregate mains according to the model learnt previously.
Parameters
----------
mains : a nilmtk.ElecMeter of aggregate data
meter_metadata: a nilmtk.ElecMeter of the observed meter used for storing the metadata
output_datastore : instance of nilmtk.DataStore subclass
For storing power predictions from disaggregation algorithm.
**load_kwargs : key word arguments
Passed to `mains.power_series(**kwargs)`
'''
load_kwargs = self._pre_disaggregation_checks(load_kwargs)
load_kwargs.setdefault('sample_period', 60)
load_kwargs.setdefault('sections', mains.good_sections())
timeframes = []
building_path = '/building{}'.format(mains.building())
mains_data_location = building_path + '/elec/meter1'
data_is_available = False
for chunk in mains.power_series(**load_kwargs):
if len(chunk) < self.MIN_CHUNK_LENGTH:
continue
print("New sensible chunk: {}".format(len(chunk)))
timeframes.append(chunk.timeframe)
measurement = chunk.name
chunk2 = self._normalize(chunk, self.mmax)
appliance_power = self.disaggregate_chunk(chunk2)
appliance_power[appliance_power < 0] = 0
appliance_power = self._denormalize(appliance_power, self.mmax)
# Append prediction to output
data_is_available = True
cols = pd.MultiIndex.from_tuples([chunk.name])
meter_instance = meter_metadata.instance()
df = pd.DataFrame(
appliance_power.values, index=appliance_power.index,
columns=cols, dtype="float32")
key = '{}/elec/meter{}'.format(building_path, meter_instance)
output_datastore.append(key, df)
# Append aggregate data to output
            mains_df = pd.DataFrame(chunk, columns=cols, dtype="float32")
from src.typeDefs.wbesPxTableRecord import ISection_2_2, IWbesPxHeaders, IWbesPxTableRecord
import datetime as dt
from src.repos.metricsData.metricsDataRepo import MetricsDataRepo
import pandas as pd
def fetchWbesPxTableContext(appDbConnStr: str, startDt: dt.datetime, endDt: dt.datetime) -> ISection_2_2:
mRepo = MetricsDataRepo(appDbConnStr)
# get iex Px data for the range between start date and end date
wbesPxIexVals = mRepo.getWbesPxIexBlockWiseData(startDt, endDt)
wbesPxPxiVals = mRepo.getWbesPxPxiBlockWiseData(startDt, endDt)
wbesPxIexDf = pd.DataFrame(wbesPxIexVals)
wbesPxPxiDf = pd.DataFrame(wbesPxPxiVals)
wbesPxIexTableDf = wbesPxIexDf.groupby(['time_stamp', 'beneficiary', 'beneficiary_type']).sum()
wbesPxPxiTableDf = wbesPxPxiDf.groupby(['time_stamp', 'beneficiary', 'beneficiary_type']).sum()
wbesPxIexTableDf = wbesPxIexTableDf.rename(columns={'data_value': 'px_iex_data'})
wbesPxIexTableDf.reset_index(inplace = True)
index_names = wbesPxIexTableDf[wbesPxIexTableDf['beneficiary_type'] == 'path'].index
wbesPxIexTableDf.drop(index_names, inplace = True)
index_names = wbesPxIexTableDf[wbesPxIexTableDf['beneficiary'] == 'West '].index
wbesPxIexTableDf.drop(index_names, inplace = True)
wbesPxIexTableDf.reset_index(inplace = True)
for itr in range(len(wbesPxIexTableDf)):
if wbesPxIexTableDf['beneficiary_type'][itr] == ' Injection ':
wbesPxIexTableDf['beneficiary_type'][itr] = 'Sell'
wbesPxIexTableDf['px_iex_data'][itr] = -1*(wbesPxIexTableDf['px_iex_data'][itr])
if wbesPxIexTableDf['beneficiary_type'][itr] == ' Drawal ':
wbesPxIexTableDf['beneficiary_type'][itr] = 'Buy'
wbesPxIexTableDf['beneficiary_name'] = wbesPxIexTableDf.beneficiary.str.cat(wbesPxIexTableDf.beneficiary_type,sep=" ")
wbesPxIexTableDf.drop(['index', 'beneficiary_type', 'beneficiary'],axis=1,inplace=True)
wbesPxPxiTableDf = wbesPxPxiTableDf.rename(columns={'data_value': 'px_pxi_data'})
wbesPxPxiTableDf.reset_index(inplace = True)
index_names = wbesPxPxiTableDf[wbesPxPxiTableDf['beneficiary_type'] == 'path'].index
wbesPxPxiTableDf.drop(index_names, inplace = True)
index_names = wbesPxPxiTableDf[wbesPxPxiTableDf['beneficiary'] == 'West '].index
wbesPxPxiTableDf.drop(index_names, inplace = True)
wbesPxPxiTableDf.reset_index(inplace = True)
for itr in range(len(wbesPxPxiTableDf)):
if wbesPxPxiTableDf['beneficiary_type'][itr] == ' Injection ':
wbesPxPxiTableDf['beneficiary_type'][itr] = 'Sell'
wbesPxPxiTableDf['px_pxi_data'][itr] = -1*(wbesPxPxiTableDf['px_pxi_data'][itr])
if wbesPxPxiTableDf['beneficiary_type'][itr] == ' Drawal ':
wbesPxPxiTableDf['beneficiary_type'][itr] = 'Buy'
wbesPxPxiTableDf['beneficiary_name'] = wbesPxPxiTableDf.beneficiary.str.cat(wbesPxPxiTableDf.beneficiary_type,sep=" ")
wbesPxPxiTableDf.drop(['index', 'beneficiary_type', 'beneficiary'],axis=1,inplace=True)
# testing
testPxIex = wbesPxIexTableDf
testPxPxi = wbesPxPxiTableDf
testPxPxi = testPxPxi.rename(columns={'px_pxi_data': 'data_value'})
testPxIex = testPxIex.rename(columns={'px_iex_data': 'data_value'})
testPxIex = testPxIex.append(testPxPxi, ignore_index=True)
testPxIex = testPxIex.groupby(['time_stamp', 'beneficiary_name']).sum()
testPxIex.reset_index(inplace = True)
testPxIex['data_value'] = testPxIex['data_value']/4
testPxIex['data_value'] = testPxIex['data_value'].astype(int)
testPxIex['time_stamp'] = pd.to_datetime(testPxIex['time_stamp']).dt.date
# testPxIex['time_stamp'] = testPxIex['time_stamp'].dt.strftime('%d-%m-%Y')
testPxIex = testPxIex.pivot(
index='beneficiary_name', columns='time_stamp', values='data_value')
testPxIex = testPxIex.fillna(0)
testPxPxi = testPxPxi.rename(columns={'data_value': 'wbes_rtm_data'})
wbesPxTableDf = testPxIex
wbesPxTableDf['Grand Total'] = wbesPxTableDf.sum(axis=1)
index_names = wbesPxTableDf[wbesPxTableDf['Grand Total'] == 0].index
wbesPxTableDf.drop(index_names, inplace = True)
# testing starts
wbesPxTableDf.reset_index(inplace = True)
injection_vals = wbesPxTableDf[wbesPxTableDf['Grand Total'] < 0].index
injection_df = wbesPxTableDf.loc[injection_vals]
injection_sum = injection_df.select_dtypes(pd.np.number).sum().rename('total')
drawal_vals = wbesPxTableDf[wbesPxTableDf['Grand Total'] > 0].index
drawal_df = wbesPxTableDf.loc[drawal_vals]
drawal_sum = drawal_df.select_dtypes(pd.np.number).sum().rename('total')
# testing ends
wbesPxTableDf.reset_index(inplace = True)
wbesPxTableDf = wbesPxTableDf.sort_values(by='Grand Total')
# demo starts
wbesPxTableDf= wbesPxTableDf.append(injection_sum,ignore_index=True)
for itr in range(len(wbesPxTableDf['beneficiary_name'])):
        if(pd.isnull(wbesPxTableDf['beneficiary_name'][itr])):
import collections
import json
import re
from collections import defaultdict
from io import StringIO
import numpy as np
import pandas as pd
import plotly.offline as opy
from clustergrammer import Network
from django.conf import settings
from django.urls import reverse
from django.utils import timezone
from loguru import logger
from linker.common import load_obj
from linker.constants import *
from linker.metadata import get_gene_names, get_compound_metadata, clean_label
from linker.models import Analysis, AnalysisData, Share, AnalysisHistory
from linker.reactome import ensembl_to_uniprot, uniprot_to_reaction, compound_to_reaction, \
reaction_to_pathway, reaction_to_uniprot, reaction_to_compound, uniprot_to_ensembl
from linker.reactome import get_reaction_df
from linker.views.pipelines import GraphOmicsInference
Relation = collections.namedtuple('Relation', 'keys values mapping_list')
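# A Relation's keys/values hold the source and target ids of a mapping; mapping_list holds
# the individual source-to-target records that are later serialised to JSON.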
def reactome_mapping(observed_gene_df, observed_protein_df, observed_compound_df,
compound_database_str, species_list, metabolic_pathway_only):
### all the ids that we have from the user ###
observed_gene_ids = get_ids_from_dataframe(observed_gene_df)
observed_protein_ids = get_ids_from_dataframe(observed_protein_df)
# try to convert all kegg ids to chebi ids, if possible
logger.info('Converting kegg ids -> chebi ids')
observed_compound_ids = get_ids_from_dataframe(observed_compound_df)
KEGG_2_CHEBI = load_obj(settings.EXTERNAL_KEGG_TO_CHEBI)
for cid in observed_compound_ids:
if cid not in KEGG_2_CHEBI:
logger.warning('Not found: %s' % cid)
KEGG_2_CHEBI[cid] = cid
if observed_compound_df is not None:
if compound_database_str == COMPOUND_DATABASE_CHEBI:
observed_compound_df.iloc[:, 0] = observed_compound_df.iloc[:, 0].map(
KEGG_2_CHEBI) # assume 1st column is id
observed_compound_ids = get_ids_from_dataframe(observed_compound_df)
### map genes -> proteins ###
logger.info('Mapping genes -> proteins')
gene_2_proteins_mapping, _ = ensembl_to_uniprot(observed_gene_ids, species_list)
gene_2_proteins = make_relations(gene_2_proteins_mapping, GENE_PK, PROTEIN_PK, value_key=None)
### maps proteins -> reactions ###
logger.info('Mapping proteins -> reactions')
protein_ids_from_genes = gene_2_proteins.values
known_protein_ids = list(set(observed_protein_ids + protein_ids_from_genes))
protein_2_reactions_mapping, _ = uniprot_to_reaction(known_protein_ids, species_list)
protein_2_reactions = make_relations(protein_2_reactions_mapping, PROTEIN_PK, REACTION_PK,
value_key='reaction_id')
### maps compounds -> reactions ###
logger.info('Mapping compounds -> reactions')
compound_2_reactions_mapping, _ = compound_to_reaction(observed_compound_ids, species_list)
compound_2_reactions = make_relations(compound_2_reactions_mapping, COMPOUND_PK, REACTION_PK,
value_key='reaction_id')
### maps reactions -> metabolite pathways ###
logger.info('Mapping reactions -> metabolite pathways')
reaction_ids_from_proteins = protein_2_reactions.values
reaction_ids_from_compounds = compound_2_reactions.values
reaction_ids = list(set(reaction_ids_from_proteins + reaction_ids_from_compounds))
reaction_2_pathways_mapping, reaction_2_pathways_id_to_names = reaction_to_pathway(reaction_ids,
species_list,
metabolic_pathway_only)
reaction_2_pathways = make_relations(reaction_2_pathways_mapping, REACTION_PK, PATHWAY_PK,
value_key='pathway_id')
### maps reactions -> proteins ###
logger.info('Mapping reactions -> proteins')
mapping, _ = reaction_to_uniprot(reaction_ids, species_list)
reaction_2_proteins = make_relations(mapping, REACTION_PK, PROTEIN_PK, value_key=None)
protein_2_reactions = merge_relation(protein_2_reactions, reverse_relation(reaction_2_proteins))
all_protein_ids = protein_2_reactions.keys
### maps reactions -> compounds ###
logger.info('Mapping reactions -> compounds')
if compound_database_str == COMPOUND_DATABASE_KEGG:
use_kegg = True
else:
use_kegg = False
reaction_2_compounds_mapping, reaction_to_compound_id_to_names = reaction_to_compound(reaction_ids, species_list,
use_kegg)
reaction_2_compounds = make_relations(reaction_2_compounds_mapping, REACTION_PK, COMPOUND_PK, value_key=None)
compound_2_reactions = merge_relation(compound_2_reactions, reverse_relation(reaction_2_compounds))
all_compound_ids = compound_2_reactions.keys
### map proteins -> genes ###
logger.info('Mapping proteins -> genes')
mapping, _ = uniprot_to_ensembl(all_protein_ids, species_list)
protein_2_genes = make_relations(mapping, PROTEIN_PK, GENE_PK, value_key=None)
gene_2_proteins = merge_relation(gene_2_proteins, reverse_relation(protein_2_genes))
all_gene_ids = gene_2_proteins.keys
### add links ###
# map NA to NA
gene_2_proteins = add_links(gene_2_proteins, GENE_PK, PROTEIN_PK, [NA], [NA])
protein_2_reactions = add_links(protein_2_reactions, PROTEIN_PK, REACTION_PK, [NA], [NA])
compound_2_reactions = add_links(compound_2_reactions, COMPOUND_PK, REACTION_PK, [NA], [NA])
reaction_2_pathways = add_links(reaction_2_pathways, REACTION_PK, PATHWAY_PK, [NA], [NA])
# map genes that have no proteins to NA
gene_pk_list = [x for x in all_gene_ids if x not in gene_2_proteins.keys]
gene_2_proteins = add_links(gene_2_proteins, GENE_PK, PROTEIN_PK, gene_pk_list, [NA])
# map proteins that have no genes to NA
protein_pk_list = [x for x in all_protein_ids if x not in gene_2_proteins.values]
gene_2_proteins = add_links(gene_2_proteins, GENE_PK, PROTEIN_PK, [NA], protein_pk_list)
# map proteins that have no reactions to NA
protein_pk_list = [x for x in all_protein_ids if x not in protein_2_reactions.keys]
protein_2_reactions = add_links(protein_2_reactions, PROTEIN_PK, REACTION_PK, protein_pk_list, [NA])
# map reactions that have no proteins to NA
reaction_pk_list = [x for x in reaction_ids if x not in protein_2_reactions.values]
protein_2_reactions = add_links(protein_2_reactions, PROTEIN_PK, REACTION_PK, [NA], reaction_pk_list)
# map compounds that have no reactions to NA
compound_pk_list = [x for x in all_compound_ids if x not in compound_2_reactions.keys]
compound_2_reactions = add_links(compound_2_reactions, COMPOUND_PK, REACTION_PK, compound_pk_list, [NA])
# map reactions that have no compounds to NA
reaction_pk_list = [x for x in reaction_ids if x not in compound_2_reactions.values]
compound_2_reactions = add_links(compound_2_reactions, COMPOUND_PK, REACTION_PK, [NA], reaction_pk_list)
# map reactions that have no pathways to NA
reaction_pk_list = [x for x in reaction_ids if x not in reaction_2_pathways.keys]
reaction_2_pathways = add_links(reaction_2_pathways, REACTION_PK, PATHWAY_PK, reaction_pk_list, [NA])
GTF_DICT = load_obj(settings.EXTERNAL_GENE_NAMES)
metadata_map = get_gene_names(all_gene_ids, GTF_DICT)
genes_json = pk_to_json(GENE_PK, 'gene_id', all_gene_ids, metadata_map, observed_gene_df,
observed_ids=observed_gene_ids)
gene_2_proteins_json = json.dumps(gene_2_proteins.mapping_list)
# metadata_map = get_uniprot_metadata_online(uniprot_ids)
proteins_json = pk_to_json('protein_pk', 'protein_id', all_protein_ids, metadata_map, observed_protein_df,
observed_ids=observed_protein_ids)
protein_2_reactions_json = json.dumps(protein_2_reactions.mapping_list)
# TODO: this feels like a very bad way to implement this
# We need to deal with uploaded peak data from PiMP, which contains a lot of duplicate identifications per peak
KEGG_ID_2_DISPLAY_NAMES = load_obj(settings.EXTERNAL_COMPOUND_NAMES)
metadata_map = get_compound_metadata(all_compound_ids, KEGG_ID_2_DISPLAY_NAMES, reaction_to_compound_id_to_names)
try:
mapping = get_mapping(observed_compound_df)
except KeyError:
mapping = None
except AttributeError:
mapping = None
compounds_json = pk_to_json('compound_pk', 'compound_id', all_compound_ids, metadata_map, observed_compound_df,
observed_ids=observed_compound_ids, mapping=mapping)
if mapping:
compound_2_reactions = expand_relation(compound_2_reactions, mapping, 'compound_pk')
compound_2_reactions_json = json.dumps(compound_2_reactions.mapping_list)
metadata_map = {}
for name in reaction_2_pathways_id_to_names:
tok = reaction_2_pathways_id_to_names[name]['name']
filtered = clean_label(tok)
species = reaction_2_pathways_id_to_names[name]['species']
metadata_map[name] = {'display_name': filtered, 'species': species}
reaction_count_df = None
pathway_count_df = None
pathway_ids = reaction_2_pathways.values
reactions_json = pk_to_json('reaction_pk', 'reaction_id', reaction_ids, metadata_map, reaction_count_df,
has_species=True)
pathways_json = pk_to_json('pathway_pk', 'pathway_id', pathway_ids, metadata_map, pathway_count_df,
has_species=True)
reaction_2_pathways_json = json.dumps(reaction_2_pathways.mapping_list)
results = {
GENOMICS: genes_json,
PROTEOMICS: proteins_json,
METABOLOMICS: compounds_json,
REACTIONS: reactions_json,
PATHWAYS: pathways_json,
GENES_TO_PROTEINS: gene_2_proteins_json,
PROTEINS_TO_REACTIONS: protein_2_reactions_json,
COMPOUNDS_TO_REACTIONS: compound_2_reactions_json,
REACTIONS_TO_PATHWAYS: reaction_2_pathways_json,
}
return results
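# Illustrative call (hypothetical dataframes; species names as used by Reactome):
# results = reactome_mapping(gene_df, protein_df, compound_df,
#                            COMPOUND_DATABASE_KEGG, ['Homo sapiens'], True)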
def get_mapping(observed_compound_df):
mapping = defaultdict(list)
for idx, row in observed_compound_df.iterrows():
identifier = row[IDENTIFIER_COL]
peak_id = row[PIMP_PEAK_ID_COL]
mapping[identifier].append('%s_%s' % (identifier, peak_id))
return dict(mapping)
def save_analysis(analysis_name, analysis_desc,
genes_str, proteins_str, compounds_str, compound_database_str,
results, species_list, current_user, metabolic_pathway_only,
publication, publication_link):
metadata = {
'genes_str': genes_str,
'proteins_str': proteins_str,
'compounds_str': compounds_str,
'compound_database_str': compound_database_str,
'species_list': species_list,
'metabolic_pathway_only': metabolic_pathway_only
}
analysis = Analysis.objects.create(name=analysis_name,
description=analysis_desc,
metadata=metadata,
publication=publication,
publication_link=publication_link)
share = Share(user=current_user, analysis=analysis, read_only=False, owner=True)
share.save()
logger.info('Saved analysis %d (%s)' % (analysis.pk, species_list))
datatype_json = {
GENOMICS: (results[GENOMICS], 'genes_json', results['group_gene_df']),
PROTEOMICS: (results[PROTEOMICS], 'proteins_json', results['group_protein_df']),
METABOLOMICS: (results[METABOLOMICS], 'compounds_json', results['group_compound_df']),
REACTIONS: (results[REACTIONS], 'reactions_json', None),
PATHWAYS: (results[PATHWAYS], 'pathways_json', None),
GENES_TO_PROTEINS: (results[GENES_TO_PROTEINS], 'gene_proteins_json', None),
PROTEINS_TO_REACTIONS: (results[PROTEINS_TO_REACTIONS], 'protein_reactions_json', None),
COMPOUNDS_TO_REACTIONS: (results[COMPOUNDS_TO_REACTIONS], 'compound_reactions_json', None),
REACTIONS_TO_PATHWAYS: (results[REACTIONS_TO_PATHWAYS], 'reaction_pathways_json', None),
}
data = {}
for data_type, data_value in datatype_json.items():
# data_value is a tuple defined in the datatype_json dictionary above
json_str, ui_label, group_info = data_value
data[ui_label] = json_str
json_data = json.loads(json_str)
json_design = json.loads(group_info.to_json()) if group_info is not None else None
# key: comparison_name, value: a list of comparison results (p-values and FCs), if any
comparison_data = defaultdict(list)
# if it's a measurement data
if data_type in PKS:
# check the first row in json_data to see if there are any comparison results (p-values and FCs)
comparison_names = []
first_row = json_data[0]
for col_name, col_value in first_row.items():
if col_name.startswith(
PADJ_COL_PREFIX): # assume if we have the p-value column, there's also the FC column
comparison_name = col_name.replace(PADJ_COL_PREFIX, '', 1)
comparison_names.append(comparison_name)
# collect all measurement and comparison data
pk_col = PKS[data_type]
measurement_data = []
for row in json_data:
# separate the measurement data and the comparison data
new_measurement_row = {}
new_comparison_rows = defaultdict(
dict) # key: comparison_name, value: a comparison row (a dict of key: value pair)
for col_name, col_value in row.items():
# insert id columns into both comparison and measurement rows
if col_name == pk_col:
new_measurement_row[col_name] = col_value
for comparison_name in comparison_names:
new_comparison_rows[comparison_name].update({col_name: col_value})
# insert p-value column into comparison row
elif col_name.startswith(PADJ_COL_PREFIX):
comparison_name = col_name.replace(PADJ_COL_PREFIX, '', 1)
new_comparison_rows[comparison_name].update({'padj': col_value})
# insert FC column into comparison row
elif col_name.startswith(FC_COL_PREFIX):
comparison_name = col_name.replace(FC_COL_PREFIX, '', 1)
new_comparison_rows[comparison_name].update({'log2FoldChange': col_value})
                    # insert everything else into measurement rows
else:
new_measurement_row[col_name] = col_value
measurement_data.append(new_measurement_row)
for comparison_name in new_comparison_rows:
new_comparison_row = new_comparison_rows[comparison_name]
comparison_data[comparison_name].append(new_comparison_row)
else: # if it's other linking data, just store it directly
measurement_data = json_data
# create a new analysis data and save it
analysis_data = AnalysisData(analysis=analysis,
json_data=measurement_data,
json_design=json_design,
data_type=data_type)
# make clustergrammer if we have data
if data_type in [GENOMICS, PROTEOMICS, METABOLOMICS]:
cluster_json = get_clusters(analysis_data, data_type)
analysis_data.metadata = {
'clustergrammer': cluster_json
}
analysis_data.save()
logger.info('Saved analysis data %d for analysis %d' % (analysis_data.pk, analysis.pk))
# save each comparison separately into an AnalysisHistory
for comparison_name in comparison_data:
comparisons = comparison_data[comparison_name]
result_df = pd.DataFrame(comparisons)
pk_col = [col for col in result_df.columns if col in PKS.values()][0]
result_df.set_index(pk_col, inplace=True)
tokens = comparison_name.split('_vs_')
case = tokens[0]
control = tokens[1]
display_name = 'Loaded: %s_vs_%s' % (case, control)
inference_data = get_inference_data(data_type, case, control, result_df)
save_analysis_history(analysis_data, inference_data, display_name, INFERENCE_LOADED)
# if settings.DEBUG:
# save_json_string(v[0], 'static/data/debugging/' + v[1] + '.json')
return analysis
def get_clusters(analysis_data, data_type):
axis = 1
X_std, data_df, design_df = get_standardized_df(analysis_data, axis, pk_cols=IDS)
if data_type == GENOMICS:
json_data = to_clustergrammer(X_std, design_df, run_enrichr=None, enrichrgram=True)
elif data_type == PROTEOMICS or data_type == METABOLOMICS:
json_data = to_clustergrammer(X_std, design_df)
return json_data
def get_standardized_df(analysis_data, axis, pk_cols=PKS):
data_type = analysis_data.data_type
data_df, design_df = get_dataframes(analysis_data, pk_cols)
# standardise data differently for genomics vs proteomics/metabolomics
X_std = None
if data_type == GENOMICS:
inference = GraphOmicsInference(data_df, design_df, data_type, min_value=MIN_REPLACE_GENOMICS)
X_std = inference.standardize_df(inference.data_df, axis=axis)
elif data_type == PROTEOMICS or data_type == METABOLOMICS:
inference = GraphOmicsInference(data_df, design_df, data_type, min_value=MIN_REPLACE_PROTEOMICS_METABOLOMICS)
X_std = inference.standardize_df(inference.data_df, log=True, axis=axis)
return X_std, data_df, design_df
def to_clustergrammer(data_df, design_df, run_enrichr=None, enrichrgram=None):
json_data = None
if not data_df.empty:
net = Network()
data_df = data_df[~data_df.index.duplicated(keep='first')] # remove rows with duplicate indices
net.load_df(data_df)
cats = {}
for k, v in design_df.groupby('group').groups.items():
cats[k] = v.values.tolist()
net.add_cats('col', [
{
'title': 'Group',
'cats': cats
}
])
# net.filter_sum('row', threshold=20)
# net.normalize(axis='col', norm_type='zscore')
# net.filter_N_top('row', 1000, rank_type='var')
# net.filter_threshold('row', threshold=3.0, num_occur=4)
# net.swap_nan_for_zero()
# net.downsample(ds_type='kmeans', axis='col', num_samples=10)
# net.random_sample(random_state=100, num_samples=10, axis='col')
net.cluster(dist_type='cosine', run_clustering=True,
dendro=True, views=['N_row_sum', 'N_row_var'],
linkage_type='average', sim_mat=False, filter_sim=0.1,
calc_cat_pval=False, run_enrichr=run_enrichr, enrichrgram=enrichrgram)
json_data = net.export_net_json()
return json_data
def get_last_data(analysis, data_type):
analysis_data = AnalysisData.objects.filter(analysis=analysis, data_type=data_type).order_by('-timestamp')[0]
return analysis_data
def get_context(analysis, current_user):
show_selection_group = True if not current_user.is_anonymous else False
view_names = {
TABLE_IDS[GENOMICS]: get_reverse_url('get_ensembl_gene_info', analysis),
TABLE_IDS[PROTEOMICS]: get_reverse_url('get_uniprot_protein_info', analysis),
TABLE_IDS[METABOLOMICS]: get_reverse_url('get_kegg_metabolite_info', analysis),
TABLE_IDS[REACTIONS]: get_reverse_url('get_reactome_reaction_info', analysis),
TABLE_IDS[PATHWAYS]: get_reverse_url('get_reactome_pathway_info', analysis),
'get_firdi_data': get_reverse_url('get_firdi_data', analysis),
'get_heatmap_data': get_reverse_url('get_heatmap_data', analysis),
'get_short_info': get_reverse_url('get_short_info', None),
'save_group': get_reverse_url('save_group', analysis),
'load_group': get_reverse_url('load_group', analysis),
'list_groups': get_reverse_url('list_groups', analysis),
'get_boxplot': get_reverse_url('get_boxplot', analysis),
'get_gene_ontology': get_reverse_url('get_gene_ontology', analysis),
}
context = {
'analysis_id': analysis.pk,
'analysis_name': analysis.name,
'analysis_description': analysis.description,
'analysis_species': analysis.get_species_str(),
'publication': analysis.publication,
'publication_link': analysis.publication_link,
'view_names': json.dumps(view_names),
'show_gene_data': show_data_table(analysis, GENOMICS),
'show_protein_data': show_data_table(analysis, PROTEOMICS),
'show_compound_data': show_data_table(analysis, METABOLOMICS),
'read_only': analysis.get_read_only_status(current_user),
'show_selection_group': show_selection_group
}
return context
def show_data_table(analysis, data_type):
analysis_data = get_last_analysis_data(analysis, data_type)
data_df, design_df = get_dataframes(analysis_data, IDS)
return np.any(data_df['obs'] == True) # show table if there's any observation
def get_reverse_url(viewname, analysis):
if analysis is not None:
return reverse(viewname, kwargs={'analysis_id': analysis.id})
else:
return reverse(viewname)
# TODO: no longer used, can remove?
def get_count_df(gene_2_proteins_mapping, protein_2_reactions_mapping, compound_2_reactions_mapping,
reaction_2_pathways_mapping, species_list):
count_df, pathway_compound_counts, pathway_protein_counts = get_reaction_df(
gene_2_proteins_mapping,
protein_2_reactions_mapping,
compound_2_reactions_mapping,
reaction_2_pathways_mapping,
species_list)
reaction_count_df = count_df.rename({
'reaction_id': 'reaction_pk',
'observed_protein_count': 'R_E',
'observed_compound_count': 'R_C'
}, axis='columns')
reaction_count_df = reaction_count_df.drop([
'reaction_name',
'protein_coverage',
'compound_coverage',
'all_coverage',
'protein',
'all_protein_count',
'compound',
'all_compound_count',
'pathway_ids',
'pathway_names'
], axis=1)
pathway_pks = set(list(pathway_compound_counts.keys()) + list(pathway_protein_counts.keys()))
data = []
for pathway_pk in pathway_pks:
try:
p_e = pathway_protein_counts[pathway_pk]
except KeyError:
p_e = 0
try:
p_c = pathway_compound_counts[pathway_pk]
except KeyError:
p_c = 0
data.append((pathway_pk, p_e, p_c))
pathway_count_df = pd.DataFrame(data, columns=['pathway_pk', 'P_E', 'P_C'])
return reaction_count_df, pathway_count_df
def save_json_string(data, outfile):
with open(outfile, 'w') as f:
f.write(data)
logger.debug('Saving %s' % outfile)
def csv_to_dataframe(csv_str):
# extract group, if any
filtered_str = ''
group_str = None
for line in csv_str.splitlines(): # go through all lines and remove the line containing the grouping info
if re.match(GROUP_COL, line, re.I):
group_str = line
else:
filtered_str += line + '\n'
# extract id values
data = StringIO(filtered_str)
try:
        data_df = pd.read_csv(data)
from typing import Dict, List, Optional
from datetime import datetime
import random
import string
from faker import Faker
import numpy as np
import pandas as pd
from helperpy.core.date_ops import get_random_timestamp
from helperpy.core.exceptions import raise_exception_if_invalid_option
def generate_random_hex_code() -> str:
"""Generates random 6-digit hexadecimal code"""
choices = '0123456789ABCDEF'
random_hex_code = '#'
for _ in range(6):
random_hex_code += random.choice(choices)
return random_hex_code
def generate_random_hex_codes(how_many: int) -> List[str]:
"""Returns list of random 6-digit hexadecimal codes"""
return [generate_random_hex_code() for _ in range(how_many)]
def generate_random_string(
length: Optional[int] = 15,
include_lowercase: Optional[bool] = True,
include_uppercase: Optional[bool] = True,
include_digits: Optional[bool] = True,
include_punctuations: Optional[bool] = True,
) -> str:
character_set = ""
if include_lowercase:
character_set += string.ascii_lowercase
if include_uppercase:
character_set += string.ascii_uppercase
if include_digits:
character_set += string.digits
if include_punctuations:
character_set += string.punctuation
return "".join((random.choice(character_set) for _ in range(length)))
def generate_random_data(
num_records: int,
column_to_datatype_mapper: Dict[str, str],
insert_random_nulls: Optional[bool] = False,
) -> pd.DataFrame:
"""
Returns DataFrame having randomly generated fake data.
Accepted data-type options for the columns in the DataFrame:
- integer
- float
- string
- date
- timestamp
- boolean
>>> generate_random_data(
num_records=1000,
column_to_datatype_mapper={
'name': 'string',
'age': 'integer',
'date_of_birth': 'date',
'joined_at': 'timestamp',
'salary': 'float',
'is_recent_recruit': 'boolean',
},
insert_random_nulls=False,
)
"""
datatypes = list(column_to_datatype_mapper.values())
for datatype in datatypes:
raise_exception_if_invalid_option(
option_name='datatype',
option_value=datatype,
valid_option_values=['integer', 'float', 'string', 'date', 'timestamp', 'boolean'],
)
dict_obj = {}
for column, datatype in column_to_datatype_mapper.items():
if datatype == 'integer':
dict_obj[column] = (random.randint(-99999, 99999) for _ in range(num_records))
elif datatype == 'float':
dict_obj[column] = (random.random() * random.choice([10, 100, 1000, 10000, 100000]) * random.choice([-1, 1]) for _ in range(num_records))
elif datatype == 'string':
dict_obj[column] = (generate_random_string(include_punctuations=False) for _ in range(num_records))
elif datatype == 'date':
dict_obj[column] = (get_random_timestamp().date() for _ in range(num_records))
elif datatype == 'timestamp':
dict_obj[column] = (get_random_timestamp() for _ in range(num_records))
elif datatype == 'boolean':
dict_obj[column] = (random.choice([True, False]) for _ in range(num_records))
    df = pd.DataFrame(data=dict_obj)
#!/usr/bin/env python3
##########################################################################################
# General Information
##########################################################################################
#
# Script created By <NAME> March 3, 2022
#
# Version 1.0 - Initial Creation of Script.
# Version 2.0 - Adding Computer fields and sheets to report
# Version 3.0 - Adding Bearer Token Auth for requests
# Version 4.0 - Adding Package to policy / Prestage Policy lookup for unused packages.
# Version 5.0 - Adding Computer Group Membership to Computers Sheet in report
# Version 6.0 - Adding Results filter for Computer Record Sheet to filter by
# computer, smart group, or none.
# Version 7.0 - Adding Configuration Profile Membership to Computers Sheet in report.
# Version 7.0 - Adding Default file path and file name to choice with date and time.
#
# This script takes User Input and will call the JAMF API and get all Information
# related to a Policy, Configuration Profile, and Computers.
#
# It looks up all selected Info and then returns an Excel spreadsheet.
#
# Fields returned in csv / Excel are as follows below:
#
#
##################################################
# Policy Record Type
##################################################
#
# Policy ID
# Policy Name
# Policy Category ID
# Policy Category Name
#
# Policy Target All Computers
#
# Policy Target Computer ID
# Policy Target Computer Name
#
# Policy Target Group ID
# Policy Target Group Name
# Policy Target Group is Smart
#
# Policy Exclusion Computer ID
# Policy Exclusion Computer Name
#
# Policy Exclusion Group id
# Policy Exclusion Group Name
# Policy Exclusion Group is Smart
#
# Policy Package ID
# Policy Package Name
# Policy Package Category Name
# Policy Package Filename
#
# Policy Script ID
# Policy Script Name
# Policy Script Category Name
# Policy Script Filename
#
#
##################################################
# Configuration Profile Record Type
##################################################
#
# Configuration Profile ID
# Configuration Profile Type
# Configuration Profile Name
#
# Configuration Profile Category ID
# Configuration Profile Category Name
#
# Configuration Profile Target Computer ID
# Configuration Profile Target Computer Name
#
# Configuration Profile Target Group ID
# Configuration Profile Target Group Name
# Configuration Profile Target Group is Smart
#
# Configuration Profile Exclusion Computer id
# Configuration Profile Exclusion Computer Name
#
# Configuration Profile Exclusion Group id
# Configuration Profile Exclusion Group Name
# Configuration Profile Exclusion Group is Smart
#
#
##################################################
# Computer Record Type
##################################################
# if you are usingFilter for SmartGroup
#
# Computer SmartGroup ID
#
# Computer SmartGroup Name
#
# Computer Record Type
#
# Computer ID
#
# Computer Name
#
# Computer Serial Number
#
# If you are not usingFilter or just single computer
#
# Computer Record Type
#
# Computer ID
#
# Computer Name
#
# Computer Serial Number
#
#
# Computer Make
#
# Computer Model
#
# Computer Model Identifier
#
# Computer OS Name
#
# Computer OS Version
#
# Computer OS Build
#
#
# Computer FileVault2 User
#
# Computer Local Account Name
#
# Computer Local Account Real Name
#
# Computer Local Account ID
#
# Computer Local Account is Admin
#
# Computer Local Account in LDAP
#
#
# Computer Group Membership Group ID
#
# Computer Group Membership Group Name
#
# Computer Group Membership Group Is Smart
#
#
# Configuration Profile Membership ID
#
# Configuration Profile Membership Name
#
#
##################################################
# Package to Policy lookup
##################################################
# Provides the following:
#
# Package used or Package Not Used
# in Policies
#
# Which Policy Package is used in. Policies
# or PreStage Policies
#
# Package ID
#
# Package Name
#
# Package File Name
#
# Policy ID if used in a Policy
#
# Policy Name if used in Policy
#
# PreStage Policy ID if used
# in PreStage Policy
#
# PreStage Policy Name if used
# in PreStage Policy
#
#
##################################################
# Additional Info
##################################################
#
# The only requirement is that you have Python3 on the device. The script will look for
# all other libraries and download them if they are not found.
#
# Run from terminal and answer the 3 fields. URL, API Username, API Password.
# You can also send command line args to the script
# For Example : yourScript.py "URL" "API Username" "API Password"
#
# You also get the option to select the path and filename for your xlsx file.
#
# In the Computers section you have the option of running the report against a
# smart group or against the whole instance.
#
# When looking up local accounts from the computers section, you can do an LDAP
# check to see what accounts are in LDAP. Great for when you use a JIM server.
#
# It will also look up all JIM servers and let you choose the one you want to use.
#
# The script uses the new bearer token auth for the API calls and then
# invalidates it when script is complete.
#
#
##########################################################################################
##########################################################################################
# License information
##########################################################################################
#
# Copyright (c) 2022 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##########################################################################################
##########################################################################################
# Imports
##########################################################################################
import os, sys, time, getpass, re, datetime
from os.path import exists
# For Using the Requests Library with the API
try:
import requests
except ImportError:
os.system('pip3 install requests')
time.sleep(3)
import requests
from requests.auth import HTTPBasicAuth
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from requests.exceptions import HTTPError
#For CSV processing with Pandas Library
try:
import pandas as pd
except ImportError:
os.system('pip3 install pandas')
time.sleep(3)
import pandas as pd
#For xlsx processing with openpyxl Library
try:
import openpyxl
except ImportError:
os.system('pip3 install openpyxl')
time.sleep(3)
import openpyxl
#For xlsx processing with xlsxwriter Library
try:
import xlsxwriter
except ImportError:
os.system('pip3 install xlsxwriter')
time.sleep(3)
import xlsxwriter
##########################################################################################
# Variables
##########################################################################################
#Set Variable for the Data
dataToCsvComputers = []
dataToCsvPolicy = []
dataToCsvConfigurationProfile = []
dataToCsvPackageToPolicy = []
JIMServerList = []
#To check User login in JAMF API
get_JAMF_URL_User_Test = "/JSSResource/accounts/username/"
# For default Local User Accounts you do not want in the List
filterDefaultUserAccountsList = ['daemon', 'jamfmgmt', 'nobody', 'root']
#Check CLA for input
if len(sys.argv) == 1:
# No CLA Given
APILoginURL = ""
APIUsername = ""
APIPassword = ""
elif len(sys.argv) == 2:
    # Only the URL given
APILoginURL = sys.argv[1]
APIUsername = ""
APIPassword = ""
elif len(sys.argv) == 3:
    # URL and username given
APILoginURL = sys.argv[1]
APIUsername = sys.argv[2]
APIPassword = ""
elif len(sys.argv) == 4:
    # URL, username, and password given
APILoginURL = sys.argv[1]
APIUsername = sys.argv[2]
APIPassword = sys.argv[3]
##########################################################################################
# Jamf API Setup Information
##########################################################################################
# requests headers
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
DEFAULT_TIMEOUT = 5 # seconds
class TimeoutHTTPAdapter(HTTPAdapter):
def __init__(self, *args, **kwargs):
self.timeout = DEFAULT_TIMEOUT
if "timeout" in kwargs:
self.timeout = kwargs["timeout"]
del kwargs["timeout"]
super().__init__(*args, **kwargs)
def send(self, request, **kwargs):
timeout = kwargs.get("timeout")
if timeout is None:
kwargs["timeout"] = self.timeout
return super().send(request, **kwargs)
# Retry for requests
retry_strategy = Retry(
total=10,
backoff_factor=1,
status_forcelist=[204, 413, 429, 500, 502, 503, 504],
allowed_methods=["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE", "POST", "HTTP"]
)
adapter = TimeoutHTTPAdapter(max_retries=retry_strategy)
http = requests.Session()
http.mount("https://", adapter)
http.mount("http://", adapter)
##########################################################################################
# Functions
##########################################################################################
#Check for Yes or no answer from input
def getYesOrNoInput(prompt):
while True:
try:
value = input(prompt)
except ValueError:
print("\nSorry, I didn't understand that.")
continue
if value.lower() != 'yes' and value.lower() != 'no':
print("\nSorry, your response must be yes or no only.")
continue
else:
break
return value
#Merge Dictionaries
def MergeComputersInfo(dict1, dict2, dict3, dict4, dict5, dict6):
result = dict1 | dict2 | dict3 | dict4 | dict5 | dict6
return result
def MergePolicyInfo(dict1, dict2, dict3, dict4, dict5, dict6):
result = dict1 | dict2 | dict3 | dict4 | dict5 | dict6
return result
def MergeConfigProfileInfo(dict1, dict2, dict3):
result = dict1 | dict2 | dict3
return result
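# Note: the "|" dictionary merge operator used in the three helpers above requires Python 3.9+.
# Illustrative example with placeholder values: {'a': 1} | {'a': 2, 'b': 3} evaluates to
# {'a': 2, 'b': 3} - on duplicate keys the right-hand dictionary wins, which is how the
# populated "append" dictionaries built later override the empty column templates.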
#Check User Input for URL, Username, and Password
def JAMFInfoCheck(url, username, password):
try:
response = http.get(url, headers=headers, auth = HTTPBasicAuth(username, password))
if response.status_code == 200:
return print(f"\nUser Input is OK, we can connect to JAMF API, Moving on.\n\n")
else:
raise SystemExit(f"\nUser Input is NOT OK, we cannot connect to JAMF API and now will EXIT! status_code: {response.status_code}\n\n")
#Exception
except requests.exceptions.RequestException as e:
# Print the error and exit
raise SystemExit(f"\nUser Input is NOT OK, we cannot connect to JAMF API and now will EXIT! \n\nErr: {e}")
# let user choose Options from list
def let_user_pick(label, options):
print(label+"\n")
for index, element in enumerate(options):
print("{}) {}".format(index + 1, element))
while True:
try:
i = input("\nEnter number: ")
if 0 < int(i) <= len(options):
return int(i) - 1
print("\nI didn't get a number in the list. Please try again with a number in the list.")
except ValueError:
print("\nThat was not a number. Please try again with a NUMBER from the list.")
# Check Input for a whole number only (returned as a string for building API URLs)
def checkInputForNumber(label):
while True:
num = input(label+" ")
try:
int(num)
print("\nSetting ID to: "+num)
break
except ValueError:
print("\nThis is not a whole number. Please enter a valid ID number.\n")
return num
def checkFilePath(prompt):
while True:
try:
value = input(prompt)
except ValueError:
print("\nSorry, I didn't understand that.")
continue
pathExist = os.path.exists(value)
if not pathExist:
print("\nThat file path does not exist. Please enter a valid path.")
continue
else:
break
return value
def checkFileName(prompt):
while True:
try:
value = input(prompt)
except ValueError:
print("\nSorry, I didn't understand that.")
continue
if not value.endswith('.xlsx'):
print("\nFilename has the wrong extension for Excel.")
continue
else:
break
return value
def confirmExcelFileName(prompt):
while True:
try:
value = input(prompt)
except ValueError:
print("\nSorry, I didn't understand that.")
continue
if value.lower() != 'yes' and value.lower() != 'no':
print("\nSorry, your response must be yes or no only.")
continue
elif value.lower() == 'no' :
raise SystemExit(f"\nUser DID NOT confirm the Excel File Name and now will EXIT!")
elif value.lower() == 'yes':
break
return value
def checkIfPackageIsUsedInPolicy(data, key, value):
for i in range(len(data)):
try:
if(data[i][key]==value): return True
except:
pass
return False
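# Minimal sketch of the intended use (placeholder key and values only): given
#   data = [{'Package ID': 12}, {'Package ID': 34}]
# checkIfPackageIsUsedInPolicy(data, 'Package ID', 12) returns True, while looking for a
# value that appears in no dictionary (for example 99) returns False.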
##########################################################################################
# Get User Input
##########################################################################################
#Get User input if needed or use command line arguments
print("******************** JAMF API Credentials ********************\n")
if APILoginURL == "" :
get_JAMF_URL = input("Enter your JAMF Instance URL (https://yourjamf.jamfcloud.com): ")
else:
print("JAMF URL supplied in command line arguments.")
get_JAMF_URL = sys.argv[1]
if APIUsername == "" :
get_JAMF_API_Username = input("Enter your JAMF Instance API Username: ")
else:
print("JAMF API Username supplied in command line arguments.")
get_JAMF_API_Username = sys.argv[2]
if APIPassword == "" :
get_JAMF_API_Password = getpass.getpass("Enter your JAMF Instance API Password: ")
else:
print("JAMF API Username supplied in command line arguments.")
get_JAMF_API_Password = sys.argv[3]
#Check User Input for URL, Username, and Password
JAMFInfoCheck((get_JAMF_URL+get_JAMF_URL_User_Test+get_JAMF_API_Username), get_JAMF_API_Username, get_JAMF_API_Password)
##########################################################################################
# JAMF API Variables
##########################################################################################
JAMF_url = get_JAMF_URL
username = get_JAMF_API_Username
password = get_JAMF_API_Password
# Get Bearer token from JAMF API since we confirmed the Username and Password
btURL = JAMF_url + "/api/v1/auth/token"
token = http.post(btURL, headers=headers, auth = HTTPBasicAuth(username, password))
bearer = token.json()['token']
# requests headers with token auth
btHeaders = {
'Accept': 'application/json',
'Authorization': 'Bearer '+bearer
}
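# Bearer tokens issued by the Jamf Pro API expire after a relatively short time, so the
# long-running report loops further below periodically POST to /api/v1/auth/keep-alive and
# rebuild btHeaders with the refreshed token before making more API calls.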
##########################################################################################
# Get Report Config Input
##########################################################################################
# Get Main Groups Section.
print("\n******************** JAMF API Excel File Info ********************\n")
get_JAMF_Default_Path_Name = getYesOrNoInput("Do you want to use the Default filename and path for the Report (/Users/Shared/JAMF_Excel_Report_<Day_Mon-DD-YYYY_HH-MM-SS>.xlsx) ? (yes or no): ")
if get_JAMF_Default_Path_Name == 'yes':
# Get Time
getFileNameTime = datetime.datetime.now()
fileNameTimeString = (getFileNameTime.strftime("%a_%b-%d-%Y_%H-%M-%S"))
#Set filename with time and date
get_JAMF_FilePath_Info = '/Users/Shared/'
get_JAMF_FileName_Info = 'JAMF_Excel_Report_' + str(fileNameTimeString) + '.xlsx'
elif get_JAMF_Default_Path_Name == 'no':
get_JAMF_Default_FilePath = getYesOrNoInput("Do you want to use the Default file path for the Report (/Users/Shared/) ? (yes or no): ")
if get_JAMF_Default_FilePath == 'yes':
get_JAMF_FilePath_Info = '/Users/Shared/'
get_JAMF_FileName_Info = checkFileName("Please enter the name you want to save the excel file as. (ex. \"myExcelFile.xlsx\") : ")
elif get_JAMF_Default_FilePath == 'no':
get_JAMF_FilePath_Info = checkFilePath("Please enter the full path where you want to save the file (ex. \"/Users/Shared/\") : ")
get_JAMF_FileName_Info = checkFileName("Please enter the name you want to save the excel file as. (ex. \"myExcelFile.xlsx\") : ")
getDesplayExcelReportFile = get_JAMF_FilePath_Info+get_JAMF_FileName_Info
desplayExcelReportFile = f"{getDesplayExcelReportFile}"
confirmExcelReportFile = confirmExcelFileName("Please confirm that the filename, " + desplayExcelReportFile + " is correct. (yes or no)")
if confirmExcelReportFile == 'yes':
excelReportFile = desplayExcelReportFile
print("\nSetting filename for JAMF Report to: "+excelReportFile+"\n")
# Get Main Groups Section.
print("\n\n******************** JAMF API Report Included Excel Sheets ********************\n")
get_JAMF_Computers_Info = getYesOrNoInput("Do you want to include JAMF Computer Info Section in Report? (yes or no): ")
get_JAMF_Policy_Info = getYesOrNoInput("Do you want to include JAMF Policy Info Section in Report? (yes or no): ")
get_JAMF_Configuration_Profile_Info = getYesOrNoInput("Do you want to include JAMF Configuration Profile Info Section in Report? (yes or no): ")
get_JAMF_Package_To_Policy_Info = getYesOrNoInput("Do you want to include JAMF Package To Policy Info Section in Report? (yes or no): ")
##########################################################################################
# Core Script
##########################################################################################
##################################################
# Get Jamf Computer Info
##################################################
print("\n\n******************** JAMF API Report Included Excel Sheets Config Info ********************\n")
if get_JAMF_Computers_Info == ("yes"):
#Get Computer Info
print("\nIncluding JAMF Computer Info.\n\n")
includeComputerInfo = "yes"
#Get Smart Group ID if needed
print("\n******************** JAMF API Computer Info Results Filter Section. ********************\n")
print("\n\nPlease choose how you would like the results returned in your report. It is recommended to use a smart group id or computer id for this report for quickest results.\n")
print("\nPlease Note if you choose all computers the report may take some time to complete depending on the number of computers in your JAMF system.")
# Set options for results filter for this section and question
myResultsFilterLabel = "Your results filter choices are:"
mymyResultsFilterOptions = ["Filter results for 1 Computer ID", "Filter results By Smart Group ID", "No Filter, Return All Computers"]
# Get choice from user
get_JAMF_Computers_Info_Results_Filter = let_user_pick(myResultsFilterLabel, mymyResultsFilterOptions)
get_JAMF_Computers_Info_Results_Filter_Choice = (mymyResultsFilterOptions[get_JAMF_Computers_Info_Results_Filter])
#Return choice and set filter
if get_JAMF_Computers_Info_Results_Filter_Choice == 'Filter results for 1 Computer ID':
print("\nUsing JAMF Computer ID to filter the Computer Report for 1 Computer Record.\n\n")
computerIDLabel = "Enter your JAMF Computer ID Number: "
get_JAMF_Computer_ID = checkInputForNumber(computerIDLabel)
print("\n")
JAMF_Computer_ID = get_JAMF_Computer_ID
usingFilter = "computerFilter"
elif get_JAMF_Computers_Info_Results_Filter_Choice == 'Filter results By Smart Group ID':
print("\nUsing JAMF Smart Group to filter the Computer Report for 1 Computer Smart Group.\n\n")
smartGroupIDLabel = "Enter your JAMF SmartGroup ID Number: "
get_JAMF_SmartGroup_ID = checkInputForNumber(smartGroupIDLabel)
print("\n")
JAMF_SmartGroup_ID = get_JAMF_SmartGroup_ID
usingFilter = "smartGroupFilter"
elif get_JAMF_Computers_Info_Results_Filter_Choice == 'No Filter, Return All Computers':
print("\nNot using JAMF Results Filter for the Computer Report.\n\n")
usingFilter = "noFilter"
#Get hardware Elements
print("\n******************** JAMF API Computer Info Hardware Section. ********************\n")
get_JAMF_Computers_Info_Hardware = getYesOrNoInput("Do you want to include JAMF Computer Hardware Info in Report? (yes or no): ")
if get_JAMF_Computers_Info_Hardware == ("yes"):
print("\nIncluding Computer Hardware Data.\n\n")
includeHardwareInfo = "yes"
elif get_JAMF_Computers_Info_Hardware == ("no"):
print("\nNot Including Computer Hardware Data.\n\n")
includeHardwareInfo = "no"
#Get FileVault2 Users
print("\n******************** JAMF API Computer Info FileVault2 Section. ********************\n")
get_JAMF_Computers_Info_FileVault2_Users = getYesOrNoInput("Do you want to include JAMF Computer Hardware FileVault Users Info in Report? (yes or no): ")
if get_JAMF_Computers_Info_FileVault2_Users == ("yes"):
print("\nIncluding FileVault2 Info Data.\n\n")
includeFileVault2Info = "yes"
elif get_JAMF_Computers_Info_FileVault2_Users == ("no"):
print("\nNot including FileVault2 Info Data.\n\n")
includeFileVault2Info = "no"
#Get Local Users Accounts
print("\n******************** JAMF API Computer Info Local Account Section. ********************\n")
get_JAMF_Computers_Info_Local_Account = getYesOrNoInput("Do you want to include JAMF Computer Hardware Local Account Info in Report? (yes or no): ")
if get_JAMF_Computers_Info_Local_Account == ("yes"):
print("\nIncluding Local Account Info Data.\n\n")
includeLocalAccountInfo = "yes"
print("\n******************** JAMF API Computer Info Local Account LDAP Section. ********************\n")
get_JAMF_Computers_Info_Local_Account_LDAP = getYesOrNoInput("Do you want to include JAMF Computer Hardware Local Accounts LDAP Verification in Report? (yes or no): ")
if get_JAMF_Computers_Info_Local_Account_LDAP == ("yes"):
print("\nIncluding Local Account Info LDAP Verification Data.\n\n")
includeLocalAccountInfoLDAP = "yes"
# Lookup JIM Server Name
url = JAMF_url + "/JSSResource/ldapservers"
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
resp = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
#For Testing
#print(resp)
JIMServerRecords = resp['ldap_servers']
JIMServerRecords.sort(key=lambda item: item.get('id'), reverse=False)
for JIMServer in JIMServerRecords:
JIMServerRecordsName = JIMServer['name']
JIMServerList.append(JIMServerRecordsName)
JIMServerlabel = "Please choose the JIM Server you would like to use:"
JimServerChoice = let_user_pick(JIMServerlabel, JIMServerList)
JIMServerNameForURL = (JIMServerList[JimServerChoice])
JIMServerLDAPLookupURL = "/JSSResource/ldapservers/name/" + JIMServerNameForURL
elif get_JAMF_Computers_Info_Local_Account_LDAP == ("no"):
print("\nIncluding Local Account Info LDAP Verification Data.\n\n")
includeLocalAccountInfoLDAP = "no"
elif get_JAMF_Computers_Info_Local_Account == ("no"):
print("\nNot including Local Account Info Data.\n\n")
includeLocalAccountInfo = "no"
#Get Group Membership
print("\n******************** JAMF API Computer Info Computer Group Membership Section. ********************\n")
get_JAMF_Computers_Info_Computer_Group_Membership = getYesOrNoInput("Do you want to include JAMF Computer Hardware Computer Group Membership Info in Report? (yes or no): ")
if get_JAMF_Computers_Info_Computer_Group_Membership == ("yes"):
print("\nIncluding Computer Group Membership Info Data.\n\n")
includeComputerGroupMembershipInfo = "yes"
elif get_JAMF_Computers_Info_Computer_Group_Membership == ("no"):
print("\nNot including Computer Group Membership Info Data.\n\n")
includeComputerGroupMembershipInfo = "no"
#Get Config Profile Membership
print("\n******************** JAMF API Computer Info Computer Configuration Profile Membership Section. ********************\n")
get_JAMF_Computers_Info_Computer_Configuration_Profile_Membership = getYesOrNoInput("Do you want to include JAMF Computer Hardware Configuration Profile Membership Info in Report? (yes or no): ")
if get_JAMF_Computers_Info_Computer_Configuration_Profile_Membership == ("yes"):
print("\nIncluding Computer Configuration Profile Membership Info Data.\n\n")
includeComputerConfigurationProfileMembershipInfo = "yes"
elif get_JAMF_Computers_Info_Computer_Configuration_Profile_Membership == ("no"):
print("\nNot including Computer Group Membership Info Data.\n\n")
includeComputerConfigurationProfileMembershipInfo = "no"
elif get_JAMF_Computers_Info == ("no"):
includeComputerInfo = "no"
usingFilter = "noFilter"
includeHardwareInfo = "no"
includeFileVault2Info = "no"
includeLocalAccountInfo = "no"
includeComputerGroupMembershipInfo = "no"
includeComputerConfigurationProfileMembershipInfo = "no"
##################################################
# Get Jamf Policy Info
##################################################
print("\n\n******************** JAMF API Report Included Excel Sheets Config Info ********************\n")
if get_JAMF_Policy_Info == ("yes"):
#Get Policy Info
print("\nIncluding JAMF Policy Info.\n\n")
includePolicyInfo = "yes"
#Get Policy Self Service Elements
print("\n******************** JAMF API Policy Self Service Section. ********************\n")
get_JAMF_Policy_Info_SelfService = getYesOrNoInput("Do you want to include JAMF Policy Self Service Info in Report? (yes or no): ")
if get_JAMF_Policy_Info_SelfService == ("yes"):
print("\nIncluding Self Service Data.\n\n")
includeSelfServiceInfo = "yes"
elif get_JAMF_Policy_Info_SelfService == ("no"):
print("\nNot Including Self Service Data.\n\n")
includeSelfServiceInfo = "no"
#Get Policy Targets
print("\n******************** JAMF API Policy Targets Section. ********************\n")
get_JAMF_Policy_Info_Targets = getYesOrNoInput("Do you want to include JAMF Policy Targets Info in Report? (yes or no): ")
if get_JAMF_Policy_Info_Targets == ("yes"):
print("\nIncluding Target Data.\n\n")
includeTargetsInfo = "yes"
elif get_JAMF_Policy_Info_Targets == ("no"):
print("\nNot Including Target Data.\n\n")
includeTargetsInfo = "no"
#Get Policy Exclusions
print("\n******************** JAMF API Policy Exclusions Section. ********************\n")
get_JAMF_Policy_Info_Exclusions = getYesOrNoInput("Do you want to include JAMF Policy Exclusions Info in Report? (yes or no): ")
if get_JAMF_Policy_Info_Exclusions == ("yes"):
print("\nIncluding Exclusions Data.\n\n")
includeExclusionsInfo = "yes"
elif get_JAMF_Policy_Info_Exclusions == ("no"):
print("\nNot Including Exclusions Data.\n\n")
includeExclusionsInfo = "no"
#Get Policy Package Elements
print("\n******************** JAMF API Policy Packages Section. ********************\n")
get_JAMF_Policy_Info_Packages = getYesOrNoInput("Do you want to include JAMF Policy Packages Info in Report? (yes or no): ")
if get_JAMF_Policy_Info_Packages == ("yes"):
print("\nIncluding Package Data.\n\n")
includePackagesInfo = "yes"
elif get_JAMF_Policy_Info_Packages == ("no"):
print("\nNot Including Package Data.\n\n")
includePackagesInfo = "no"
#Get Policy Script Elements
print("\n******************** JAMF API Policy Scripts Section. ********************\n")
get_JAMF_Policy_Info_Scripts = getYesOrNoInput("Do you want to include JAMF Policy Scripts Info in Report? (yes or no): ")
if get_JAMF_Policy_Info_Scripts == ("yes"):
print("\nIncluding Scripts Data.\n\n")
includeScriptsInfo = "yes"
elif get_JAMF_Policy_Info_Scripts == ("no"):
print("\nNot Including Scripts Data.\n\n")
includeScriptsInfo = "no"
elif get_JAMF_Policy_Info == ("no"):
includePolicyInfo = "no"
##################################################
# Get Configuration Profile Info
##################################################
print("\n\n******************** JAMF API Report Included Excel Sheets Config Info ********************\n")
if get_JAMF_Configuration_Profile_Info == ("yes"):
#Get Configuration Profile Info
print("Including Configuration Profile Info.\n\n")
includeConfigurationProfileInfo = "yes"
#Get Policy Targets
print("\n******************** JAMF API Configuration Profile Targets Section. ********************\n")
get_JAMF_Configuration_Profile_Info_Targets = getYesOrNoInput("Do you want to include JAMF Configuration Profile Targets Info in Report? (yes or no): ")
if get_JAMF_Configuration_Profile_Info_Targets == ("yes"):
print("\nIncluding Target Data.\n\n")
includeConfigurationProfileTargetsInfo = "yes"
elif get_JAMF_Configuration_Profile_Info_Targets == ("no"):
print("\nNot Including Target Data.\n\n")
includeConfigurationProfileTargetsInfo = "no"
#Get Policy Exclusions
print("\n******************** JAMF API Configuration Profile Exclusions Section. ********************\n")
get_JAMF_Configuration_Profile_Info_Exclusions = getYesOrNoInput("Do you want to include JAMF Configuration Profile Exclusions Info in Report? (yes or no): ")
if get_JAMF_Configuration_Profile_Info_Exclusions == ("yes"):
print("\nIncluding Exclusions Data.\n\n")
includeConfigurationProfileExclusionsInfo = "yes"
elif get_JAMF_Configuration_Profile_Info_Exclusions == ("no"):
print("\nNot Including Exclusions Data.\n\n")
includeConfigurationProfileExclusionsInfo = "no"
elif get_JAMF_Configuration_Profile_Info == ("no"):
includeConfigurationProfileInfo = "no"
##################################################
# Get Jamf Package To Policy Info
##################################################
print("\n\n******************** JAMF API Report Included Package To Policy Info ********************\n")
if get_JAMF_Package_To_Policy_Info == ("yes"):
#Get Package To Policy Info
print("\nIncluding JAMF Regular Package Info.\n\n")
includeRegularPackageToPolicyInfo = "yes"
#Get Policy Exclusions
print("\n******************** JAMF API Package To Policy in PreStage Policy Section. ********************\n")
get_JAMF_Policy_in_PreStage_Policy_Info = getYesOrNoInput("Do you want to include JAMF Package To Policy in PreStage Policy Info in Report? (yes or no): ")
if get_JAMF_Policy_in_PreStage_Policy_Info == ("yes"):
print("\nIncluding PreStage Policy Info.\n\n")
includePreStagePackageToPolicyInfo = "yes"
elif get_JAMF_Policy_in_PreStage_Policy_Info == ("no"):
print("\nNot Including PreStage Policy Info.\n\n")
includePreStagePackageToPolicyInfo = "no"
##################################################
# Set Variables for dict
##################################################
#Check Options set and display message to user
if get_JAMF_Computers_Info == 'yes' or get_JAMF_Policy_Info == 'yes' or get_JAMF_Configuration_Profile_Info == 'yes' or get_JAMF_Package_To_Policy_Info == 'yes':
print("\n******************** Running Requested Report Now. ********************\n\n")
##################################################
# Set Variables for Dict for Computers Info
##################################################
if usingFilter == 'computerFilter':
dataToCVS_JAMF_Computers_Info = "{'Type':'',\
\
'Computer ID':'',\
\
'Computer Name':'',\
\
'Computer Serial Number':''}"
elif usingFilter == 'smartGroupFilter':
dataToCVS_JAMF_Computers_Info = "{'Computer SmartGroup ID':'',\
\
'Computer SmartGroup Name':'',\
\
'Type':'',\
\
'Computer ID':'',\
\
'Computer Name':'',\
\
'Computer Serial Number':''}"
elif usingFilter == 'noFilter':
dataToCVS_JAMF_Computers_Info = "{'Type':'',\
\
'Computer ID':'',\
\
'Computer Name':'',\
\
'Computer Serial Number':''}"
dataToCVS_JAMF_Computers_Hardware_Info = "{'Computer Make':'',\
\
'Computer Model':'',\
\
'Computer Model Identifier':'',\
\
'Computer OS Name':'',\
\
'Computer OS Version':'',\
\
'Computer OS Build':''}"
dataToCVS_JAMF_Computers_FileVault2_Info = "{'Computer FileVault2 User':''}"
dataToCVS_JAMF_Computers_Local_Account_Info = "{'Computer Local Account Name':'',\
\
'Computer Local Account Real Name':'',\
\
'Computer Local Account ID':'',\
\
'Computer Local Account is Admin ':'',\
\
'Computer Local Account in LDAP ':''}"
dataToCVS_JAMF_Computers_Info_Computer_Group_Membership = "{'Computer Group Membership Group ID':'',\
\
'Computer Group Membership Group Name':'',\
\
'Computer Group Membership Group Is Smart':''}"
dataToCVS_JAMF_Computers_Info_Computer_Configuration_Profile_Membership = "{'Computer Configuration Profile Membership ID':'',\
\
'Computer Configuration Profile Membership Name':''}"
##################################################
# Set Variables for Dict for Policy Info
##################################################
dataToCVS_JAMF_Policy_Info = "{'Type':'',\
\
'Policy ID':'',\
\
'Policy Name':'',\
\
'Policy Category ID':'',\
\
'Policy Category Name':''}"
dataToCVS_JAMF_Policy_SelfService_Info = "{'Policy In SelfService':'',\
\
'Policy In SelfService Name':''}"
dataToCVS_JAMF_Policy_Target_Info = "{'Policy Target All Computers':'',\
\
'Policy Target Computer ID':'',\
\
'Policy Target Computer Name':'',\
\
'Policy Target Group ID':'',\
\
'Policy Target Group Name':'',\
\
'Policy Target Group is Smart':''}"
dataToCVS_JAMF_Policy_Exclusion_Info = "{'Policy Exclusion Computer ID':'',\
\
'Policy Exclusion Computer Name':'',\
\
'Policy Exclusion Group id':'',\
\
'Policy Exclusion Group Name':'',\
\
'Policy Exclusion Group is Smart':''}"
dataToCVS_JAMF_Policy_Packages_Info = "{'Policy Package ID':'',\
\
'Policy Package Name':'',\
\
'Policy Package Category Name':'',\
\
'Policy Package Filename':''}"
dataToCVS_JAMF_Policy_Scripts_Info = "{'Policy Script ID':'',\
\
'Policy Script Name':'',\
\
'Policy Script Category Name':'',\
\
'Policy Script Filename':''}"
##################################################
# Set Variables for Dict for Configuration Profile Info
##################################################
dataToCVS_JAMF_Configuration_Profile_Info = "{'Configuration Profile ID':'',\
\
'Configuration Profile Type':'',\
\
'Configuration Profile Name':'',\
\
'Configuration Profile Category ID':'',\
\
'Configuration Profile Category Name':''}"
dataToCVS_JAMF_Configuration_Profile_Target_Info = "{'Configuration Profile Target Computer ID':'',\
\
'Configuration Profile Target Computer Name':'',\
\
'Configuration Profile Target Group ID':'',\
\
'Configuration Profile Target Group Name':'',\
\
'Configuration Profile Target Group is Smart':''}"
dataToCVS_JAMF_Configuration_Profile_Exclusion_Info = "{'Configuration Profile Exclusion Computer id':'',\
\
'Configuration Profile Exclusion Computer Name':'',\
\
'Configuration Profile Exclusion Group id':'',\
\
'Configuration Profile Exclusion Group Name':'',\
\
'Configuration Profile Exclusion Group is Smart':''}"
##################################################
# Set Variables for Dict for Packages to Policies Info
##################################################
dataToCVS_JAMF_Package_To_Regular_Policy_Info = "{'Type':'',\
\
'Package List':'',\
\
'Package ID':'',\
\
'Package Name':'',\
\
'Package File Name':'',\
\
'Policy ID':'',\
\
'Policy Name':''}"
dataToCVS_JAMF_Package_To_PreStage_Policy_Info = "{'Type':'',\
\
'Package List':'',\
\
'Package ID':'',\
\
'Package Name':'',\
\
'Package File Name':'',\
\
'Policy ID':'',\
\
'Policy Name':''}"
dataToCVS_JAMF_Package_Unused_Info = "{'Type':'',\
\
'Package List':'',\
\
'Package ID':'',\
\
'Package Name':'',\
\
'Package File Name':''}"
##################################################
# Set Variables for Dict for Computers Info to empty
##################################################
if usingFilter == 'computerFilter':
dataToCVS_JAMF_Computers_Info_Empty = "{'Type':'',\
\
'Computer ID':'',\
\
'Computer Name':'',\
\
'Computer Serial Number':''}"
elif usingFilter == 'smartGroupFilter':
dataToCVS_JAMF_Computers_Info_Empty = "{'Computer SmartGroup ID':'',\
\
'Computer SmartGroup Name':'',\
\
'Type':'',\
\
'Computer ID':'',\
\
'Computer Name':'',\
\
'Computer Serial Number':''}"
elif usingFilter == 'noFilter':
dataToCVS_JAMF_Computers_Info_Empty = "{'Type':'',\
\
'Computer ID':'',\
\
'Computer Name':'',\
\
'Computer Serial Number':''}"
dataToCVS_JAMF_Computers_Hardware_Info_Empty = "{'Computer Make':'',\
\
'Computer Model':'',\
\
'Computer Model Identifier':'',\
\
'Computer OS Name':'',\
\
'Computer OS Version':'',\
\
'Computer OS Build':''}"
dataToCVS_JAMF_Computers_FileVault2_Info_Empty = "{'Computer FileVault2 User':''}"
dataToCVS_JAMF_Computers_Local_Account_Info_Empty = "{'Computer Local Account Name':'',\
\
'Computer Local Account Real Name':'',\
\
'Computer Local Account ID':'',\
\
'Computer Local Account is Admin ':'',\
\
'Computer Local Account in LDAP ':''}"
dataToCVS_JAMF_Computers_Info_Computer_Group_Membership_Empty = "{'Computer Group Membership Group ID':'',\
\
'Computer Group Membership Group Name':'',\
\
'Computer Group Membership Group Is Smart':''}"
dataToCVS_JAMF_Computers_Info_Computer_Configuration_Profile_Membership_Empty = "{'Computer Configuration Profile Membership ID':'',\
\
'Computer Configuration Profile Membership Name':''}"
##################################################
# Set Variables for Dict for Policy Info Empty
##################################################
dataToCVS_JAMF_Policy_Info_Empty = "{'Type':'',\
\
'Policy ID':'',\
\
'Policy Name':'',\
\
'Policy Category ID':'',\
\
'Policy Category Name':''}"
dataToCVS_JAMF_Policy_SelfService_Info_Empty = "{'Policy In SelfService':'',\
\
'Policy In SelfService Name':''}"
dataToCVS_JAMF_Policy_Target_Info_Empty = "{'Policy Target All Computers':'',\
\
'Policy Target Computer ID':'',\
\
'Policy Target Computer Name':'',\
\
'Policy Target Group ID':'',\
\
'Policy Target Group Name':'',\
\
'Policy Target Group is Smart':''}"
dataToCVS_JAMF_Policy_Exclusion_Info_Empty = "{'Policy Exclusion Computer ID':'',\
\
'Policy Exclusion Computer Name':'',\
\
'Policy Exclusion Group id':'',\
\
'Policy Exclusion Group Name':'',\
\
'Policy Exclusion Group is Smart':''}"
dataToCVS_JAMF_Policy_Packages_Info_Empty = "{'Policy Package ID':'',\
\
'Policy Package Name':'',\
\
'Policy Package Category Name':'',\
\
'Policy Package Filename':''}"
dataToCVS_JAMF_Policy_Scripts_Info_Empty = "{'Policy Script ID':'',\
\
'Policy Script Name':'',\
\
'Policy Script Category Name':'',\
\
'Policy Script Filename':''}"
##################################################
# Set Variables for Dict for Configuration Profile Info to empty
##################################################
dataToCVS_JAMF_Configuration_Profile_Info_Empty = "{'Configuration Profile ID':'',\
\
'Configuration Profile Type':'',\
\
'Configuration Profile Name':'',\
\
'Configuration Profile Category ID':'',\
\
'Configuration Profile Category Name':''}"
dataToCVS_JAMF_Configuration_Profile_Target_Info_Empty = "{'Configuration Profile Target Computer ID':'',\
\
'Configuration Profile Target Computer Name':'',\
\
'Configuration Profile Target Group ID':'',\
\
'Configuration Profile Target Group Name':'',\
\
'Configuration Profile Target Group is Smart':''}"
dataToCVS_JAMF_Configuration_Profile_Exclusion_Info_Empty = "{'Configuration Profile Exclusion Computer id':'',\
\
'Configuration Profile Exclusion Computer Name':'',\
\
'Configuration Profile Exclusion Group id':'',\
\
'Configuration Profile Exclusion Group Name':'',\
\
'Configuration Profile Exclusion Group is Smart':''}"
##################################################
# Set Variables for Dict for Configuration Profile Info to empty
##################################################
dataToCVS_JAMF_Package_To_Regular_Policy_Info_Empty = "{'Type':'',\
\
'Package List':'',\
\
'Package ID':'',\
\
'Package Name':'',\
\
'Package File Name':'',\
\
'Policy ID':'',\
\
'Policy Name':''}"
dataToCVS_JAMF_Package_To_PreStage_Policy_Info_Empty = "{'Type':'',\
\
'Package List':'',\
\
'Package ID':'',\
\
'Package Name':'',\
\
'Package File Name':'',\
\
'Policy ID':'',\
\
'Policy Name':''}"
dataToCVS_JAMF_Package_Unused_Info_Empty = "{'Type':'',\
\
'Package List':'',\
\
'Package ID':'',\
\
'Package Name':'',\
\
'Package File Name':''}"
##################################################
# Take Variables and make Dict
##################################################
# Computers Info
JAMF_Computers_Info = eval(dataToCVS_JAMF_Computers_Info)
JAMF_Computers_Hardware_Info = eval(dataToCVS_JAMF_Computers_Hardware_Info)
JAMF_Computers_FileVault2_Info = eval(dataToCVS_JAMF_Computers_FileVault2_Info)
JAMF_Computers_Local_Account_Info = eval(dataToCVS_JAMF_Computers_Local_Account_Info)
JAMF_Computers_Info_Computer_Group_Membership = eval(dataToCVS_JAMF_Computers_Info_Computer_Group_Membership)
JAMF_Computers_Info_Computer_Configuration_Profile_Membership = eval(dataToCVS_JAMF_Computers_Info_Computer_Configuration_Profile_Membership)
# Policy Info
JAMF_Policy_Info = eval(dataToCVS_JAMF_Policy_Info)
JAMF_Policy_SelfService_Info = eval(dataToCVS_JAMF_Policy_SelfService_Info)
JAMF_Policy_Target_Info = eval(dataToCVS_JAMF_Policy_Target_Info)
JAMF_Policy_Exclusion_Info = eval(dataToCVS_JAMF_Policy_Exclusion_Info)
JAMF_Policy_Packages_Info = eval(dataToCVS_JAMF_Policy_Packages_Info)
JAMF_Policy_Scripts_Info = eval(dataToCVS_JAMF_Policy_Scripts_Info)
# Configuration Profile Info
JAMF_Configuration_Profile_Info = eval(dataToCVS_JAMF_Configuration_Profile_Info)
JAMF_Configuration_Profile_Target_Info = eval(dataToCVS_JAMF_Configuration_Profile_Target_Info)
JAMF_Configuration_Profile_Exclusion_Info = eval(dataToCVS_JAMF_Configuration_Profile_Exclusion_Info)
# Package to Policy Info
JAMF_Package_To_Regular_Policy_Info = eval(dataToCVS_JAMF_Package_To_Regular_Policy_Info)
JAMF_Package_To_PreStage_Policy_Info = eval(dataToCVS_JAMF_Package_To_PreStage_Policy_Info)
JAMF_Package_Unused_Info = eval(dataToCVS_JAMF_Package_Unused_Info)
##################################################
# Take Variables and make them a Empty Dict
##################################################
# Computers Info
JAMF_Computers_Info_Empty = eval(dataToCVS_JAMF_Computers_Info_Empty)
JAMF_Computers_Hardware_Info_Empty = eval(dataToCVS_JAMF_Computers_Hardware_Info_Empty)
JAMF_Computers_FileVault2_Info_Empty = eval(dataToCVS_JAMF_Computers_FileVault2_Info_Empty)
JAMF_Computers_Local_Account_Info_Empty = eval(dataToCVS_JAMF_Computers_Local_Account_Info_Empty)
JAMF_Computers_Info_Computer_Group_Membership_Empty = eval(dataToCVS_JAMF_Computers_Info_Computer_Group_Membership_Empty)
JAMF_Computers_Info_Computer_Configuration_Profile_Membership_Empty = eval(dataToCVS_JAMF_Computers_Info_Computer_Configuration_Profile_Membership_Empty)
# Policy Info
JAMF_Policy_Info_Empty = eval(dataToCVS_JAMF_Policy_Info_Empty)
JAMF_Policy_SelfService_Info_Empty = eval(dataToCVS_JAMF_Policy_SelfService_Info_Empty)
JAMF_Policy_Target_Info_Empty = eval(dataToCVS_JAMF_Policy_Target_Info_Empty)
JAMF_Policy_Exclusion_Info_Empty = eval(dataToCVS_JAMF_Policy_Exclusion_Info_Empty)
JAMF_Policy_Packages_Info_Empty = eval(dataToCVS_JAMF_Policy_Packages_Info_Empty)
JAMF_Policy_Scripts_Info_Empty = eval(dataToCVS_JAMF_Policy_Scripts_Info_Empty)
# Configuration Profile Info
JAMF_Configuration_Profile_Info_Empty = eval(dataToCVS_JAMF_Configuration_Profile_Info_Empty)
JAMF_Configuration_Profile_Target_Info_Empty = eval(dataToCVS_JAMF_Configuration_Profile_Target_Info_Empty)
JAMF_Configuration_Profile_Exclusion_Info_Empty = eval(dataToCVS_JAMF_Configuration_Profile_Exclusion_Info_Empty)
# Package to Policy Info
JAMF_Package_To_Regular_Policy_Info_Empty = eval(dataToCVS_JAMF_Package_To_Regular_Policy_Info_Empty)
JAMF_Package_To_PreStage_Policy_Info_Empty = eval(dataToCVS_JAMF_Package_To_PreStage_Policy_Info_Empty)
JAMF_Package_Unused_Info_Empty = eval(dataToCVS_JAMF_Package_Unused_Info_Empty)
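# Each report row is produced by merging one "append" dictionary (carrying the real values
# for the sub-section being written) with the remaining placeholder column dictionaries, so
# every row exposes the full set of column headings and lines up in the same Excel columns
# regardless of which optional sections were requested.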
##################################################
# Build the dataToCsvPolicy
##################################################
# Computer Fields
if get_JAMF_Computers_Info == "yes":
if includeComputerInfo == "yes":
computerColumns = JAMF_Computers_Info
if includeHardwareInfo == "yes":
hardwareColumns = JAMF_Computers_Hardware_Info
elif includeHardwareInfo == "no":
hardwareColumns = JAMF_Computers_Hardware_Info_Empty
if includeFileVault2Info == "yes":
FileVault2Columns = JAMF_Computers_FileVault2_Info
elif includeFileVault2Info == "no":
FileVault2Columns = JAMF_Computers_FileVault2_Info_Empty
if includeLocalAccountInfo == "yes":
LocalAccountColumns = JAMF_Computers_Local_Account_Info
elif includeLocalAccountInfo == "no":
LocalAccountColumns = JAMF_Computers_Local_Account_Info_Empty
if includeComputerGroupMembershipInfo == 'yes':
computerGroupMembershipColumns = JAMF_Computers_Info_Computer_Group_Membership
elif includeComputerGroupMembershipInfo == 'no':
computerGroupMembershipColumns = JAMF_Computers_Info_Computer_Group_Membership_Empty
if includeComputerConfigurationProfileMembershipInfo == 'yes':
computerConfigurationProfileMembershipColumns = JAMF_Computers_Info_Computer_Configuration_Profile_Membership
elif includeComputerConfigurationProfileMembershipInfo == 'no':
computerConfigurationProfileMembershipColumns = JAMF_Computers_Info_Computer_Configuration_Profile_Membership_Empty
elif get_JAMF_Computers_Info == "no":
computerColumns = JAMF_Computers_Info_Empty
hardwareColumns = JAMF_Computers_Hardware_Info_Empty
FileVault2Columns = JAMF_Computers_FileVault2_Info_Empty
LocalAccountColumns = JAMF_Computers_Local_Account_Info_Empty
computerGroupMembershipColumns = JAMF_Computers_Info_Computer_Group_Membership_Empty
computerConfigurationProfileMembershipColumns = JAMF_Computers_Info_Computer_Configuration_Profile_Membership_Empty
# Policy Fields
if get_JAMF_Policy_Info == "yes":
if includePolicyInfo == "yes":
policyColumns = JAMF_Policy_Info
if includeSelfServiceInfo == "yes":
selfServiceColumns = JAMF_Policy_SelfService_Info
elif includeSelfServiceInfo == "no":
selfServiceColumns = JAMF_Policy_SelfService_Info_Empty
if includeTargetsInfo == "yes":
targetsColumns = JAMF_Policy_Target_Info
elif includeTargetsInfo == "no":
targetsColumns = JAMF_Policy_Target_Info_Empty
if includeExclusionsInfo == "yes":
exclusionColumns = JAMF_Policy_Exclusion_Info
elif includeExclusionsInfo == "no":
exclusionColumns = JAMF_Policy_Exclusion_Info_Empty
if includePackagesInfo == "yes":
packageColumns = JAMF_Policy_Packages_Info
elif includePackagesInfo == "no":
packageColumns = JAMF_Policy_Packages_Info_Empty
if includeScriptsInfo == "yes":
scriptsColumns = JAMF_Policy_Scripts_Info
elif includeScriptsInfo == "no":
scriptsColumns = JAMF_Policy_Scripts_Info_Empty
elif get_JAMF_Policy_Info == "no":
policyColumns = JAMF_Policy_Info_Empty
selfServiceColumns = JAMF_Policy_SelfService_Info_Empty
targetsColumns = JAMF_Policy_Target_Info_Empty
exclusionColumns = JAMF_Policy_Exclusion_Info_Empty
packageColumns = JAMF_Policy_Packages_Info_Empty
scriptsColumns = JAMF_Policy_Scripts_Info_Empty
# Configuration Profile Fields
if get_JAMF_Configuration_Profile_Info == "yes":
if includeConfigurationProfileInfo == "yes":
configProfileColumns = JAMF_Configuration_Profile_Info
elif includeConfigurationProfileInfo == "no":
configProfileColumns = JAMF_Configuration_Profile_Info_Empty
if includeConfigurationProfileTargetsInfo == "yes":
configProfileTargetsColumns = JAMF_Configuration_Profile_Target_Info
elif includeConfigurationProfileTargetsInfo == "no":
configProfileTargetsColumns = JAMF_Configuration_Profile_Target_Info_Empty
if includeConfigurationProfileExclusionsInfo == "yes":
configProfileExclusionsColumns = JAMF_Configuration_Profile_Exclusion_Info
elif includeConfigurationProfileExclusionsInfo == "no":
configProfileExclusionsColumns = JAMF_Configuration_Profile_Exclusion_Info_Empty
elif get_JAMF_Configuration_Profile_Info == "no":
configProfileColumns = JAMF_Configuration_Profile_Info_Empty
configProfileTargetsColumns = JAMF_Configuration_Profile_Target_Info_Empty
configProfileExclusionsColumns = JAMF_Configuration_Profile_Exclusion_Info_Empty
# Package to Policy Info fields
if get_JAMF_Package_To_Policy_Info == 'yes':
# Regular columns
if includeRegularPackageToPolicyInfo == "yes":
packageToRegularPolicyColumns = JAMF_Package_To_Regular_Policy_Info
packageUnusedColumns = JAMF_Package_Unused_Info
elif includeRegularPackageToPolicyInfo == "no":
packageToRegularPolicyColumns = JAMF_Package_To_Regular_Policy_Info_Empty
packageUnusedColumns = JAMF_Package_Unused_Info_Empty
#prestage columns
if includePreStagePackageToPolicyInfo == "yes":
packageToPreStagePolicyColumns = JAMF_Package_To_PreStage_Policy_Info
packageUnusedColumns = JAMF_Package_Unused_Info
elif includePreStagePackageToPolicyInfo == "no":
packageToPreStagePolicyColumns = JAMF_Package_To_PreStage_Policy_Info_Empty
packageUnusedColumns = JAMF_Package_Unused_Info_Empty
##########################################################################################
# Process Requested Info for Sheets
##########################################################################################
if get_JAMF_Computers_Info == ("yes"):
##########################################################################################
# Process Computers information for csv / Excel
##########################################################################################
# Set up url for getting a list of all Computers from JAMF API
if usingFilter == 'computerFilter':
url = JAMF_url + "/JSSResource/computers/id/" + JAMF_Computer_ID
elif usingFilter == 'smartGroupFilter':
url = JAMF_url + "/JSSResource/computergroups/id/" + JAMF_SmartGroup_ID
elif usingFilter == 'noFilter':
url = JAMF_url + "/JSSResource/computers"
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
resp = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# For Testing
#print(response.json())
#Choose filter for records
if usingFilter == 'computerFilter':
computerRecords = resp['computer']['general']
elif usingFilter == 'smartGroupFilter':
computerRecords = resp['computer_group']['computers']
computerRecords.sort(key=lambda item: item.get('id'), reverse=False)
smartGroupRecords = resp['computer_group']
smartGroupRecordName = smartGroupRecords['name']
#Set Variables if Data Available
if len(str(smartGroupRecords['id'])) == 0:
smartGroupRecordID = ''
else:
smartGroupRecordID = int(smartGroupRecords['id'])
elif usingFilter == 'noFilter':
computerRecords = resp['computers']
computerRecords.sort(key=lambda item: item.get('id'), reverse=False)
# Process Computers List and get information linked to Computers
if usingFilter == 'computerFilter':
#run for single computer
# Get ID to do JAMF API lookup
computerRecordID = str(computerRecords['id'])
#For Testing
#print(computerRecordID)
# Set up url for getting information for each computer ID from JAMF API
url = JAMF_url + "/JSSResource/computers/id/" + computerRecordID
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
computerRecordProfile = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# For Testing
#print(computerRecordProfile)
#General Element for ID and Category
mycomputerRecordGeneral = computerRecordProfile['computer']['general']
mycomputerRecordHardware = computerRecordProfile['computer']['hardware']
mycomputerRecordHardwareFileVault2Users = computerRecordProfile['computer']['hardware']['filevault2_users']
mycomputerRecordHardwareLocalAccounts = computerRecordProfile['computer']['groups_accounts']['local_accounts']
mycomputerRecordComputerGroupMembership = computerRecordProfile['computer']['groups_accounts']['computer_group_memberships']
mycomputerConfigurationProfileMembership = computerRecordProfile['computer']['configuration_profiles']
##########################################################################################
# Process individual Computer record information for csv / Excel
##########################################################################################
# Individual Computers Info for each record
getMycomputerRecordGeneralID = (str(mycomputerRecordGeneral['id']) + " - " + mycomputerRecordGeneral['name'])
# Get info for Policies
print("Working on Computer ID: " + getMycomputerRecordGeneralID)
#Set Variables if Data Available
if len(str(mycomputerRecordGeneral['id'])) == 0:
mycomputerRecordGeneralID = ''
else:
mycomputerRecordGeneralID = int(mycomputerRecordGeneral['id'])
# Set Variables for Dict for Computers Info
if usingFilter == 'computerFilter':
appendDataToCVS_JAMF_Computers_Info = "{'Type':'Computer Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer Serial Number':str(mycomputerRecordGeneral['serial_number'])}"
elif usingFilter == 'smartGroupFilter':
appendDataToCVS_JAMF_Computers_Info = "{'Computer SmartGroup ID':smartGroupRecordID,\
\
'Computer SmartGroup Name':smartGroupRecordName,\
\
'Type':'Computer Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer Serial Number':str(mycomputerRecordGeneral['serial_number'])}"
elif usingFilter == 'noFilter':
appendDataToCVS_JAMF_Computers_Info = "{'Type':'Computer Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer Serial Number':str(mycomputerRecordGeneral['serial_number'])}"
appendJAMF_Computers_Info = eval(appendDataToCVS_JAMF_Computers_Info)
appendComputerColumns = appendJAMF_Computers_Info
#Set Columns
Combined = MergeComputersInfo(appendComputerColumns, hardwareColumns, FileVault2Columns, LocalAccountColumns, computerGroupMembershipColumns, computerConfigurationProfileMembershipColumns)
#Set CSV File
dataToCsvComputers.append(Combined)
if get_JAMF_Computers_Info_Hardware == ("yes"):
##########################################################################################
# Get info for Hardware
##########################################################################################
formatMyComputerRecordHardwareOSBuild = f"\"{mycomputerRecordHardware['os_build']}\""
appendDataToCVS_JAMF_Computers_Hardware_Info = "{'Type':'Computer Hardware Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer Make':mycomputerRecordHardware['make'],\
\
'Computer Model':mycomputerRecordHardware['model'],\
\
'Computer Model Identifier':mycomputerRecordHardware['model_identifier'],\
\
'Computer OS Name':mycomputerRecordHardware['os_name'],\
\
'Computer OS Version':str(mycomputerRecordHardware['os_version']),\
\
'Computer OS Build':formatMyComputerRecordHardwareOSBuild}"
appendJAMF_Computers_Hardware_Info = eval(appendDataToCVS_JAMF_Computers_Hardware_Info)
appendComputerHardwareColumns = appendJAMF_Computers_Hardware_Info
#Set Columns
Combined = MergeComputersInfo(computerColumns, appendComputerHardwareColumns, FileVault2Columns, LocalAccountColumns, computerGroupMembershipColumns, computerConfigurationProfileMembershipColumns)
#Set CSV File
dataToCsvComputers.append(Combined)
if get_JAMF_Computers_Info_FileVault2_Users == ("yes"):
##########################################################################################
# Get info for FileVault2
##########################################################################################
for FileVault2User in mycomputerRecordHardwareFileVault2Users :
appendDataToCVS_JAMF_Computers_FileVault2_Info = "{'Type':'Computer Hardware FileVault2 Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer FileVault2 User':FileVault2User}"
appendJAMF_Computers_FileVault2_Info = eval(appendDataToCVS_JAMF_Computers_FileVault2_Info)
appendComputerFileVault2Columns = appendJAMF_Computers_FileVault2_Info
#Set Columns
Combined = MergeComputersInfo(computerColumns, hardwareColumns, appendComputerFileVault2Columns, LocalAccountColumns, computerGroupMembershipColumns, computerConfigurationProfileMembershipColumns)
#Set CSV File
dataToCsvComputers.append(Combined)
if get_JAMF_Computers_Info_Local_Account == ("yes"):
##########################################################################################
# Get info for Local Accounts
##########################################################################################
for computerLocalAccount in mycomputerRecordHardwareLocalAccounts:
# Put current data into variable to filter
filterComputerLocalAccountData = computerLocalAccount['name']
# Regex pattern that only matches account names not starting with "_" or "/" (macOS service accounts)
filterPattern = r"^((?![_/][a-zA-Z]*))"
filterDefaultUserAccountsListdata = filterDefaultUserAccountsList
if re.match(filterPattern, filterComputerLocalAccountData): #Only process accounts that are not service accounts
if filterComputerLocalAccountData not in filterDefaultUserAccountsListdata :
verifyLocalAccountIsAdmin = computerLocalAccount['administrator']
computerLocalAccountName = computerLocalAccount['name']
computerLocalAccountRealName = computerLocalAccount['realname']
#Set Variables if Data Available
if len(str(computerLocalAccount['uid'])) == 0:
computerLocalAccountUID = ''
else:
computerLocalAccountUID = int(computerLocalAccount['uid'])
computerLocalAccountIsAdmin = verifyLocalAccountIsAdmin
computerInInLDAP = "false"
if includeLocalAccountInfoLDAP == "yes":
# Set up url for looking up the local account against LDAP through the selected JIM server
url = JAMF_url + JIMServerLDAPLookupURL + "/user/" + filterComputerLocalAccountData
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
verifyLocalAccount = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# For Testing
#print(verifyLocalAccount)
verifidLocalAccountRecords = verifyLocalAccount['ldap_users']
verifidLocalAccountRecords.sort(key=lambda item: item.get('id'), reverse=False)
for localAccountRecord in verifidLocalAccountRecords :
#print(localAccountRecord['username'])
#Set Variables if Data Available
if len(str(localAccountRecord['uid'])) == 0:
computerLocalAccountUID = ''
else:
computerLocalAccountUID = int(localAccountRecord['uid'])
computerInInLDAP = "true"
appendDataToCVS_JAMF_Computers_Local_Account_Info = "{'Type':'Computer Hardware Local Account Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer Local Account Name':computerLocalAccountName,\
\
'Computer Local Account Real Name':computerLocalAccountRealName,\
\
'Computer Local Account ID':computerLocalAccountUID,\
\
'Computer Local Account is Admin ':computerLocalAccountIsAdmin,\
\
'Computer Local Account in LDAP ':computerInInLDAP}"
appendJAMF_Computers_Local_Account_Info = eval(appendDataToCVS_JAMF_Computers_Local_Account_Info)
appendLocalAccountColumns = appendJAMF_Computers_Local_Account_Info
#Set Columns
Combined = MergeComputersInfo(computerColumns, hardwareColumns, FileVault2Columns, appendLocalAccountColumns, computerGroupMembershipColumns, computerConfigurationProfileMembershipColumns)
#Set CSV File
dataToCsvComputers.append(Combined)
if get_JAMF_Computers_Info_Computer_Group_Membership == 'yes':
##########################################################################################
# Get info for Computer Group Membership
##########################################################################################
#Get Info from record
computerGroupMembershipRecords = mycomputerRecordComputerGroupMembership
#Get Computer Group Info
for group in computerGroupMembershipRecords:
#Renew the bearer token because the report is a long-running process
url = JAMF_url + "/api/v1/auth/keep-alive"
token = http.post(url, headers=btHeaders)
bearer = token.json()['token']
btHeaders = {
'Accept': 'application/json',
'Authorization': 'Bearer '+bearer
}
#only need group name for list
computerGroupMembershipName = group
#do look up for each name interation
# Lookup group info by computer id
url = JAMF_url + "/JSSResource/computergroups/name/" + computerGroupMembershipName
try:
computerGroupMembershipNameResponse = http.get(url, headers=btHeaders)
computerGroupMembershipNameResponse.raise_for_status()
resp = computerGroupMembershipNameResponse.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
#For Testing
#print(resp)
#Set Variables if Data Available
if len(str(resp['computer_group']['id'])) == 0:
mygroupMembershipId = ''
else:
mygroupMembershipId = int(resp['computer_group']['id'])
groupMembershipName = resp['computer_group']['name']
groupMembershipIsSmart = resp['computer_group']['is_smart']
appendDataToCVS_JAMF_Computers_Info_Computer_Group_Membership = "{'Type':'Computer Group Membership Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer Group Membership Group ID':mygroupMembershipId,\
\
'Computer Group Membership Group Name':groupMembershipName,\
\
'Computer Group Membership Group Is Smart':groupMembershipIsSmart}"
appendJAMF_Computers_Info_Computer_Group_Membership = eval(appendDataToCVS_JAMF_Computers_Info_Computer_Group_Membership)
appendComputerGroupMembershipColumns = appendJAMF_Computers_Info_Computer_Group_Membership
#Set Columns
Combined = MergeComputersInfo(computerColumns, hardwareColumns, FileVault2Columns, LocalAccountColumns, appendComputerGroupMembershipColumns, computerConfigurationProfileMembershipColumns)
#Set CSV File
dataToCsvComputers.append(Combined)
if get_JAMF_Computers_Info_Computer_Configuration_Profile_Membership == 'yes':
##########################################################################################
# Get info for Computer Configuration Profile Membership
##########################################################################################
#Get Info from record
computerConfigurationProfileMembership = mycomputerConfigurationProfileMembership
#Get Computer Group Info
for ConfigProfile in computerConfigurationProfileMembership:
#Renew the bearer token because the report is a long-running process
url = JAMF_url + "/api/v1/auth/keep-alive"
token = http.post(url, headers=btHeaders)
bearer = token.json()['token']
btHeaders = {
'Accept': 'application/json',
'Authorization': 'Bearer '+bearer
}
if ConfigProfile['id'] > 0:
configurationProfileID = str(ConfigProfile['id'])
#For testing
#print(configurationProfileID)
# Set up url for getting information from each configurationProfile ID from JAMF API
url = JAMF_url + "/JSSResource/osxconfigurationprofiles/id/" + configurationProfileID
try:
computerConfigurationProfileMembershipResponse = http.get(url, headers=btHeaders)
computerConfigurationProfileMembershipResponse.raise_for_status()
resp = computerConfigurationProfileMembershipResponse.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
continue
except Exception as err:
print(f'Other error occurred: {err}')
continue
#For Testing
#print(resp)
#General Element for ID and Catagory
myConfigurationProfileGeneral = resp['os_x_configuration_profile']['general']
myConfigurationProfileGeneralID = myConfigurationProfileGeneral['id']
myConfigurationProfileGeneralName = myConfigurationProfileGeneral['name']
#For Testing
#print(myConfigurationProfileGeneral)
#print(myConfigurationProfileGeneralID)
#print(myConfigurationProfileGeneralName)
appendDataToCVS_JAMF_Computers_Info_Computer_Configuration_Profile_Membership = "{'Type':'Computer Configuration Profile Membership Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer Configuration Profile Membership ID':myConfigurationProfileGeneralID,\
\
'Computer Configuration Profile Membership Name':myConfigurationProfileGeneralName}"
appendJAMF_Computers_Info_Computer_Configuration_Profile_Membership = eval(appendDataToCVS_JAMF_Computers_Info_Computer_Configuration_Profile_Membership)
appendComputerConfigurationProfileMembershipColumns = appendJAMF_Computers_Info_Computer_Configuration_Profile_Membership
#Set Columns
Combined = MergeComputersInfo(computerColumns, hardwareColumns, FileVault2Columns, LocalAccountColumns, computerGroupMembershipColumns, appendComputerConfigurationProfileMembershipColumns)
#Set CSV File
dataToCsvComputers.append(Combined)
else:
#Run for smart group or no filter results
for computerRecord in computerRecords:
# Get ID to do JAMF API lookup
computerRecordID = str(computerRecord['id'])
#For Testing
#print(computerRecordID)
# Set up url for getting information for each computer ID from JAMF API
url = JAMF_url + "/JSSResource/computers/id/" + computerRecordID
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
computerRecordProfile = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# For Testing
#print(computerRecordProfile)
#General Element for ID and Category
mycomputerRecordGeneral = computerRecordProfile['computer']['general']
mycomputerRecordHardware = computerRecordProfile['computer']['hardware']
mycomputerRecordHardwareFileVault2Users = computerRecordProfile['computer']['hardware']['filevault2_users']
mycomputerRecordHardwareLocalAccounts = computerRecordProfile['computer']['groups_accounts']['local_accounts']
mycomputerRecordComputerGroupMembership = computerRecordProfile['computer']['groups_accounts']['computer_group_memberships']
mycomputerConfigurationProfileMembership = computerRecordProfile['computer']['configuration_profiles']
##########################################################################################
# Process individual Computer record information for csv / Excel
##########################################################################################
# Individual Computers Info for each record
getMycomputerRecordGeneralID = (str(mycomputerRecordGeneral['id']) + " - " + mycomputerRecordGeneral['name'])
# Get info for Policies
print("Working on Computer ID: " + getMycomputerRecordGeneralID)
#Set Variables if Data Available
if len(str(mycomputerRecordGeneral['id'])) == 0:
mycomputerRecordGeneralID = ''
else:
mycomputerRecordGeneralID = int(mycomputerRecordGeneral['id'])
# Set Variables for Dict for Computers Info
if usingFilter == 'computerFilter':
appendDataToCVS_JAMF_Computers_Info = "{'Type':'Computer Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer Serial Number':str(mycomputerRecordGeneral['serial_number'])}"
elif usingFilter == 'smartGroupFilter':
appendDataToCVS_JAMF_Computers_Info = "{'Computer SmartGroup ID':smartGroupRecordID,\
\
'Computer SmartGroup Name':smartGroupRecordName,\
\
'Type':'Computer Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer Serial Number':str(mycomputerRecordGeneral['serial_number'])}"
elif usingFilter == 'noFilter':
appendDataToCVS_JAMF_Computers_Info = "{'Type':'Computer Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer Serial Number':str(mycomputerRecordGeneral['serial_number'])}"
appendJAMF_Computers_Info = eval(appendDataToCVS_JAMF_Computers_Info)
appendComputerColumns = appendJAMF_Computers_Info
#Set Columns
Combined = MergeComputersInfo(appendComputerColumns, hardwareColumns, FileVault2Columns, LocalAccountColumns, computerGroupMembershipColumns, computerConfigurationProfileMembershipColumns)
#Set CSV File
dataToCsvComputers.append(Combined)
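# Note on the pattern above: each row is first written out as a string and then turned into a
# dict with eval(). A minimal equivalent sketch (illustrative only, not executed here) for the
# 'noFilter' / 'computerFilter' branch builds the dict literal directly and avoids eval():
#
#   appendJAMF_Computers_Info = {
#       'Type': 'Computer Info',
#       'Computer ID': mycomputerRecordGeneralID,
#       'Computer Name': mycomputerRecordGeneral['name'],
#       'Computer Serial Number': str(mycomputerRecordGeneral['serial_number'])
#   }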
if get_JAMF_Computers_Info_Hardware == ("yes"):
##########################################################################################
# Get info for Hardware
##########################################################################################
formatMyComputerRecordHardwareOSBuild = f"\"{mycomputerRecordHardware['os_build']}\""
appendDataToCVS_JAMF_Computers_Hardware_Info = "{'Type':'Computer Hardware Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer Make':mycomputerRecordHardware['make'],\
\
'Computer Model':mycomputerRecordHardware['model'],\
\
'Computer Model Identifier':mycomputerRecordHardware['model_identifier'],\
\
'Computer OS Name':mycomputerRecordHardware['os_name'],\
\
'Computer OS Version':str(mycomputerRecordHardware['os_version']),\
\
'Computer OS Build':formatMyComputerRecordHardwareOSBuild}"
appendJAMF_Computers_Hardware_Info = eval(appendDataToCVS_JAMF_Computers_Hardware_Info)
appendComputerHardwareColumns = appendJAMF_Computers_Hardware_Info
#Set Columns
Combined = MergeComputersInfo(computerColumns, appendComputerHardwareColumns, FileVault2Columns, LocalAccountColumns, computerGroupMembershipColumns, computerConfigurationProfileMembershipColumns)
#Set CSV File
dataToCsvComputers.append(Combined)
if get_JAMF_Computers_Info_FileVault2_Users == ("yes"):
##########################################################################################
# Get info for FileVault2
##########################################################################################
for FileVault2User in mycomputerRecordHardwareFileVault2Users :
appendDataToCVS_JAMF_Computers_FileVault2_Info = "{'Type':'Computer Hardware FileVault2 Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer FileVault2 User':FileVault2User}"
appendJAMF_Computers_FileVault2_Info = eval(appendDataToCVS_JAMF_Computers_FileVault2_Info)
appendComputerFileVault2Columns = appendJAMF_Computers_FileVault2_Info
#Set Columns
Combined = MergeComputersInfo(computerColumns, hardwareColumns, appendComputerFileVault2Columns, LocalAccountColumns, computerGroupMembershipColumns, computerConfigurationProfileMembershipColumns)
#Set CSV File
dataToCsvComputers.append(Combined)
if get_JAMF_Computers_Info_Local_Account == ("yes"):
##########################################################################################
# Get info for Local Accounts
##########################################################################################
for computerLocalAccount in mycomputerRecordHardwareLocalAccounts:
# Put current data into variable to filter
filterComputerLocalAccountData = computerLocalAccount['name']
# Regex Pattern
filterPattern = r"^((?![_/][a-zA-Z]*))"
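# The negative lookahead above yields a match only when the account name does NOT start with
# an underscore or a slash, so macOS service/role accounts are skipped. For example
# (illustrative only):
#   re.match(filterPattern, "jsmith")        -> match (kept)
#   re.match(filterPattern, "_mbsetupuser")  -> None  (filtered out)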
filterDefaultUserAccountsListdata = filterDefaultUserAccountsList
if re.match(filterPattern, filterComputerLocalAccountData): #Check if regex is correct
if filterComputerLocalAccountData not in filterDefaultUserAccountsListdata :
verifyLocalAccountIsAdmin = computerLocalAccount['administrator']
computerLocalAccountName = computerLocalAccount['name']
computerLocalAccountRealName = computerLocalAccount['realname']
#Set Variables if Data Available
if len(str(computerLocalAccount['uid'])) == 0:
computerLocalAccountUID = ''
else:
computerLocalAccountUID = int(computerLocalAccount['uid'])
computerLocalAccountIsAdmin = verifyLocalAccountIsAdmin
computerInInLDAP = "false"
if includeLocalAccountInfoLDAP == "yes":
# Set up url for looking up this local account against LDAP (JIM Server) via the JAMF API
url = JAMF_url + JIMServerLDAPLookupURL + "/user/" + filterComputerLocalAccountData
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
verifyLocalAccount = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# For Testing
#print(verifyLocalAccount)
verifidLocalAccountRecords = verifyLocalAccount['ldap_users']
verifidLocalAccountRecords.sort(key=lambda item: item.get('id'), reverse=False)
for localAccountRecord in verifidLocalAccountRecords :
#print(localAccountRecord['username'])
#Set Variables if Data Available
if len(str(localAccountRecord['uid'])) == 0:
computerLocalAccountUID = ''
else:
computerLocalAccountUID = int(localAccountRecord['uid'])
computerInInLDAP = "true"
appendDataToCVS_JAMF_Computers_Local_Account_Info = "{'Type':'Computer Hardware Local Account Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer Local Account Name':computerLocalAccountName,\
\
'Computer Local Account Real Name':computerLocalAccountRealName,\
\
'Computer Local Account ID':computerLocalAccountUID,\
\
'Computer Local Account is Admin ':computerLocalAccountIsAdmin,\
\
'Computer Local Account in LDAP ':computerInInLDAP}"
appendJAMF_Computers_Local_Account_Info = eval(appendDataToCVS_JAMF_Computers_Local_Account_Info)
appendLocalAccountColumns = appendJAMF_Computers_Local_Account_Info
#Set Columns
Combined = MergeComputersInfo(computerColumns, hardwareColumns, FileVault2Columns, appendLocalAccountColumns, computerGroupMembershipColumns, computerConfigurationProfileMembershipColumns)
#Set CSV File
dataToCsvComputers.append(Combined)
if get_JAMF_Computers_Info_Computer_Group_Membership == 'yes':
##########################################################################################
# Get info for Computer Group Membership
##########################################################################################
#Get Info from record
computerGroupMembershipRecords = mycomputerRecordComputerGroupMembership
#Get Computer Group Info
for group in computerGroupMembershipRecords:
#Renew the bearer token because this report can run for a long time
url = JAMF_url + "/api/v1/auth/keep-alive"
token = http.post(url, headers=btHeaders)
bearer = token.json()['token']
btHeaders = {
'Accept': 'application/json',
'Authorization': 'Bearer '+bearer
}
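# This inline keep-alive call is repeated in several loops below. A small helper (a sketch
# only, not part of the original script; it assumes the same http session, JAMF_url and
# bearer-token flow) could centralize the renewal:
#
#   def renewBearerToken():
#       keepAliveURL = JAMF_url + "/api/v1/auth/keep-alive"
#       keepAlive = http.post(keepAliveURL, headers=btHeaders)
#       return {
#           'Accept': 'application/json',
#           'Authorization': 'Bearer ' + keepAlive.json()['token']
#       }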
#only need group name for list
computerGroupMembershipName = group
# Look up the group record by group name for each membership entry
url = JAMF_url + "/JSSResource/computergroups/name/" + computerGroupMembershipName
try:
computerGroupMembershipNameResponse = http.get(url, headers=btHeaders)
computerGroupMembershipNameResponse.raise_for_status()
resp = computerGroupMembershipNameResponse.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
#For Testing
#print(resp)
#Set Variables if Data Available
if len(str(resp['computer_group']['id'])) == 0:
mygroupMembershipId = ''
else:
mygroupMembershipId = int(resp['computer_group']['id'])
groupMembershipName = resp['computer_group']['name']
groupMembershipIsSmart = resp['computer_group']['is_smart']
appendDataToCVS_JAMF_Computers_Info_Computer_Group_Membership = "{'Type':'Computer Group Membership Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer Group Membership Group ID':mygroupMembershipId,\
\
'Computer Group Membership Group Name':groupMembershipName,\
\
'Computer Group Membership Group Is Smart':groupMembershipIsSmart}"
appendJAMF_Computers_Info_Computer_Group_Membership = eval(appendDataToCVS_JAMF_Computers_Info_Computer_Group_Membership)
appendComputerGroupMembershipColumns = appendJAMF_Computers_Info_Computer_Group_Membership
#Set Columns
Combined = MergeComputersInfo(computerColumns, hardwareColumns, FileVault2Columns, LocalAccountColumns, appendComputerGroupMembershipColumns, computerConfigurationProfileMembershipColumns)
#Set CSV File
dataToCsvComputers.append(Combined)
if get_JAMF_Computers_Info_Computer_Configuration_Profile_Membership == 'yes':
##########################################################################################
# Get info for Computer Configuration Profile Membership
##########################################################################################
#Get Info from record
computerConfigurationProfileMembership = mycomputerConfigurationProfileMembership
#Get Computer Group Info
for ConfigProfile in computerConfigurationProfileMembership:
#Renew the bearer token because this report can run for a long time
url = JAMF_url + "/api/v1/auth/keep-alive"
token = http.post(url, headers=btHeaders)
bearer = token.json()['token']
btHeaders = {
'Accept': 'application/json',
'Authorization': 'Bearer '+bearer
}
if ConfigProfile['id'] > 0:
configurationProfileID = str(ConfigProfile['id'])
#For testing
#print(configurationProfileID)
# Set up url for getting information from each configurationProfile ID from JAMF API
url = JAMF_url + "/JSSResource/osxconfigurationprofiles/id/" + configurationProfileID
try:
computerConfigurationProfileMembershipResponse = http.get(url, headers=btHeaders)
computerConfigurationProfileMembershipResponse.raise_for_status()
resp = computerConfigurationProfileMembershipResponse.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
continue
except Exception as err:
print(f'Other error occurred: {err}')
continue
#For Testing
#print(resp)
#General Element for ID and Category
myConfigurationProfileGeneral = resp['os_x_configuration_profile']['general']
myConfigurationProfileGeneralID = myConfigurationProfileGeneral['id']
myConfigurationProfileGeneralName = myConfigurationProfileGeneral['name']
#For Testing
#print(myConfigurationProfileGeneral)
print("Working on Configuration Profile Membership ID: " + str(myConfigurationProfileGeneralID) + " - " + myConfigurationProfileGeneralName)
appendDataToCVS_JAMF_Computers_Info_Computer_Configuration_Profile_Membership = "{'Type':'Computer Configuration Profile Membership Info',\
\
'Computer ID':mycomputerRecordGeneralID,\
\
'Computer Name':mycomputerRecordGeneral['name'],\
\
'Computer Configuration Profile Membership ID':myConfigurationProfileGeneralID,\
\
'Computer Configuration Profile Membership Name':myConfigurationProfileGeneralName}"
appendJAMF_Computers_Info_Computer_Configuration_Profile_Membership = eval(appendDataToCVS_JAMF_Computers_Info_Computer_Configuration_Profile_Membership)
appendComputerConfigurationProfileMembershipColumns = appendJAMF_Computers_Info_Computer_Configuration_Profile_Membership
#Set Columns
Combined = MergeComputersInfo(computerColumns, hardwareColumns, FileVault2Columns, LocalAccountColumns, computerGroupMembershipColumns, appendComputerConfigurationProfileMembershipColumns)
#Set CSV File
dataToCsvComputers.append(Combined)
##################################################
# Process Requested Info for Policies
##################################################
if get_JAMF_Policy_Info == ("yes"):
# Set up url for getting a list of all policies from JAMF API
url = JAMF_url + "/JSSResource/policies"
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
resp = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# For Testing
#print(response.json())
policies = resp['policies']
policies.sort(key=lambda item: item.get('id'), reverse=False)
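# Sort the policy summaries by their numeric id so records are processed (and printed)
# in ascending id order; the same pattern is used for the other record lists below.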
# Process Policy List and get information linked to policies
for policy in policies:
# Get Policy ID to do JAMF API lookup
PolicyID = str(policy['id'])
# For Testing
#print(PolicyID)
# Set up url for getting information from each policy ID from JAMF API
url = JAMF_url + "/JSSResource/policies/id/" + PolicyID
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
getPolicy = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# For Testing
#print(getPolicy)
#General Element for ID and Category
myPolicyGeneral = getPolicy['policy']['general']
myPolicyGeneralCatagory = getPolicy['policy']['general']['category']
#Scope Element for Computer Targets
myPolicyScopeTargetsAllComputers = getPolicy['policy']['scope']['all_computers']
myPolicyScopeTargetsComputers = getPolicy['policy']['scope']['computers']
myPolicyScopeTargetsComputerGroups = getPolicy['policy']['scope']['computer_groups']
#Scope Element For Limitation
#myPolicyScopeLimitationsUsers = getPolicy['policy']['scope']['limitations']['users']
#myPolicyScopeLimitationsUserGroups = getPolicy['policy']['scope']['limitations']['user_groups']
#Scope Element For Exclusions
myPolicyScopeExclusionsComputers = getPolicy['policy']['scope']['exclusions']['computers']
myPolicyScopeExclusionsComputerGroups = getPolicy['policy']['scope']['exclusions']['computer_groups']
#Package Element
myPackagesInfo = getPolicy['policy']['package_configuration']['packages']
#Script Elements
myScriptInfo = getPolicy['policy']['scripts']
#SelfService Element
mySelfServiceInfo = getPolicy['policy']['self_service']
useForSelfService = str(mySelfServiceInfo['use_for_self_service'])
##########################################################################################
# Process Policy information for csv / Excel
##########################################################################################
# Individual Policy Info for each record
getMyPolicyID = (str(myPolicyGeneral['id']) + " - " + myPolicyGeneral['name'])
getMyPolicyGeneralCatagory = (str(myPolicyGeneralCatagory['id']) + " - " + myPolicyGeneralCatagory['name'])
# Get info for Policies
print("Working on Policy ID: " + getMyPolicyID)
#Set Variables if Data Available
if len(str(myPolicyGeneral['id'])) == 0:
myPolicyGeneralID = ''
else:
myPolicyGeneralID = int(myPolicyGeneral['id'])
if len(str(myPolicyGeneralCatagory['id'])) == 0:
myPolicyGeneralCatagoryID = ''
else:
myPolicyGeneralCatagoryID = int(myPolicyGeneralCatagory['id'])
#Get Category name and format for Excel
formatMyPolicyGeneralCatagory = f"\"{myPolicyGeneralCatagory['name']}\""
# Set Variables for Dict for Policy Info
appendDataToCVS_JAMF_Policy_Info = "{'Type':'Policy',\
\
'Policy ID':myPolicyGeneralID,\
\
'Policy Name':myPolicyGeneral['name'],\
\
'Policy Category ID':myPolicyGeneralCatagoryID,\
\
'Policy Category Name':formatMyPolicyGeneralCatagory}"
appendJAMF_Policy_Info = eval(appendDataToCVS_JAMF_Policy_Info)
appendPolicyColumns = appendJAMF_Policy_Info
#Set Columns
Combined = MergePolicyInfo(appendPolicyColumns, selfServiceColumns, targetsColumns, exclusionColumns, packageColumns, scriptsColumns)
#Set CSV File
dataToCsvPolicy.append(Combined)
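# MergePolicyInfo (defined elsewhere in this script) combines the per-section column dicts
# into one flat row; the placeholder dicts keep every column present even when a section is
# not requested. Assuming it is a plain dict merge, an equivalent sketch (illustrative only)
# would be:
#
#   def MergePolicyInfo(policy, selfService, targets, exclusions, packages, scripts):
#       return {**policy, **selfService, **targets, **exclusions, **packages, **scripts}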
if get_JAMF_Policy_Info_SelfService == ("yes"):
if useForSelfService == 'True':
##########################################################################################
# Get Info for Self Service
##########################################################################################
# Set Variables for Dict for Policy Info
appendDataToCVS_JAMF_Policy_SelfService_Info = "{'Type':'Policy Self Service Info',\
\
'Policy ID':myPolicyGeneralID,\
\
'Policy Name':myPolicyGeneral['name'],\
\
'Policy Category ID':myPolicyGeneralCatagoryID,\
\
'Policy Category Name':formatMyPolicyGeneralCatagory,\
\
'Policy In SelfService':str(mySelfServiceInfo['use_for_self_service']),\
\
'Policy In SelfService Name':mySelfServiceInfo['self_service_display_name']}"
appendJAMF_Policy_SelfService_Info = eval(appendDataToCVS_JAMF_Policy_SelfService_Info)
appendSelfServiceColumns = appendJAMF_Policy_SelfService_Info
#Set Columns
Combined = MergePolicyInfo(policyColumns, appendSelfServiceColumns, targetsColumns, exclusionColumns, packageColumns, scriptsColumns)
#Set CSV File
dataToCsvPolicy.append(Combined)
if get_JAMF_Policy_Info_Targets == ("yes"):
##########################################################################################
# Get info for Target Computers
##########################################################################################
for computer in myPolicyScopeTargetsComputers:
#Set Variables if Data Available
if len(str(computer['id'])) == 0:
computerID = ''
else:
computerID = int(computer['id'])
appendDataToCVS_JAMF_Policy_Target_Info = "{'Type':'Policy Computer Targets',\
\
'Policy ID':myPolicyGeneralID,\
\
'Policy Name':myPolicyGeneral['name'],\
\
'Policy Category ID':myPolicyGeneralCatagoryID,\
\
'Policy Category Name':formatMyPolicyGeneralCatagory,\
\
'Policy Target All Computers':str(myPolicyScopeTargetsAllComputers),\
\
'Policy Target Computer ID':computerID,\
\
'Policy Target Computer Name':computer['name']}"
appendJAMF_Policy_Target_Info = eval(appendDataToCVS_JAMF_Policy_Target_Info)
appendtargetsColumns = appendJAMF_Policy_Target_Info
#Set Columns
Combined = MergePolicyInfo(policyColumns, selfServiceColumns, appendtargetsColumns, exclusionColumns, packageColumns, scriptsColumns)
#Set CSV File
dataToCsvPolicy.append(Combined)
##########################################################################################
# Get Info for Target Computer Groups
##########################################################################################
for target in myPolicyScopeTargetsComputerGroups:
targetGroupID = str(target['id'])
#Get Group Info from JAMF API
url = JAMF_url + "/JSSResource/computergroups/id/" + targetGroupID
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
getTargetGroupData = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
#Computer Group Element for Target Groups
myTargetsComputerGroupInfo = getTargetGroupData['computer_group']
#Set Variables if Data Available
if len(str(myTargetsComputerGroupInfo['id'])) == 0:
myTargetsComputerGroupInfoID = ''
else:
myTargetsComputerGroupInfoID = int(myTargetsComputerGroupInfo['id'])
appendDataToCVS_JAMF_Policy_Target_Group_Info = "{'Type':'Policy Computer Target Group',\
\
'Policy ID':myPolicyGeneralID,\
\
'Policy Name':myPolicyGeneral['name'],\
\
'Policy Category ID':myPolicyGeneralCatagoryID,\
\
'Policy Category Name':formatMyPolicyGeneralCatagory,\
\
'Policy Target Group ID':myTargetsComputerGroupInfoID,\
\
'Policy Target Group Name':myTargetsComputerGroupInfo['name'],\
\
'Policy Target Group is Smart':str(myTargetsComputerGroupInfo['is_smart'])}"
appendJAMF_Policy_Target_Group_Info = eval(appendDataToCVS_JAMF_Policy_Target_Group_Info)
appendtargetsGroupsColumns = appendJAMF_Policy_Target_Group_Info
#Set Columns
Combined = MergePolicyInfo(policyColumns, selfServiceColumns, appendtargetsGroupsColumns, exclusionColumns, packageColumns, scriptsColumns)
#Set CSV File
dataToCsvPolicy.append(Combined)
if get_JAMF_Policy_Info_Exclusions == ("yes"):
##########################################################################################
# Get info for exclusion Computers
##########################################################################################
for exclusion in myPolicyScopeExclusionsComputers:
#Set Variables if Data Available
if len(str(exclusion['id'])) == 0:
exclusionID = ''
else:
exclusionID = int(exclusion['id'])
appendDataToCVS_JAMF_Policy_Exclusion_Info = "{'Type':'Policy Computer Exclusions',\
\
'Policy ID':myPolicyGeneralID,\
\
'Policy Name':myPolicyGeneral['name'],\
\
'Policy Category ID':myPolicyGeneralCatagoryID,\
\
'Policy Category Name':formatMyPolicyGeneralCatagory,\
\
'Policy Exclusion Computer ID':exclusionID,\
\
'Policy Exclusion Computer Name':exclusion['name']}"
appendJAMF_Policy_Exclusion_Info = eval(appendDataToCVS_JAMF_Policy_Exclusion_Info)
appendExclusionColumns = appendJAMF_Policy_Exclusion_Info
#Set Columns
Combined = MergePolicyInfo(policyColumns, selfServiceColumns, targetsColumns, appendExclusionColumns, packageColumns, scriptsColumns)
#Set CSV File
dataToCsvPolicy.append(Combined)
##########################################################################################
# Get Info for Computer Exclusions groups
##########################################################################################
for exclusion in myPolicyScopeExclusionsComputerGroups:
exclusionGroupID = str(exclusion['id'])
#Get Group Info from JAMF API
url = JAMF_url + "/JSSResource/computergroups/id/" + exclusionGroupID
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
getExclusionGroupData = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
myExclusionsComputerGroupInfo = getExclusionGroupData['computer_group']
#Set Variables if Data Available
if len(str(myExclusionsComputerGroupInfo['id'])) == 0:
myExclusionsComputerGroupInfoID = ''
else:
myExclusionsComputerGroupInfoID = int(myExclusionsComputerGroupInfo['id'])
appendDataToCVS_JAMF_Policy_Exclusion_Group_Info = "{'Type':'Policy Computer Exclusions Group',\
\
'Policy ID':myPolicyGeneralID,\
\
'Policy Name':myPolicyGeneral['name'],\
\
'Policy Category ID':myPolicyGeneralCatagoryID,\
\
'Policy Category Name':formatMyPolicyGeneralCatagory,\
\
'Policy Exclusion Group id':myExclusionsComputerGroupInfoID,\
\
'Policy Exclusion Group Name':myExclusionsComputerGroupInfo['name'],\
\
'Policy Exclusion Group is Smart':str(myExclusionsComputerGroupInfo['is_smart'])}"
appendJAMF_Policy_Exclusion_Info = eval(appendDataToCVS_JAMF_Policy_Exclusion_Group_Info)
appendExclusionGroupsColumns = appendJAMF_Policy_Exclusion_Info
#Set Columns
Combined = MergePolicyInfo(policyColumns, selfServiceColumns, targetsColumns, appendExclusionGroupsColumns, packageColumns, scriptsColumns)
#Set CSV File
dataToCsvPolicy.append(Combined)
if get_JAMF_Policy_Info_Packages == ("yes"):
##########################################################################################
#Get Info for Packages in Policy
##########################################################################################
for package in myPackagesInfo:
packageID = str(package['id'])
#Get Group Info from JAMF API
url = JAMF_url + "/JSSResource/packages/id/" + packageID
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
getPackageData = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
myPackageInfo = getPackageData['package']
formatMyPackageInfoCatagory = f"\"{myPackageInfo['category']}\""
#Set Variables if Data Available
if len(str(myPackageInfo['id'])) == 0:
myPackageInfoID = ''
else:
myPackageInfoID = int(myPackageInfo['id'])
appendDataToCVS_JAMF_Policy_Packages_Info = "{'Type':'Policy Package',\
\
'Policy ID':myPolicyGeneralID,\
\
'Policy Name':myPolicyGeneral['name'],\
\
'Policy Category ID':myPolicyGeneralCatagoryID,\
\
'Policy Category Name':formatMyPolicyGeneralCatagory,\
\
'Policy Package ID':myPackageInfoID,\
\
'Policy Package Name':myPackageInfo['name'],\
\
'Policy Package Category Name':formatMyPackageInfoCatagory,\
\
'Policy Package Filename':myPackageInfo['filename']}"
appendJAMF_Policy_Packages_Info = eval(appendDataToCVS_JAMF_Policy_Packages_Info)
appendPackageColumns = appendJAMF_Policy_Packages_Info
#Set Columns
Combined = MergePolicyInfo(policyColumns, selfServiceColumns, targetsColumns, exclusionColumns, appendPackageColumns, scriptsColumns)
#Set CSV File
dataToCsvPolicy.append(Combined)
if get_JAMF_Policy_Info_Scripts == ("yes"):
##########################################################################################
#Get Info for scripts in Policy
##########################################################################################
for script in myScriptInfo:
scriptID = str(script['id'])
#Get Group Info from JAMF API
url = JAMF_url + "/JSSResource/scripts/id/" + scriptID
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
getScriptData = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
myScriptInfo = getScriptData['script']
formatMyScriptsinfoCatagory = f"\"{myScriptInfo['category']}\""
#Set Variables if Data Available
if len(str(myScriptInfo['id'])) == 0:
myScriptInfoID = ''
else:
myScriptInfoID = int(myScriptInfo['id'])
appendDataToCVS_JAMF_Policy_Scripts_Info = "{'Type':'Policy Scripts',\
\
'Policy ID':myPolicyGeneralID,\
\
'Policy Name':myPolicyGeneral['name'],\
\
'Policy Category ID':myPolicyGeneralCatagoryID,\
\
'Policy Category Name':formatMyPolicyGeneralCatagory,\
\
'Policy Script ID':myScriptInfoID,\
\
'Policy Script Name':myScriptInfo['name'],\
\
'Policy Script Category Name':formatMyScriptsinfoCatagory,\
\
'Policy Script Filename':myScriptInfo['filename']}"
appendJAMF_Policy_Scripts_Info = eval(appendDataToCVS_JAMF_Policy_Scripts_Info)
appendScriptsColumns = appendJAMF_Policy_Scripts_Info
#Set Columns
Combined = MergePolicyInfo(policyColumns, selfServiceColumns, targetsColumns, exclusionColumns, packageColumns, appendScriptsColumns)
#Set CSV File
dataToCsvPolicy.append(Combined)
##########################################################################################
# Configuration Profiles Section
##########################################################################################
if get_JAMF_Configuration_Profile_Info == ("yes"):
##########################################################################################
# Process Configuration Profiles information for csv / Excel
##########################################################################################
# Set up url for getting a list of all Configuration Profiles from JAMF API
url = JAMF_url + "/JSSResource/osxconfigurationprofiles"
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
resp = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# For Testing
#print(response.json())
configurationProfiles = resp['os_x_configuration_profiles']
configurationProfiles.sort(key=lambda item: item.get('id'), reverse=False)
# Process Configuration Profile List and get information linked to Configuration Profiles
for configurationProfile in configurationProfiles:
# Get configurationProfile ID to do JAMF API lookup
configurationProfileID = str(configurationProfile['id'])
#For Testing
#print(configurationProfileID)
# Set up url for getting information from each configurationProfile ID from JAMF API
url = JAMF_url + "/JSSResource/osxconfigurationprofiles/id/" + configurationProfileID
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
getConfigurationProfile = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# For Testing
#print(getConfigurationProfile)
#General Element for ID and Category
myConfigurationProfileGeneral = getConfigurationProfile['os_x_configuration_profile']['general']
myConfigurationProfileGeneralCatagory = getConfigurationProfile['os_x_configuration_profile']['general']['category']
#Scope Element for Computer Targets
myConfigurationProfileScopeTargetsAllComputers = getConfigurationProfile['os_x_configuration_profile']['scope']['all_computers']
myConfigurationProfileScopeTargetsComputers = getConfigurationProfile['os_x_configuration_profile']['scope']['computers']
myConfigurationProfileScopeTargetsComputerGroups = getConfigurationProfile['os_x_configuration_profile']['scope']['computer_groups']
#Scope Element For Limitation
myConfigurationProfileScopeLimitationsUsers = getConfigurationProfile['os_x_configuration_profile']['scope']['limitations']['users']
myConfigurationProfileScopeLimitationsUserGroups = getConfigurationProfile['os_x_configuration_profile']['scope']['limitations']['user_groups']
#Scope Element For Exclusions
myConfigurationProfileScopeExclusionsComputers = getConfigurationProfile['os_x_configuration_profile']['scope']['exclusions']['computers']
myConfigurationProfileScopeExclusionsComputerGroups = getConfigurationProfile['os_x_configuration_profile']['scope']['exclusions']['computer_groups']
##########################################################################################
# Process ConfigurationProfile information for csv / Excel
##########################################################################################
# Individual ConfigurationProfile Info for each record
getMyConfigurationProfileID = (str(myConfigurationProfileGeneral['id']) + " - " + myConfigurationProfileGeneral['name'])
getMyConfigurationProfileGeneralCatagory = (str(myConfigurationProfileGeneralCatagory['id']) + " - " + myConfigurationProfileGeneralCatagory['name'])
# Progress output for this Configuration Profile
print("Working on Configuration Profile ID: " + getMyConfigurationProfileID)
formatMyConfigurationProfileGeneralCatagory = f"\"{myConfigurationProfileGeneralCatagory['name']}\""
#Set Variables if Data Available
if len(str(myConfigurationProfileGeneral['id'])) == 0:
myConfigurationProfileGeneralID = ''
else:
myConfigurationProfileGeneralID = int(myConfigurationProfileGeneral['id'])
#Set Variables if Data Available
if len(str(myConfigurationProfileGeneralCatagory['id'])) == 0:
myConfigurationProfileGeneralCatagoryID = ''
else:
myConfigurationProfileGeneralCatagoryID = int(myConfigurationProfileGeneralCatagory['id'])
# Set Variables for Dict for Configuration Profile Info
appendDataToCVS_JAMF_Configuration_Profile_Info = "{'Configuration Profile Type':'Configuration Profile',\
\
'Configuration Profile ID':myConfigurationProfileGeneralID,\
\
'Configuration Profile Name':myConfigurationProfileGeneral['name'],\
\
'Configuration Profile Category ID':myConfigurationProfileGeneralCatagoryID,\
\
'Configuration Profile Category Name':formatMyConfigurationProfileGeneralCatagory}"
appendJAMF_Configuration_Profile_Info = eval(appendDataToCVS_JAMF_Configuration_Profile_Info)
appendConfigProfileColumns = appendJAMF_Configuration_Profile_Info
#Set Columns
Combined = MergeConfigProfileInfo(appendConfigProfileColumns, configProfileTargetsColumns, configProfileExclusionsColumns)
#Set CSV File
dataToCsvConfigurationProfile.append(Combined)
if get_JAMF_Configuration_Profile_Info_Targets == ("yes"):
##########################################################################################
# Get info for Target Computers
##########################################################################################
for computer in myConfigurationProfileScopeTargetsComputers:
appendDataToCVS_JAMF_Configuration_Profile_Target_Info = "{'Configuration Profile Type':'Configuration Profile Target Computer',\
\
'Configuration Profile ID':myConfigurationProfileGeneralID,\
\
'Configuration Profile Name':myConfigurationProfileGeneral['name'],\
\
'Configuration Profile Category ID':myConfigurationProfileGeneralCatagoryID,\
\
'Configuration Profile Category Name':formatMyConfigurationProfileGeneralCatagory,\
\
'Configuration Profile Target Computer ID':computer['id'],\
\
'Configuration Profile Target Computer Name':computer['name']}"
appendJAMF_Configuration_Profile_Target_Info = eval(appendDataToCVS_JAMF_Configuration_Profile_Target_Info)
appendConfigProfileTargetsColumns = appendJAMF_Configuration_Profile_Target_Info
#Set Columns
Combined = MergeConfigProfileInfo(configProfileColumns, appendConfigProfileTargetsColumns, configProfileExclusionsColumns)
#Set CSV File
dataToCsvConfigurationProfile.append(Combined)
##########################################################################################
# Get Info for Target Computer Groups
##########################################################################################
for target in myConfigurationProfileScopeTargetsComputerGroups:
targetGroupID = str(target['id'])
#Get Group Info from JAMF API
url = JAMF_url + "/JSSResource/computergroups/id/" + targetGroupID
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
getTargetGroupData = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
#Computer Group Element for Target Groups
myTargetsComputerGroupInfo = getTargetGroupData['computer_group']
#Set Variables if Data Available
if len(str(myTargetsComputerGroupInfo['id'])) == 0:
myTargetsComputerGroupInfoID = ''
else:
myTargetsComputerGroupInfoID = int(myTargetsComputerGroupInfo['id'])
# Get info for Target Computer Group
appendDataToCVS_JAMF_Configuration_Profile_Target_Group_Info = "{'Configuration Profile Type':'Configuration Profile Target Computer Group',\
\
'Configuration Profile ID':myConfigurationProfileGeneralID,\
\
'Configuration Profile Name':myConfigurationProfileGeneral['name'],\
\
'Configuration Profile Category ID':myConfigurationProfileGeneralCatagoryID,\
\
'Configuration Profile Category Name':formatMyConfigurationProfileGeneralCatagory,\
\
'Configuration Profile Target Group ID':myTargetsComputerGroupInfoID,\
\
'Configuration Profile Target Group Name':myTargetsComputerGroupInfo['name'],\
\
'Configuration Profile Target Group is Smart':str(myTargetsComputerGroupInfo['is_smart'])}"
appendJAMF_Configuration_Profile_Target_Group_Info = eval(appendDataToCVS_JAMF_Configuration_Profile_Target_Group_Info)
appendConfigProfileTargetGroupsColumns = appendJAMF_Configuration_Profile_Target_Group_Info
#Set Columns
Combined = MergeConfigProfileInfo(configProfileColumns, appendConfigProfileTargetGroupsColumns, configProfileExclusionsColumns)
#Set CSV File
dataToCsvConfigurationProfile.append(Combined)
if get_JAMF_Configuration_Profile_Info_Exclusions == ("yes"):
##########################################################################################
# Get info for exclusion Computers
##########################################################################################
for exclusion in myConfigurationProfileScopeExclusionsComputers:
#Set Variables if Data Available
if len(str(exclusion['id'])) == 0:
exclusionID = ''
else:
exclusionID = int(exclusion['id'])
appendDataToCVS_JAMF_Configuration_Profile_Exclusion_Info = "{'Configuration Profile Type':'Configuration Profile Exclusion Computers',\
\
'Configuration Profile ID':myConfigurationProfileGeneralID,\
\
'Configuration Profile Name':myConfigurationProfileGeneral['name'],\
\
'Configuration Profile Category ID':myConfigurationProfileGeneralCatagoryID,\
\
'Configuration Profile Category Name':formatMyConfigurationProfileGeneralCatagory,\
\
'Configuration Profile Exclusion Computer id':exclusionID,\
\
'Configuration Profile Exclusion Computer Name':exclusion['name']}"
appendJAMF_Configuration_Profile_Exclusion_Info = eval(appendDataToCVS_JAMF_Configuration_Profile_Exclusion_Info)
appendConfigProfileExclusionsColumns = appendJAMF_Configuration_Profile_Exclusion_Info
#Set Columns
Combined = MergeConfigProfileInfo(configProfileColumns, configProfileTargetsColumns, appendConfigProfileExclusionsColumns)
#Set CSV File
dataToCsvConfigurationProfile.append(Combined)
##########################################################################################
#Get Info for Computer Exclusions groups
##########################################################################################
for exclusion in myConfigurationProfileScopeExclusionsComputerGroups:
exclusionGroupID = str(exclusion['id'])
#Get Group Info from JAMF API
url = JAMF_url + "/JSSResource/computergroups/id/" + exclusionGroupID
try:
response = http.get(url, headers=btHeaders)
response.raise_for_status()
getExclusionGroupData = response.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
myExclusionsComputerGroupInfo = getExclusionGroupData['computer_group']
#Set Variables if Data Available
if len(str(myExclusionsComputerGroupInfo['id'])) == 0:
myExclusionsComputerGroupInfoID = ''
else:
myExclusionsComputerGroupInfoID = int(myExclusionsComputerGroupInfo['id'])
appendDataToCVS_JAMF_Configuration_Profile_Exclusion_Groups_Info = "{'Configuration Profile Type':'Configuration Profile Exclusion Computer Groups',\
\
'Configuration Profile ID':myConfigurationProfileGeneralID,\
\
'Configuration Profile Name':myConfigurationProfileGeneral['name'],\
\
'Configuration Profile Category ID':myConfigurationProfileGeneralCatagoryID,\
\
'Configuration Profile Category Name':formatMyConfigurationProfileGeneralCatagory,\
\
'Configuration Profile Exclusion Group id':myExclusionsComputerGroupInfoID,\
\
'Configuration Profile Exclusion Group Name':myExclusionsComputerGroupInfo['name'],\
\
'Configuration Profile Exclusion Group is Smart':str(myExclusionsComputerGroupInfo['is_smart'])}"
appendJAMF_Configuration_Profile_Exclusion_Groups_Info = eval(appendDataToCVS_JAMF_Configuration_Profile_Exclusion_Groups_Info)
appendConfigProfileExclusionsGroupsColumns = appendJAMF_Configuration_Profile_Exclusion_Groups_Info
#Set Columns
Combined = MergeConfigProfileInfo(configProfileColumns, configProfileTargetsColumns, appendConfigProfileExclusionsGroupsColumns)
#Set CSV File
dataToCsvConfigurationProfile.append(Combined)
##########################################################################################
# Package to Policies Section
##########################################################################################
if get_JAMF_Package_To_Policy_Info == ("yes"):
##########################################################################################
# Process Package to Policies information for csv / Excel
##########################################################################################
# Set up url for getting a list of all Package to Regular Policies from JAMF API
url = JAMF_url + "/JSSResource/policies"
# Set up list
policyPackagesList = []
try:
policyResponse = http.get(url, headers=btHeaders)
policyResponse.raise_for_status()
resp = policyResponse.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
#For Testing
#print(resp)
policyRecords = resp['policies']
policyRecords.sort(key=lambda item: item.get('id'), reverse=False)
for policy in policyRecords:
# Get Policy ID to do JAMF API lookup
policyRecordsID = str(policy['id'])
# For Testing
#print(policyRecordsID)
# Set up url for getting information from each policy ID from JAMF API
url = JAMF_url + "/JSSResource/policies/id/" + policyRecordsID
try:
PolicyRecordsResponse = http.get(url, headers=btHeaders)
PolicyRecordsResponse.raise_for_status()
getPolicyRecords = PolicyRecordsResponse.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# For Testing
#print(getPolicyRecords)
#Get policy ID and Name for report
policyInfoID = getPolicyRecords['policy']['general']['id']
policyInfoName = getPolicyRecords['policy']['general']['name']
# Find the package data in each policy
policyPackageInfo = getPolicyRecords['policy']['package_configuration']['packages']
# Individual Policy Info for each record
getMyPolicyIDList = (str(policyInfoID) + " - " + policyInfoName)
# Get info for Policies
print("Gathering List for Package Info from Policy ID: " + getMyPolicyIDList)
#Get Package ID from Policy to compare and find unused packages.
for policyPackage in policyPackageInfo:
#get package info for dict
policyPackagesDict = {'Policy ID': policyInfoID, 'Policy Name': policyInfoName, 'Package ID': str(policyPackage['id'])}
policyPackagesList.append(policyPackagesDict)
#For testing
#print(policyPackagesList)
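# policyPackagesList now holds one dict per (policy, package) pair, e.g.
# {'Policy ID': 123, 'Policy Name': 'Install Foo', 'Package ID': '45'} (values are
# illustrative); 'Package ID' is stored as a string so it can be compared with
# str(packageRecordsID) further down.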
if includePreStagePackageToPolicyInfo == ("yes"):
##########################################################################################
# Process Package to PreStage Policies information for csv / Excel
##########################################################################################
# Set up url for getting a list of all Package to PreStage Policies from JAMF API
PSURL = JAMF_url + "/api/v2/computer-prestages"
preStagePolicyPackagesList = []
try:
preStagePolicyPackagesResponse = http.get(PSURL, headers=btHeaders)
preStagePolicyPackagesResponse.raise_for_status()
resp = preStagePolicyPackagesResponse.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
#For Testing
#print(resp)
preStagePolicies = resp['results']
for results in preStagePolicies:
preStagePoliciesID = results['id']
packages = results['customPackageIds']
preStagePoliciesDisplayName = results['displayName']
# Individual Policy Info for each record
getMyPreStagePolicyIDList = (str(preStagePoliciesID) + " - " + preStagePoliciesDisplayName)
# Get info for Policies
print("Gathering List for Package Info from PreStage Policy ID: " + getMyPreStagePolicyIDList)
for package in packages:
#print(package)
preStagePolicyPackagesDict = {'PreStage Policy ID': preStagePoliciesID, 'PreStage Policy Display Name': preStagePoliciesDisplayName, 'Package ID': package}
preStagePolicyPackagesList.append(preStagePolicyPackagesDict)
#print(preStagePolicyPackagesList)
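# preStagePolicyPackagesList mirrors policyPackagesList but for PreStage enrollments:
# one dict per (PreStage policy, package) pair, with 'Package ID' taken directly from
# customPackageIds so it can be compared as a string below.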
##########################################################################################
# Look up package information and compare it against the policy lists to find which packages are in use
##########################################################################################
url = JAMF_url + "/JSSResource/packages"
try:
packageResponse = http.get(url, headers=btHeaders)
packageResponse.raise_for_status()
resp = packageResponse.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
#For Testing
#print(resp)
packageRecords = resp['packages']
packageRecords.sort(key=lambda item: item.get('id'), reverse=False)
#print(packageRecords)
#process package records and set dict and list
for package in packageRecords:
packageRecordsID = package['id']
packageRecordsName = package['name']
key = 'Package ID'
value = str(packageRecordsID)
# Individual Policy Info for each record
getMyPackageList = (str(packageRecordsID) + " - " + packageRecordsName)
# Get info for Policies
print("Checking Policies that use Package: " + getMyPackageList)
#for testing
#print(packageRecordsID)
#print(policyPackagesList)
#print(type(value))
#Process Info for packages to policies
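# checkIfPackageIsUsedInPolicy() is defined elsewhere in this script; assuming it simply
# tests whether any dict in the given list carries the key/value pair, a minimal sketch
# (illustrative only) would be:
#
#   def checkIfPackageIsUsedInPolicy(listOfDicts, key, value):
#       return any(entry.get(key) == value for entry in listOfDicts)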
if checkIfPackageIsUsedInPolicy(preStagePolicyPackagesList, key, value) and checkIfPackageIsUsedInPolicy(policyPackagesList, key, value):
for policy in policyPackagesList:
policyPackageID = policy['Package ID']
checkPolicyListID = str(policyPackageID)
checkPackageRecordsID = str(packageRecordsID)
if checkPolicyListID == checkPackageRecordsID:
# Set up url for getting information for this package ID from the JAMF API
url = JAMF_url + "/JSSResource/packages/id/" + str(packageRecordsID)
try:
myPackageRecordsResponse = http.get(url, headers=btHeaders)
myPackageRecordsResponse.raise_for_status()
getMyPackageRecords = myPackageRecordsResponse.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# for testing
#print(getMyPackageRecords['package']['id'])
#Set Variables if Data Available
if len(str(getMyPackageRecords['package']['id'])) == 0:
myCurrentPackageID = ''
else:
myCurrentPackageID = int(getMyPackageRecords['package']['id'])
myCurrentPackageName = getMyPackageRecords['package']['name']
myPackageRecordsFileName = getMyPackageRecords['package']['filename']
if len(str(policy['Policy ID'])) == 0:
myCurrentPolicyID = ''
else:
myCurrentPolicyID = int(policy['Policy ID'])
myCurrentPolicyName = policy['Policy Name']
appendDataToCVS_JAMF_Package_To_Regular_Policy_Info = "{'Type':'Package Used',\
\
'Package List':'Regular Policy',\
\
'Package ID':myCurrentPackageID,\
\
'Package Name':myCurrentPackageName,\
\
'Package File Name':myPackageRecordsFileName,\
\
'Policy ID':myCurrentPolicyID,\
\
'Policy Name':myCurrentPolicyName}"
appendJAMF_Package_To_Regular_Policy_Info = eval(appendDataToCVS_JAMF_Package_To_Regular_Policy_Info)
appendPackageToRegularPolicyColumns = appendJAMF_Package_To_Regular_Policy_Info
#Set Columns
Combined = appendPackageToRegularPolicyColumns
#Set CSV File
dataToCsvPackageToPolicy.append(Combined)
# For Testing
#print(f"Yes, Package ID: " + myCurrentPackageID + " with Package Name: " + myCurrentPackageName + " and Package File Name: " + myPackageRecordsFileName + ", is being used by Policy ID: " + str(myCurrentPolicyID) + " with Policy Name: " + myCurrentPolicyName)
for preStagePolicy in preStagePolicyPackagesList:
preStagePolicyPackageID = preStagePolicy['Package ID']
checkPreStagePolicyListID = str(preStagePolicyPackageID)
checkPackageRecordsID = str(packageRecordsID)
if checkPreStagePolicyListID == checkPackageRecordsID:
# Set up url for getting information for this package ID from the JAMF API
url = JAMF_url + "/JSSResource/packages/id/" + str(packageRecordsID)
try:
myPackageRecordsResponse = http.get(url, headers=btHeaders)
myPackageRecordsResponse.raise_for_status()
getMyPackageRecords = myPackageRecordsResponse.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
#print(getMyPackageRecords['package']['id'])
#Set Variables if Data Available
if len(str(getMyPackageRecords['package']['id'])) == 0:
myCurrentPackageID = ''
else:
myCurrentPackageID = int(getMyPackageRecords['package']['id'])
myCurrentPackageName = getMyPackageRecords['package']['name']
myPackageRecordsFileName = getMyPackageRecords['package']['filename']
if len(str(preStagePolicy['PreStage Policy ID'])) == 0:
myCurrentPreStagePolicyID = ''
else:
myCurrentPreStagePolicyID = int(preStagePolicy['PreStage Policy ID'])
myCurrentPreStagePolicyName = preStagePolicy['PreStage Policy Display Name']
appendDataToCVS_JAMF_Package_To_PreStage_Policy_Info = "{'Type':'Package Used',\
\
'Package List':'PreStage Policy',\
\
'Package ID':myCurrentPackageID,\
\
'Package Name':myCurrentPackageName,\
\
'Package File Name':myPackageRecordsFileName,\
\
'PreStage Policy ID':myCurrentPreStagePolicyID,\
\
'PreStage Policy Name':myCurrentPreStagePolicyName}"
appendJAMF_Package_To_PreStage_Policy_Info = eval(appendDataToCVS_JAMF_Package_To_PreStage_Policy_Info)
appendPackageToPreStagePolicyColumns = appendJAMF_Package_To_PreStage_Policy_Info
#Set Columns
Combined = appendPackageToPreStagePolicyColumns
#Set CSV File
dataToCsvPackageToPolicy.append(Combined)
# For Testing
#print(f"Yes, Package ID: " + myCurrentPackageID + " with Package Name: " + myCurrentPackageName + " and Package FileName: "+ myPackageRecordsFileName + " is being used in PreStage Policies ID: " + myCurrentPreStagePolicyID + " with PreStage Display Name: " + myCurrentPreStagePolicyName)
elif checkIfPackageIsUsedInPolicy(policyPackagesList, key, value):
for policy in policyPackagesList:
policyPackageID = policy['Package ID']
checkPolicyListID = str(policyPackageID)
checkPackageRecordsID = str(packageRecordsID)
if checkPolicyListID == checkPackageRecordsID:
# Set up url for getting information for this package ID from the JAMF API
url = JAMF_url + "/JSSResource/packages/id/" + str(packageRecordsID)
try:
myPackageRecordsResponse = http.get(url, headers=btHeaders)
myPackageRecordsResponse.raise_for_status()
getMyPackageRecords = myPackageRecordsResponse.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# for testing
#print(getMyPackageRecords['package']['id'])
#Set Variables if Data Available
if len(str(getMyPackageRecords['package']['id'])) == 0:
myCurrentPackageID = ''
else:
myCurrentPackageID = int(getMyPackageRecords['package']['id'])
myCurrentPackageName = getMyPackageRecords['package']['name']
myPackageRecordsFileName = getMyPackageRecords['package']['filename']
if len(str(policy['Policy ID'])) == 0:
myCurrentPolicyID = ''
else:
myCurrentPolicyID = int(policy['Policy ID'])
myCurrentPolicyName = policy['Policy Name']
appendDataToCVS_JAMF_Package_To_Regular_Policy_Info = "{'Type':'Package Used',\
\
'Package List':'Regular Policy',\
\
'Package ID':myCurrentPackageID,\
\
'Package Name':myCurrentPackageName,\
\
'Package File Name':myPackageRecordsFileName,\
\
'Policy ID':myCurrentPolicyID,\
\
'Policy Name':myCurrentPolicyName}"
appendJAMF_Package_To_Regular_Policy_Info = eval(appendDataToCVS_JAMF_Package_To_Regular_Policy_Info)
appendPackageToRegularPolicyColumns = appendJAMF_Package_To_Regular_Policy_Info
#Set Columns
Combined = appendPackageToRegularPolicyColumns
#Set CSV File
dataToCsvPackageToPolicy.append(Combined)
# For Testing
#print(f"Yes, Package ID: " + myCurrentPackageID + " with Package Name: " + myCurrentPackageName + " and Package File Name: " + myPackageRecordsFileName + ", is being used by Policy ID: " + str(myCurrentPolicyID) + " with Policy Name: " + myCurrentPolicyName)
elif checkIfPackageIsUsedInPolicy(preStagePolicyPackagesList, key, value):
for preStagePolicy in preStagePolicyPackagesList:
preStagePolicyPackageID = preStagePolicy['Package ID']
checkPreStagePolicyListID = str(preStagePolicyPackageID)
checkPackageRecordsID = str(packageRecordsID)
if checkPreStagePolicyListID == checkPackageRecordsID:
# Set up url for getting information for this package ID from the JAMF API
url = JAMF_url + "/JSSResource/packages/id/" + str(packageRecordsID)
try:
myPackageRecordsResponse = http.get(url, headers=btHeaders)
myPackageRecordsResponse.raise_for_status()
getMyPackageRecords = myPackageRecordsResponse.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
#print(getMyPackageRecords['package']['id'])
#print(getMyPackageRecords)
#Set Variables if Data Available
if len(str(getMyPackageRecords['package']['id'])) == 0:
myCurrentPackageID = ''
else:
myCurrentPackageID = int(getMyPackageRecords['package']['id'])
myCurrentPackageName = getMyPackageRecords['package']['name']
myPackageRecordsFileName = getMyPackageRecords['package']['filename']
if len(str(preStagePolicy['PreStage Policy ID'])) == 0:
myCurrentPreStagePolicyID = ''
else:
myCurrentPreStagePolicyID = int(preStagePolicy['PreStage Policy ID'])
myCurrentPreStagePolicyName = preStagePolicy['PreStage Policy Display Name']
appendDataToCVS_JAMF_Package_To_PreStage_Policy_Info = "{'Type':'Package Used',\
\
'Package List':'PreStage Policy',\
\
'Package ID':myCurrentPackageID,\
\
'Package Name':myCurrentPackageName,\
\
'Package File Name':myPackageRecordsFileName,\
\
'PreStage Policy ID':myCurrentPreStagePolicyID,\
\
'PreStage Policy Name':myCurrentPreStagePolicyName}"
appendJAMF_Package_To_PreStage_Policy_Info = eval(appendDataToCVS_JAMF_Package_To_PreStage_Policy_Info)
appendPackageToPreStagePolicyColumns = appendJAMF_Package_To_PreStage_Policy_Info
#Set Columns
Combined = appendPackageToPreStagePolicyColumns
#Set CSV File
dataToCsvPackageToPolicy.append(Combined)
# For Testing
#print(f"Yes, Package ID: " + myCurrentPackageID + " with Package Name: " + myCurrentPackageName + " and Package FileName: "+ myPackageRecordsFileName + " is being used in PreStage Policies ID: " + myCurrentPreStagePolicyID + " with PreStage Display Name: " + myCurrentPreStagePolicyName)
else:
# Set up url for getting information for this package ID from the JAMF API
url = JAMF_url + "/JSSResource/packages/id/" + str(packageRecordsID)
try:
myPackageRecordsResponse = http.get(url, headers=btHeaders)
myPackageRecordsResponse.raise_for_status()
getMyPackageRecords = myPackageRecordsResponse.json()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# for testing
#print(getMyPackageRecords['package']['id'])
#Set Variables if Data Available
if len(str(getMyPackageRecords['package']['id'])) == 0:
myUnusedCurrentPackageID = ''
else:
myUnusedCurrentPackageID = int(getMyPackageRecords['package']['id'])
myUnusedPackageName = getMyPackageRecords['package']['name']
myUnusedPackageRecordsFileName = getMyPackageRecords['package']['filename']
appendDataToCVS_JAMF_Package_Unused_Info = "{'Type':'Package Not Used',\
\
'Package List':'',\
\
'Package ID':myUnusedCurrentPackageID,\
\
'Package Name':myUnusedPackageName,\
\
'Package File Name':myUnusedPackageRecordsFileName}"
appendJAMF_Package_Unused_Info = eval(appendDataToCVS_JAMF_Package_Unused_Info)
appendPackageUnusedColumns = appendJAMF_Package_Unused_Info
#Set Columns
Combined = appendPackageUnusedColumns
#Set CSV File
dataToCsvPackageToPolicy.append(Combined)
#print(f"No, Package ID: " + str(packageRecordsID) + ", Package Name: " + packageRecordsName + " is not being used in any Policies")
##########################################################################################
# Process data for Export to csv / Excel
##########################################################################################
# Check and make sure that either Policy or Config Profile was selected
if get_JAMF_Computers_Info == 'yes' or get_JAMF_Policy_Info == 'yes' or get_JAMF_Configuration_Profile_Info == 'yes' or get_JAMF_Package_To_Policy_Info == 'yes':
# Get export to csv file
if get_JAMF_Computers_Info == ("yes"):
df_computers = pd.DataFrame(dataToCsvComputers)
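# Each requested report list is converted to its own DataFrame before export. As an
# illustrative sketch only (the actual export code follows later in the script and may
# differ), the frames could be written to one Excel workbook with a sheet per report:
#
#   with pd.ExcelWriter('JAMF_Report.xlsx') as writer:      # file name is an example
#       df_computers.to_excel(writer, sheet_name='Computers', index=False)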
if get_JAMF_Policy_Info == ("yes"):
df_policy =
|
pd.DataFrame(dataToCsvPolicy)
|
pandas.DataFrame
|
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
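# For context (not part of the original test): datetime components of a datetime64 Series
# are reached through the .dt accessor, e.g.
#   pd.Series(pd.date_range('2000-01-01', periods=3)).dt.year
# which is why plain attribute access like self.dt_series.year raises TypeError above.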
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index =
|
pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
|
pandas.date_range
|
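A small sketch of the pandas.date_range call this row targets, mirroring the timezone-aware index built in the test above; the timezone and period count are illustrative.
import pandas as pd
# Two consecutive daily timestamps, optionally timezone-aware.
index = pd.date_range('2001-01-01', periods=2, freq='D', tz='Asia/Tokyo')
print(index)            # DatetimeIndex carrying the tz and the 'D' freq
print(index.repeat(3))  # repeat() drops the freq, as the test asserts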
import pandas as pd
import numpy as np
import sys
import os
import matplotlib.pyplot as plt
from sortedcontainers import SortedDict
import warnings
import math
from copy import deepcopy
from tqdm import tqdm
import matplotlib as mpl
from collections import Counter
warnings.filterwarnings('ignore')
pd.options.display.max_rows = 10000
import matplotlib.dates as mdates
from config_default import *
from helpers import *
import platform
from helpers import myround
def get_deal(raw):
active_exec = raw[raw["NO"] == "mm"]
passive_exec = raw[raw["NO"] != "mm"]
bid_deal = pd.concat([active_exec[active_exec["BUYSELL"] == "B"], \
passive_exec[passive_exec["BUYSELL"] == "S"]])
ask_deal = pd.concat([active_exec[active_exec["BUYSELL"] == "S"], \
passive_exec[passive_exec["BUYSELL"] == "B"]])
return bid_deal["TIME"] // 100000, ask_deal["TIME"] // 100000, bid_deal["PRICE"], ask_deal["PRICE"]
def p_l_calc(raw, fair_price, share):
# PL = Bought*(FP-AvgBid) + Sold*(AvgAsk-FP)
active_exec = raw[raw["NO"] == "mm"]
passive_exec = raw[raw["NO"] != "mm"]
bought_vol = sum(active_exec[active_exec["BUYSELL"] == "B"]["VOLUME"]) + \
sum(passive_exec[passive_exec["BUYSELL"] == "S"]["VOLUME"])
sold_vol = sum(active_exec[active_exec["BUYSELL"] == "S"]["VOLUME"]) + \
sum(passive_exec[passive_exec["BUYSELL"] == "B"]["VOLUME"])
if bought_vol >= 1:
avg_bought = (sum(active_exec[active_exec["BUYSELL"] == "B"]["TURNOVER"]) + \
sum(passive_exec[passive_exec["BUYSELL"] == "S"]["TURNOVER"])) / bought_vol
else:
avg_bought = 0
if sold_vol >= 1:
avg_sold = (sum(active_exec[active_exec["BUYSELL"] == "S"]["TURNOVER"]) + \
sum(passive_exec[passive_exec["BUYSELL"] == "B"]["TURNOVER"])) / sold_vol
else:
avg_sold = 0
return round(bought_vol * (fair_price - avg_bought) + sold_vol * (avg_sold - fair_price), 1), \
bought_vol, sold_vol, myround(avg_bought, base=price_step[share]), myround(avg_sold, base=price_step[share])
def algo_stats(raw, fair_price, first_trade, algo, date, trade_log, dom, share="LKOH", bid_ask=False, show_deals=False):
print("Stats for date -", date)
print("Algo Params: ")
print(algo.algo_params)
print()
print("Num of trades - ", raw.shape[0])
print("Algo turnover - ", round(sum(raw["PRICE"] * raw["VOLUME"]), 1))
p_l, bought_vol, sold_vol, avg_bought, avg_sold = p_l_calc(raw, fair_price, share)
print("P&L Gross - ", p_l)
print("P&L Net(with commision) -", round(p_l - sum(raw["PRICE"] * raw["VOLUME"]) * 0.00008, 1))
print("Num of bought - ", bought_vol)
print("Weighted average bought price - ", avg_bought)
print("Num of sold - ", sold_vol)
print("Weighted average sold price - ", avg_sold)
print("Open Price - ", first_trade)
print("Close price - ", dom.trade_log[-1][0])
print("Initial cash - ", algo.first_cash)
print("Total Return - ", round(100 * p_l / algo.first_cash, 2), "%", sep="")
mpl.style.use("seaborn")
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(20, 12))
axs[0, 0].set_title(
algo.name + ' - Algo Equity in % at ' + str(date)[:4] + "/" + str(date)[4:6] + "/" + str(date)[6:], size=16)
if algo.name[:4] != "Hard":
print("End Cash - ", round(algo.cash))
print("End Equity - ", round(algo.cash + (bought_vol - sold_vol) * fair_price))
print("Max day Drawdown - ", round((min(algo.equity) / algo.first_cash - 1) * 100, 2), "%", sep='')
#print(pd.to_datetime(pd.Series(algo.time).astype(str).str[:4], format="%H%M"))
axs[0, 0].plot(pd.to_datetime(pd.Series(algo.time).astype(str).str[:4], format="%H%M"),
np.array(algo.equity) / algo.first_cash * 100)
axs[0, 1].set_title(share + " Price at " + str(date)[:4] + "/" + str(date)[4:6] + "/" + str(date)[6:], size=16)
axs[1, 0].plot(pd.to_datetime(pd.Series(algo.time).astype(str).str[:4], format="%H%M"), np.array(algo.volume_lst))
else:
print("End Cash - ", round(algo.cash[share]))
print("End Equity - ", round(algo.cash[share] + (bought_vol - sold_vol) * fair_price))
print("Max day Drawdown - ", round((min(algo.equity[share]) / algo.first_cash - 1) * 100, 2), "%", sep='')
axs[0, 0].plot(pd.to_datetime(pd.Series(algo.time[share]).astype(str).str[:4], format="%H%M"),
np.array(algo.equity[share]) / algo.first_cash * 100)
axs[0, 1].set_title(share + " Bid/Ask Pricing at " + str(date)[:4] + "/" + str(date)[4:6] + "/" + str(date)[6:],
size=16)
axs[1, 0].plot(pd.to_datetime(pd.Series(algo.time[share]).astype(str).str[:4], format="%H%M"),
np.array(algo.volume_lst[share]))
axs[0, 1].plot(pd.to_datetime(trade_log["TIME"], format="%H%M%S%f"), trade_log["PRICE"])
if bid_ask:
if algo.name[:4] == "Hard":
axs[0, 1].plot(pd.to_datetime(
|
pd.Series(algo.time[share])
|
pandas.Series
|
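A hedged sketch of the pandas.Series usage this row completes: wrapping raw integer timestamps in a Series so they can be sliced and parsed for plotting. The timestamp values are made up, not taken from the trading data above.
import pandas as pd
# Wrap raw integer timestamps (HHMMSSfff-style) in a Series, keep the HHMM
# part, and parse it into proper datetimes for the x-axis.
times = pd.Series([100000100, 100500200, 101000300])
hhmm = times.astype(str).str[:4]
parsed = pd.to_datetime(hhmm, format="%H%M")
print(parsed)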
import os
import sys
import warnings
import pickle
from IPython.core.display import display
import numpy as np
import pandas as pd
from generator_labeler.ActiveModel import RandomForestQuantileRegressor
from generator_labeler.FeatureExtraction import PredictorFeatureExtraction as PFE
from generator_labeler.Analysis.models import get_X_y
from generator_labeler.Analysis.GeneratedJobsAnalyzer import load_generated_dataset, load_validation_dataset
from generator_labeler.FeatureExtraction.PredictorFeatureExtraction import preprocess_jobs_data_info, fill_jobs_cardinality
from generator_labeler.JobExecutionSampler.unsupervised_sampler import UniformAgglomerativeSampler, RandomSampler
from generator_labeler.ActiveModel.ActiveQuantileForest import QuantileForestModel
from generator_labeler.paper_results import IMDB_config, TPCH_config, TPCH_config_2
import matplotlib.pyplot as plt
import seaborn as sns
# TDGen dependencies
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge
from sklearn.metrics import r2_score
sns.set_context("talk")
sns.set_style("whitegrid")
np.random.seed(42)
# np.random.seed(51)
def compute_data_plan_features(config):
args_params = config.parse_args(sys.argv)
jobs_data_info = preprocess_jobs_data_info(args_params["generatedJobsInfo"])
data_sizes = config.data_sizes
data_plan_features = []
for d_id in data_sizes:
data_f = fill_jobs_cardinality(jobs_data_info, data_size=d_id)
data_plan_features.append(data_f)
data_plan_features = pd.concat(data_plan_features)
with open(os.path.join(config.dest_folder, "data_plan_features.pkl"), "wb") as handle:
pickle.dump(data_plan_features, handle)
def load_data_and_preprocess(config, load_original_cards=False):
args_params = config.parse_args(sys.argv)
# Load dataset
if config.plan_data_features_path is not None:
print("#####################################################")
print("## WARNING! Loading pre-existing features dataset! ##")
print("#####################################################")
plan_data_features = pd.read_csv(config.plan_data_features_path).set_index(["plan_id", "data_id"])
else:
plan_data_features, exec_plans_graph = load_generated_dataset(args_params, data_sizes=config.data_sizes, load_original_cards=load_original_cards)
# Log of labels and features
plan_data_features["Log_netRunTime"] = np.log(plan_data_features["netRunTime"])
sourceCardinalitySum = plan_data_features["sourceCardinalitySum"].copy()
sourceCardinalitySum[sourceCardinalitySum == 0] = 1 # Solves a bug in uniform sampler, because log of 0 is minus inf
plan_data_features["Log_sourceCardinalitySum"] = np.log(sourceCardinalitySum)
return plan_data_features
def run(config, load_original_cards=False, random_sampling=False):
# Load plan_data_features
plan_data_features = load_data_and_preprocess(config, load_original_cards=load_original_cards)
# Persist features
plan_data_features.to_csv(os.path.join(config.dest_folder, "plan_data_features.csv"))
# Remove outliers
plan_data_features_no_out = PFE.remove_outliers(plan_data_features.copy(), "netRunTime", b=0.01)
plan_data_features_no_out.to_csv(os.path.join(config.dest_folder, "plan_data_features_no_out.csv"))
# Init learning process
df = plan_data_features_no_out.copy()
dev_df = df.copy()
test_df = df[~df.index.isin(dev_df.index)].copy()
if random_sampling:
print("Random init sampling...")
sample_model = RandomSampler(51, config.feature_cols, config.label_col, seed=42)
else:
sample_model = UniformAgglomerativeSampler(50, config.feature_cols, config.label_col, config.sample_col)
sample_model.fit(dev_df, verbose=True)
# save init_job_sample_ids
np.savetxt(os.path.join(config.dest_folder, "init_job_sample_ids.txt"), sample_model.sample_ids, fmt="%d")
train_data_df = sample_model.transform(dev_df.copy())
val_data_df = dev_df.loc[~dev_df.index.isin(train_data_df.index), :]
test_data_df = test_df.copy()
X_train, y_train = get_X_y(train_data_df, config.feature_cols, config.label_col)
ids_train = train_data_df.reset_index()[["plan_id", "data_id"]]
print("Train data:", X_train.shape)
X_test, y_test = get_X_y(val_data_df, config.feature_cols, config.label_col)
ids_test = val_data_df.reset_index()[["plan_id", "data_id"]]
print("Test data:", X_test.shape)
results = test_active_learning(X_train.copy(), y_train.copy(), ids_train.copy(),
X_test.copy(), y_test.copy(), ids_test.copy(),
config.feature_cols,
n_iter=config.n_iter,
verbose=True)
with open(os.path.join(config.dest_folder, "learning_process.pkl"), "wb") as handle:
pickle.dump(results, handle)
def active_learning_iteration(X_train, y_train, ids_train, X_test, y_test, ids_test, feature_cols, verbose=False):
results = {}
qf_model = QuantileForestModel(random_state=42)
qf_model.fit(X_train, y_train)
qf_model.cross_validate(X_train, y_train)
qf_model.validate(X_test, y_test)
y_pred = qf_model.predict(X_test)
y_pred_upper = qf_model.predict(X_test, quantile=75)
y_pred_lower = qf_model.predict(X_test, quantile=25)
if verbose:
p = y_test.argsort()
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(y_test[p], marker=".", linewidth=1, label="y_true", color="#1f77b4")
ax.errorbar(np.arange(len(y_pred)), y_pred[p],
yerr=np.array([y_pred[p] - y_pred_lower[p], y_pred_upper[p] - y_pred[p]]), linewidth=0.5, fmt='.',
color="#ff7f0e", label="Pred. interval")
ax.set_title(f"{type(qf_model).__name__} - Score[r2]: {qf_model.test_scores['r2']:.2f}")
ax.set_ylabel("Log(Runtime)")
ax.set_xlabel("Test jobs")
ax.legend()
# plt.show()
plt.close()
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(np.exp(y_test[p]), marker=".", linewidth=1, label="y_true", color="#1f77b4")
ax.errorbar(np.arange(len(y_pred)), np.exp(y_pred[p]), yerr=np.array(
[np.exp(y_pred[p]) - np.exp(y_pred_lower[p]), np.exp(y_pred_upper[p]) - np.exp(y_pred[p])]), linewidth=0.5,
fmt='.', color="#ff7f0e", label="Pred. interval")
ax.set_title(f"EXP - {type(qf_model).__name__} - Score[r2]: {qf_model.test_scores_exp['r2']:.2f}")
ax.set_ylabel("Runtime [ms]")
ax.set_xlabel("Test jobs")
ax.legend()
# plt.show()
plt.close()
display(pd.DataFrame({"Feature": feature_cols, "F. Importance": qf_model.model.feature_importances_}) \
.sort_values("F. Importance", ascending=False).head(15).style.background_gradient())
IQR_interval = qf_model.predict_model_uncertainty(X_test, verbose=True)
results["model"] = qf_model
results["train_ids"] = ids_train.to_dict()
results["test_ids"] = ids_test.to_dict()
results["train_labels"] = y_train
results["test_labels"] = y_test
results["pred_labels"] = y_pred
results["uncertainty_high"] = y_pred_upper
results["uncertainty_low"] = y_pred_lower
results["uncertainty_interval"] = IQR_interval
results["feature_importance"] = {"Feature": feature_cols, "F_Importance": qf_model.model.feature_importances_}
return results
def test_active_learning(X_train, y_train, ids_train, X_test, y_test, ids_test, feature_cols, n_iter=20, verbose=False, random_sampling=False):
warnings.filterwarnings("ignore")
data_size = []
test_scores = []
cross_validation_scores = []
test_scores_exp = []
cross_validation_scores_exp = []
IQRs_mean = []
iterations_results = []
for idx in range(n_iter):
print("======= Iteration", idx)
data_size.append(X_train.shape[0])
print("Train:", X_train.shape)
print("Test:", X_test.shape)
iter_res = active_learning_iteration(X_train, y_train, ids_train, X_test, y_test, ids_test, feature_cols, verbose=verbose)
if random_sampling:
IRQ_th = np.quantile(iter_res["uncertainty_interval"], 0.95)
len_new_X_train = len(X_test[iter_res["uncertainty_interval"] > IRQ_th])
sampling_idx = np.random.randint(0, len(X_test), len_new_X_train)
new_X_train = X_test[sampling_idx]
new_y_train = y_test[sampling_idx]
new_ids_train = ids_test.iloc[sampling_idx].copy()
else: # Sampling based on uncertainty threshold
IRQ_th = np.quantile(iter_res["uncertainty_interval"], 0.95)
new_X_train = X_test[iter_res["uncertainty_interval"] > IRQ_th]
new_y_train = y_test[iter_res["uncertainty_interval"] > IRQ_th]
new_ids_train = ids_test.iloc[iter_res["uncertainty_interval"] > IRQ_th].copy()
# update test
X_test = np.delete(X_test, np.where(iter_res["uncertainty_interval"] > IRQ_th), axis=0)
y_test = np.delete(y_test, np.where(iter_res["uncertainty_interval"] > IRQ_th), axis=0)
ids_test = ids_test[~ids_test.index.isin(new_ids_train.index)].copy()
# update train
X_train = np.concatenate([X_train, new_X_train])
y_train = np.concatenate([y_train, new_y_train])
ids_train = pd.concat([ids_train, new_ids_train])
# store info
test_scores.append(iter_res["model"].test_scores)
cross_validation_scores.append(iter_res["model"].cross_validation_scores)
test_scores_exp.append(iter_res["model"].test_scores_exp)
cross_validation_scores_exp.append(iter_res["model"].cross_validation_scores_exp)
IQRs_mean.append(np.mean(np.abs(iter_res["uncertainty_interval"])))
iter_res["model"] = str(iter_res["model"])
iterations_results.append(iter_res)
print("=====================================================")
results = {
"iterations": list(range(n_iter)),
"data_size": data_size,
"model_uncertainty": IQRs_mean,
"test_scores": test_scores,
"test_scores_exp": test_scores_exp,
"cross_validation_scores": cross_validation_scores,
"cross_validation_scores_exp": cross_validation_scores_exp,
"iterations_results": iterations_results
}
return results
def load_train_data(base_folder, experiment_id, iteration_to_show=2):
data_file = f"{base_folder}/{experiment_id}/learning_process.pkl"
features_file = f"{base_folder}/{experiment_id}/plan_data_features_no_out.csv"
with open(data_file, "rb") as handle:
learning_data = pickle.load(handle)
features_df = pd.read_csv(features_file)
labels = learning_data["iterations_results"][iteration_to_show]
train_df = pd.DataFrame(labels["train_ids"])
train_df["labels"] = labels["train_labels"]
test_df = pd.DataFrame(labels["test_ids"])
test_df["labels"] = labels["pred_labels"]
labels_df = pd.concat([train_df, test_df], ignore_index=True)
return
|
pd.merge(features_df, labels_df)
|
pandas.merge
|
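A minimal sketch of the pandas.merge call this row completes: joining a features table to a labels table on their shared key columns. The column names and values are illustrative, not taken from the experiment above.
import pandas as pd
features_df = pd.DataFrame({"plan_id": [1, 1, 2], "data_id": ["a", "b", "a"],
                            "sourceCardinalitySum": [10, 20, 30]})
labels_df = pd.DataFrame({"plan_id": [1, 2], "data_id": ["a", "a"],
                          "labels": [0.5, 0.9]})
# merge() joins on the overlapping columns (plan_id, data_id) by default;
# rows without a label are dropped because the default join is inner.
merged = pd.merge(features_df, labels_df)
print(merged)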
import os
import pickle
import asyncio
import pandas as pd
from binance import Client
from typing import Iterable
from binance_bot_simulation.exchange_bots.strategy import Strategy
from common import mkdirs, timing
from binance_bot_simulation.exchange_bots.portfolio import InitialPortfolio
from binance_bot_simulation.simulation.simulation_exchange_bot import SimulationExchangeBot
from binance_bot_simulation.binance.binance_download_data import download_data, change_df_types
from common.plot import plot_simulation
class Simulation:
"""
The Simulation class is, as the name suggests, responsible for running the simulation.
It runs a simulation loop that iterates over all the received data and sends the candles
in order, one by one, driven by the simulator clock.
If more than one DataFrame is passed in, the simulation plays their candles back simultaneously.
"""
def __init__(self,
simulation_start_time: pd.Timestamp = None,
verbose=True):
self.verbose = verbose
self.simulation_data_feeds = {}
self.simulation_start_time = simulation_start_time
self.exchange = SimulationExchangeBot()
def create_portfolio(self, **coins):
self.exchange.create_portfolio(self.simulation_start_time, **coins)
@property
def portfolio(self):
return self.exchange.portfolio
def add_data_feed(self, coin: str, interval: str, data_feed: pd.DataFrame):
if coin not in self.simulation_data_feeds:
self.simulation_data_feeds[coin] = {}
if interval in self.simulation_data_feeds[coin]:
raise ValueError(f'{coin}/{interval} is already in the simulation.')
self.simulation_data_feeds[coin][interval] = data_feed.loc[data_feed.index >= self.simulation_start_time]
self.exchange.add_history(coin, interval, data_feed.loc[data_feed.index < self.simulation_start_time])
def add_strategy(self, strategy: Strategy):
self.exchange.set_strategy(strategy)
def start(self):
"""
Start the simulation loop.
The simulation loop ticks at the smallest DataFrame interval.
"""
loop = asyncio.get_event_loop()
loop.run_until_complete(self.async_start())
async def async_start(self):
await self.exchange.start()
concatenate_df = []
for coin, dfs in self.simulation_data_feeds.items():
concatenate_df += list(dfs.values())
concatenate_df =
|
pd.concat(concatenate_df)
|
pandas.concat
|
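A short sketch of the pandas.concat call this row completes: stacking per-interval data feeds into one frame and restoring chronological order. The prices and timestamps are invented for illustration.
import pandas as pd
feed_1m = pd.DataFrame({"close": [10.0, 10.5]},
                       index=pd.to_datetime(["2021-01-01 00:01", "2021-01-01 00:02"]))
feed_5m = pd.DataFrame({"close": [10.2]},
                       index=pd.to_datetime(["2021-01-01 00:05"]))
# Stack the individual feeds into one frame; sort_index() restores
# chronological order across feeds.
combined = pd.concat([feed_1m, feed_5m]).sort_index()
print(combined)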
"""
Unit tests for dynophore.core.superfeature.SuperFeature class.
Uses fixture tests.conftest.superfeature.
"""
from pathlib import Path
import pytest
import pandas as pd
from dynophores import parsers
from dynophores.core.superfeature import SuperFeature
from dynophores.core.envpartner import EnvPartner
from dynophores.core.chemicalfeaturecloud3d import ChemicalFeatureCloud3D
PATH_TEST_DATA = Path(__name__).parent / "dynophores/tests/data"
class TestsSuperFeature:
"""
Test SuperFeature class methods.
"""
def test_init(self):
dynophore_dict = parsers._json_pml_to_dict(
PATH_TEST_DATA / "out/1KE7_dynophore.json",
PATH_TEST_DATA / "out/1KE7_dynophore.pml",
)
superfeature_dict = next(iter(dynophore_dict["superfeatures"].values()))
superfeature = SuperFeature(**superfeature_dict)
assert isinstance(superfeature, SuperFeature)
assert list(superfeature.__dict__) == [
"id",
"feature_type",
"atom_numbers",
"occurrences",
"envpartners",
"color",
"cloud",
]
# Test class attributes - check for data types
assert isinstance(superfeature.id, str)
assert isinstance(superfeature.feature_type, str)
assert isinstance(superfeature.atom_numbers[0], int)
assert isinstance(superfeature.occurrences[0], int)
assert isinstance(superfeature.envpartners, dict)
assert isinstance(next(iter(superfeature.envpartners.values())), EnvPartner)
assert isinstance(superfeature.color, str)
assert isinstance(superfeature.cloud, ChemicalFeatureCloud3D)
def test_envpartners_occurrences(self, superfeature):
"""
Test class property.
"""
data = superfeature.envpartners_occurrences
assert isinstance(data, pd.DataFrame)
assert data.index.to_list() == list(range(0, len(superfeature.occurrences)))
assert sorted(data.columns.to_list()) == sorted(
[envpartner.id for _, envpartner in superfeature.envpartners.items()]
)
assert data.dtypes.unique() == "int32"
@pytest.mark.parametrize(
"envpartners_collapsed",
[["ILE-10-A[169,171,172]", "PHE-82-A[1245,1246,1247,1248,1249,1250]"]],
)
def test_envpartners_occurrences_collapsed(self, superfeature, envpartners_collapsed):
"""
Test class property.
"""
data = superfeature.envpartners_occurrences_collapsed
assert isinstance(data, pd.DataFrame)
assert data.index.to_list() == list(range(0, len(superfeature.occurrences)))
assert sorted(data.columns.to_list()) == sorted(envpartners_collapsed)
assert data.dtypes.unique() == "int32"
def test_envpartners_distances(self, superfeature):
"""
Test class property.
"""
data = superfeature.envpartners_distances
assert isinstance(data, pd.DataFrame)
assert data.index.to_list() == list(range(0, len(superfeature.occurrences)))
assert sorted(data.columns.to_list()) == sorted(
[envpartner.id for _, envpartner in superfeature.envpartners.items()]
)
assert data.dtypes.unique() == "float64"
@pytest.mark.parametrize("data_type", ["xxx"])
def test_data_raises(self, superfeature, data_type):
"""
Test helper method to call envpartners_occurrences and envpartners_distances properties.
"""
with pytest.raises(KeyError):
superfeature._data(data_type)
@pytest.mark.parametrize("n_frames", [1002])
def test_n_frames(self, superfeature, n_frames):
"""
Test class property.
"""
assert superfeature.n_frames == n_frames
@pytest.mark.parametrize(
"count, frequency, envpartner_ids",
[
(
[1001, 995, 945, 57],
[99.90, 99.30, 94.31, 5.69],
[
"any",
"ILE-10-A[169,171,172]",
"ILE-10-A[169,171]",
"PHE-82-A[1245,1246,1247,1248,1249,1250]",
],
)
],
)
def test_count_frequency(self, superfeature, count, frequency, envpartner_ids):
"""
Test class property.
"""
count =
|
pd.Series(count, index=envpartner_ids)
|
pandas.Series
|
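A minimal sketch of the pandas.Series construction this test expects: a labelled count vector. The counts and environment-partner ids below are illustrative.
import pandas as pd
# Values are occurrence counts; the index holds the environment-partner
# identifiers they belong to.
count = pd.Series([1001, 995], index=["any", "ILE-10-A[169,171,172]"])
print(count["any"])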
import pytest
import pandas as pd
import numpy as np
import collate
import count_junction_reads as cjr
# dummy classes
class Read:
def __init__(self, contig, start, end, name):
self.contig = contig
self.reference_start = start
self.reference_end = end
self.query_name = name
class AlignmentFile:
def __init__(self, reads):
self.reads = []
for read in reads:
contig, start, end, name = read
self.reads.append(Read(contig, start, end, name))
def fetch(self, contig, start, end, until_eof=True):
# dummy method
return self.reads
test_reads = [('1', 100, 200, 'A'),
('1', 105, 205, 'B'),
('1', 200, 300, 'C')]
#def test_parse_args():
# args = collate.parse_args(['sample',
# 'contig_info.tsv',
# 'de_results.tsv',
# '--gene_filter', 'gene_filter.txt',
# '--var_filter', 'FUS INS DEL'])
# assert args.sample == 'sample'
# assert args.contig_info == 'contig_info.tsv'
# assert args.gene_filter == 'gene_filter.txt'
# assert args.var_filter == ['FUS INS DEL']
@pytest.mark.parametrize('gene,expected', [('A', 'A'),
('A|B', 'A'),
('A:B', 'A|B'),
('A|B|C:X|Y|Z', 'A|X')])
def test_get_short_gene_name(gene, expected):
assert collate.get_short_gene_name(gene) == expected
def test_make_junctions():
st_blocks = {'start': [100, 200, 300],
'end': [150, 202, 311]}
st_blocks =
|
pd.DataFrame.from_dict(st_blocks)
|
pandas.DataFrame.from_dict
|
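A small sketch of the pandas.DataFrame.from_dict call this row completes, using the same block layout the test constructs.
import pandas as pd
st_blocks = {"start": [100, 200, 300], "end": [150, 202, 311]}
# from_dict with the default orient='columns' maps each key to a column.
blocks_df = pd.DataFrame.from_dict(st_blocks)
print(blocks_df)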
import time, os, pickle
import numpy as np
import pandas as pd
from flask import Flask, request, jsonify, make_response, Response
from flask_restplus import Api, fields, Resource
from flask_cors import CORS, cross_origin
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
app = Flask(__name__)
CORS(app)
api = Api(
app,
version='1.0',
title='Santander Customer Transaction Prediction API',
description='Customer Transaction Prediction API')
ns = api.namespace('transaction_prediction', description='Transaction Prediction')
model = pickle.load(open('./lgbm.pkl','rb'))
parser = api.parser()
# The Santander data has 200 feature columns (var_0..var_199); the raw file is 202 columns including ID_code and target
number_features = 200
#parse features for API
for idx in range(number_features):
parser.add_argument(
'var_'+str(idx),
type=float,
required=True,
help='feature'+str(idx),
location='form'
)
parser.add_argument(
'ID_code',
type=str,
required=False,
help='id',
location='form'
)
# Set up the request parameters, feed them into the model, and return the transaction prediction (probabilities).
resource_fields = api.model('Resource', {
'result': fields.List(fields.Float)
})
upload_parser = api.parser()
upload_parser.add_argument('file', location='files', type=FileStorage, required=True)
@ns.route('/upload/')
@api.expect(upload_parser)
class Upload(Resource):
def post(self):
args = upload_parser.parse_args()
file = args.get('file') # This is FileStorage instance
result = pd.read_csv(file)
return {'url': result}, 201
def get_results(self, file):
df =
|
pd.DataFrame(file)
|
pandas.DataFrame
|
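A hedged sketch of turning parsed request arguments into a one-row pandas.DataFrame that can be fed to model.predict(); the keys and values below are illustrative and not part of the API above.
import pandas as pd
# One prediction request becomes a single-row frame whose columns match the
# model's feature names.
payload = {"var_0": 1.23, "var_1": -0.5, "var_2": 4.56}
df = pd.DataFrame([payload])
print(df.shape)  # (1, 3)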
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split
import copy
import random
import pandas as pd
linkages = ['a1-2','a1-3','a1-4','a1-6','a2-3','a2-6','a2-8','b1-2','b1-3',
'b1-4','b1-6']
def seed_wildcard_hierarchy(glycans, labels, wildcard_list,
wildcard_name, r = 0.1):
"""adds dataframe rows in which glycan parts have been replaced with the appropriate wildcards\n
| Arguments:
| :-
| glycans (list): list of IUPAC-condensed glycan sequences as strings
| labels (list): list of labels used for prediction
| wildcard_list (list): list of the glycoletters that a wildcard encompasses
| wildcard_name (string): how the wildcard should be named in the IUPAC-condensed nomenclature
| r (float): rate of replacement, default is 0.1 or 10%\n
| Returns:
| :-
| Returns list of glycans (strings) and labels (flexible) where some glycan parts have been replaced with wildcard_name
"""
added_glycans = []
added_labels = []
for k in range(len(glycans)):
temp = glycans[k]
for j in wildcard_list:
if j in temp:
if random.uniform(0, 1) < r:
added_glycans.append(temp.replace(j, wildcard_name))
added_labels.append(labels[k])
glycans += added_glycans
labels += added_labels
return glycans, labels
def hierarchy_filter(df_in, rank = 'Domain', min_seq = 5, wildcard_seed = False, wildcard_list = None,
wildcard_name = None, r = 0.1, col = 'target'):
"""stratified data split in train/test at the taxonomic level, removing duplicate glycans and infrequent classes\n
| Arguments:
| :-
| df_in (dataframe): dataframe of glycan sequences and taxonomic labels
| rank (string): which taxonomic rank should be filtered on; default is 'Domain'
| min_seq (int): how many glycans need to be present in class to keep it; default is 5
| wildcard_seed (bool): set to True if you want to seed wildcard glycoletters; default is False
| wildcard_list (list): list of the glycoletters that a wildcard encompasses
| wildcard_name (string): how the wildcard should be named in the IUPAC-condensed nomenclature
| r (float): rate of replacement, default is 0.1 or 10%
| col (string): column name for glycan sequences; default: target\n
| Returns:
| :-
| Returns train_x, val_x (lists of glycans (strings) after stratified shuffle split)
| train_y, val_y (lists of taxonomic labels (mapped integers))
| id_val (taxonomic labels in text form (strings))
| class_list (list of unique taxonomic classes (strings))
| class_converter (dictionary to map mapped integers back to text labels)
"""
df = copy.deepcopy(df_in)
rank_list = ['Species','Genus','Family','Order',
'Class','Phylum','Kingdom','Domain']
rank_list.remove(rank)
df.drop(rank_list, axis = 1, inplace = True)
class_list = list(set(df[rank].values.tolist()))
class_list= [k for k in class_list if k != 'undetermined']
temp = []
for i in range(len(class_list)):
t = df[df[rank] == class_list[i]]
t = t.drop_duplicates('target', keep = 'first')
temp.append(t)
df =
|
pd.concat(temp)
|
pandas.concat
|
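A minimal sketch of the pandas.concat call this row completes: stacking the per-class, de-duplicated frames back into one table. The glycans and labels are invented for illustration.
import pandas as pd
class_a = pd.DataFrame({"target": ["GlcNAc", "Man"], "Domain": ["Bacteria"] * 2})
class_b = pd.DataFrame({"target": ["Gal"], "Domain": ["Eukarya"]})
# Stack the per-class frames; ignore_index gives a clean 0..n-1 row index
# instead of repeated per-frame indices.
df = pd.concat([class_a, class_b], ignore_index=True)
print(df)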
import sys
import pandas as pd
import numpy as np
import re
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
This function loads the messages and categories files,
merges them, and returns the merged dataframe
input:
messages_filepath: The path of messages dataset.
categories_filepath: The path of categories dataset.
output:
df: The merged dataset
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = pd.merge(messages,categories, left_on='id', right_on='id', how='outer')
return df
def clean_data(df):
"""Clean the merged dataframe to make it ready to analyze
Input: DataFrame
Output: cleaned dataframe
"""
# create a dataframe of the 36 individual category columns
categories = pd.Series(df['categories']).str.split(pat=';',expand=True)
# select the first row of the categories dataframe
row = categories.iloc[0]
# use this row to extract a list of new column names for categories.
category_colnames = row.apply(lambda x: x[:-2]).values.tolist()
# rename the columns of `categories`
categories.columns = category_colnames
categories.related.loc[categories.related == 'related-2'] = 'related-1'
for column in categories:
# set each value to be the last character of the string
categories[column] = pd.Series(categories[column]).str.split('-').str[1]
# convert column from string to numeric
categories[column] =
|
pd.to_numeric(categories[column])
|
pandas.to_numeric
|
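A short sketch of the pandas.to_numeric step this row completes: converting the string category flags produced by the split above into integers. The values are illustrative.
import pandas as pd
# Category flags arrive as strings like 'related-1'; keep the digit after
# the dash and convert the column to integers with to_numeric().
col = pd.Series(["related-1", "related-0", "related-1"])
values = col.str.split("-").str[1]
numeric = pd.to_numeric(values)
print(numeric)  # dtype: int64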